Secure erase tests

Implement simple secure erase tests. Perform IO that will trigger
copying of Data buffers and make sure OCF calls secure erase on them.

Signed-off-by: Jan Musial <jan.musial@intel.com>
This commit is contained in:
Jan Musial 2019-04-16 10:57:19 +02:00
parent d4e929140e
commit 75c3948f6d
8 changed files with 290 additions and 15 deletions

View File

@ -119,7 +119,7 @@ class Cache:
metadata_volatile: bool = False,
max_queue_size: int = DEFAULT_BACKFILL_QUEUE_SIZE,
queue_unblock_size: int = DEFAULT_BACKFILL_UNBLOCK,
locked: bool = True,
locked: bool = False,
pt_unaligned_io: bool = DEFAULT_PT_UNALIGNED_IO,
use_submit_fast: bool = DEFAULT_USE_SUBMIT_FAST,
):
@ -181,19 +181,26 @@ class Cache:
def change_cache_mode(self, cache_mode: CacheMode):
    """Switch the running cache to *cache_mode*.

    Holds the cache write lock for the duration of the management call.

    :param cache_mode: target OCF cache mode
    :raises OcfError: when the OCF management call reports an error
    """
    self.get_and_write_lock()
    try:
        status = self.owner.lib.ocf_mngt_cache_set_mode(
            self.cache_handle, cache_mode
        )
        if status:
            raise OcfError("Error changing cache mode", status)
    finally:
        # Release on every path (including unexpected exceptions from the
        # ctypes call) so the cache write lock can never leak.
        self.put_and_write_unlock()
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
    """Select the cleaning policy for this cache.

    Holds the cache write lock for the duration of the management call.

    :param cleaning_policy: cleaning policy to activate
    :raises OcfError: when the OCF management call reports an error
    """
    self.get_and_write_lock()
    try:
        status = self.owner.lib.ocf_mngt_cache_cleaning_set_policy(
            self.cache_handle, cleaning_policy
        )
        if status:
            raise OcfError("Error changing cleaning policy", status)
    finally:
        # try/finally guarantees the unlock even if the call itself raises.
        self.put_and_write_unlock()
def set_cleaning_policy_param(self, cleaning_policy: CleaningPolicy, param_id, param_value):
    """Set a single parameter of the given cleaning policy.

    Holds the cache write lock for the duration of the management call.

    :param cleaning_policy: policy whose parameter is being changed
    :param param_id: numeric id of the parameter (policy specific)
    :param param_value: new value for the parameter
    :raises OcfError: when the OCF management call reports an error
    """
    self.get_and_write_lock()
    try:
        status = self.owner.lib.ocf_mngt_cache_cleaning_set_param(
            self.cache_handle,
            cleaning_policy,
            param_id,
            param_value
        )
        if status:
            raise OcfError("Error setting cleaning policy param", status)
    finally:
        # Always release the write lock, even on failure.
        self.put_and_write_unlock()
def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
    """Set the sequential cut-off policy for all cores in this cache.

    Holds the cache write lock for the duration of the management call.

    :param policy: sequential cut-off policy to apply cache-wide
    :raises OcfError: when the OCF management call reports an error
    """
    self.get_and_write_lock()
    try:
        status = self.owner.lib.ocf_mngt_core_set_seq_cutoff_policy_all(
            self.cache_handle, policy
        )
        if status:
            raise OcfError("Error setting cache seq cut off policy", status)
    finally:
        # Unlock on both the success and the error path.
        self.put_and_write_unlock()
def configure_device(
@ -238,6 +250,7 @@ class Cache:
self, device, force=False, perform_test=False, cache_line_size=None
):
self.configure_device(device, force, perform_test, cache_line_size)
self.get_and_write_lock()
c = OcfCompletion(
[("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
@ -249,8 +262,11 @@ class Cache:
c.wait()
if c.results["error"]:
self.put_and_write_unlock()
raise OcfError("Attaching cache device failed", c.results["error"])
self.put_and_write_unlock()
def load_cache(self, device):
self.configure_device(device)
c = OcfCompletion(

View File

@ -140,9 +140,12 @@ class Core:
def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
    """Set the sequential cut-off policy for this core only.

    Holds the owning cache's write lock for the duration of the call.

    :param policy: sequential cut-off policy to apply to this core
    :raises OcfError: when the OCF management call reports an error
    """
    self.cache.get_and_write_lock()
    try:
        status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_policy(
            self.handle, policy
        )
        if status:
            raise OcfError("Error setting core seq cut off policy", status)
    finally:
        # Unlock on every path so the cache lock cannot leak.
        self.cache.put_and_write_unlock()
def reset_stats(self):

View File

@ -87,7 +87,12 @@ class OcfCtx:
if vol_type:
self.unregister_volume_type(vol_type)
def stop_caches(self):
    """Stop every cache attached to this context.

    Iterates over a snapshot of the cache list, because stopping a cache
    may remove it from ``self.caches`` while we are still iterating.
    """
    for active_cache in list(self.caches):
        active_cache.stop()
def exit(self):
self.stop_caches()
self.cleanup_volume_types()
result = self.lib.ocf_ctx_exit(self.ctx_handle)

View File

@ -76,7 +76,7 @@ class Logger(Structure):
def __init__(self):
    """Wire up the C-side logger ops and private data for this logger.

    NOTE(review): the original text carried both a stale ``_printf=`` line
    and its replacement ``_print=`` (diff residue); only ``_print`` is kept,
    matching the renamed LoggerOps field.
    """
    self.ops = LoggerOps(
        _open=self._open,
        # pyocf_printf_helper is a C trampoline; cast to a raw pointer so
        # ctypes stores it verbatim in the ops structure.
        _print=cast(OcfLib.getInstance().pyocf_printf_helper, c_void_p),
        _close=self._close,
    )
    self.priv = LoggerPriv(_log=self._log)

View File

@ -3,8 +3,10 @@
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from ctypes import c_void_p, c_int, Structure, CFUNCTYPE
from .shared import SharedOcfObject
from ctypes import c_void_p, c_int, c_uint32, Structure, CFUNCTYPE
from threading import Thread, Event
from ..ocf import OcfLib
class MetadataUpdaterOps(Structure):
@ -15,14 +17,42 @@ class MetadataUpdaterOps(Structure):
_fields_ = [("_init", INIT), ("_kick", KICK), ("_stop", STOP)]
class MetadataUpdater(SharedOcfObject):
class MetadataUpdater:
pass
def mu_run(*, mu: MetadataUpdater, kick: Event, stop: Event):
    """Worker loop for a metadata updater, run in a dedicated thread.

    :param mu: the MetadataUpdater whose C handle is passed to OCF
    :param kick: event set by MetadataUpdater.kick() to request more work
    :param stop: event set by MetadataUpdater.stop() to request shutdown
    """
    while True:
        # Clear before running so a kick arriving mid-run is not lost.
        kick.clear()

        # A truthy return from ocf_metadata_updater_run makes us loop again
        # immediately without waiting — presumably "more work pending";
        # confirm against the OCF C API.
        if OcfLib.getInstance().ocf_metadata_updater_run(mu):
            continue

        # No work reported: sleep until kicked, then check for shutdown.
        kick.wait()
        if stop.is_set():
            break
class MetadataUpdater:
_instances_ = {}
_fields_ = [("mu", c_void_p)]
ops = None
def __init__(self):
self._as_parameter_ = self.mu
super().__init__()
def __init__(self, ref):
    """Wrap the raw C metadata-updater handle *ref* and start its thread.

    :param ref: opaque C pointer to the ocf metadata updater
    """
    # Lets ctypes pass this object wherever the raw handle is expected.
    self._as_parameter_ = ref
    # Registry keyed by the raw handle; the static C callbacks (_kick,
    # _stop) use it to find this Python object again.
    MetadataUpdater._instances_[ref] = self
    self.kick_event = Event()
    self.stop_event = Event()

    lib = OcfLib.getInstance()
    self.thread = Thread(
        group=None,
        target=mu_run,
        # Thread is named after the owning cache for easier debugging.
        name="mu-{}".format(
            lib.ocf_cache_get_name(lib.ocf_metadata_updater_get_cache(self))
        ),
        kwargs={"mu": self, "kick": self.kick_event, "stop": self.stop_event},
    )
    self.thread.start()
@classmethod
def get_ops(cls):
@ -32,17 +62,41 @@ class MetadataUpdater(SharedOcfObject):
)
return cls.ops
@classmethod
def get_instance(cls, ref):
    """Return the updater registered under raw handle *ref*.

    Raises KeyError if no updater was registered for that handle.
    """
    registry = cls._instances_
    return registry[ref]
@classmethod
def del_instance(cls, ref):
    """Drop the registry entry for raw handle *ref*.

    Raises KeyError if no updater was registered for that handle.
    """
    cls._instances_.pop(ref)
@staticmethod
@MetadataUpdaterOps.INIT
def _init(ref):
    """C callback: create and register a MetadataUpdater for *ref*.

    The constructor registers the instance in ``_instances_`` itself, so
    the previous unused local binding has been dropped.
    """
    MetadataUpdater(ref)
    return 0  # 0 signals success to OCF
@staticmethod
@MetadataUpdaterOps.KICK
def _kick(ref):
    """C callback: wake the worker thread of the updater behind *ref*.

    NOTE(review): a stale ``pass`` (leftover of the old stub in the diff
    rendering) has been removed; it was a harmless no-op before the call.
    """
    MetadataUpdater.get_instance(ref).kick()
@staticmethod
@MetadataUpdaterOps.STOP
def _stop(ref):
    """C callback: shut down and deregister the updater behind *ref*.

    NOTE(review): a stale ``pass`` (old stub left by the diff rendering)
    has been removed.
    """
    MetadataUpdater.get_instance(ref).stop()
    # Go through del_instance so registry bookkeeping stays in one place
    # instead of reaching into _instances_ directly.
    MetadataUpdater.del_instance(ref)
def kick(self):
    """Wake the worker thread (mu_run) so it re-runs the updater."""
    self.kick_event.set()
def stop(self):
    """Ask the worker thread to exit."""
    # Set stop first, then wake the thread, so mu_run's is_set() check
    # observes the shutdown request when it wakes.
    self.stop_event.set()
    self.kick_event.set()
# ctypes prototypes for the OCF metadata-updater C entry points used above;
# without restype/argtypes ctypes would default to int-sized returns.
lib = OcfLib.getInstance()
lib.ocf_metadata_updater_get_cache.argtypes = [c_void_p]
lib.ocf_metadata_updater_get_cache.restype = c_void_p
lib.ocf_metadata_updater_run.argtypes = [c_void_p]
lib.ocf_metadata_updater_run.restype = c_uint32

View File

@ -62,6 +62,9 @@ class Size:
def __int__(self):
    """Support int(size): return the size in bytes."""
    return self.bytes
def __index__(self):
    """Allow use where an integer index is required (slicing, hex(), ...)."""
    return self.bytes
@classmethod
def from_B(cls, value):
    """Alternate constructor: build a Size from a raw byte count."""
    return cls(value)

View File

@ -23,8 +23,6 @@ def pyocf_ctx():
c.register_volume_type(Volume)
c.register_volume_type(ErrorDevice)
yield c
for cache in c.caches[:]:
cache.stop()
c.exit()
@ -35,5 +33,4 @@ def pyocf_ctx_log_buffer():
c.register_volume_type(Volume)
c.register_volume_type(ErrorDevice)
yield logger
for cache in c.caches:
cache.stop()
c.exit()

View File

@ -0,0 +1,197 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
from ctypes import c_int
from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import Volume
from pyocf.utils import Size as S
from pyocf.types.data import Data, DataOps
from pyocf.types.ctx import OcfCtx
from pyocf.types.logger import DefaultLogger, LogLevel
from pyocf.ocf import OcfLib
from pyocf.types.metadata_updater import MetadataUpdater
from pyocf.types.cleaner import Cleaner
from pyocf.types.io import IoDir
from pyocf.types.shared import OcfCompletion
class DataCopyTracer(Data):
    """
    This class enables tracking whether each copied over Data instance is
    then securely erased.
    """

    # Buffers that were locked or copied into and still await secure_erase().
    needs_erase = set()
    # Buffers currently mlock()-ed and awaiting munlock().
    locked_instances = set()

    @staticmethod
    @DataOps.ALLOC
    def _alloc(pages):
        """Allocate a traced buffer and hand its raw handle back to OCF."""
        tracked = DataCopyTracer.pages(pages)
        Data._ocf_instances_.append(tracked)
        return tracked.handle.value

    def mlock(self):
        """Record the lock (and the pending erase), then really lock."""
        tracker = DataCopyTracer
        tracker.needs_erase.add(self)
        tracker.locked_instances.add(self)
        return super().mlock()

    def munlock(self):
        """Unlock; a locked buffer must have been securely erased first."""
        assert self not in DataCopyTracer.needs_erase, \
            "Erase should be called first on locked Data!"
        DataCopyTracer.locked_instances.discard(self) if False else \
            DataCopyTracer.locked_instances.remove(self)
        return super().munlock()

    def secure_erase(self):
        """Mark this buffer as erased, then really erase it."""
        DataCopyTracer.needs_erase.remove(self)
        return super().secure_erase()

    def copy(self, src, end, start, size):
        """Every copy target must later be securely erased."""
        DataCopyTracer.needs_erase.add(self)
        return super().copy(src, end, start, size)
@pytest.mark.security
@pytest.mark.parametrize(
    "cache_mode", [CacheMode.WT, CacheMode.WB, CacheMode.WA, CacheMode.WI]
)
def test_secure_erase_simple_io_read_misses(cache_mode):
    """
    Perform simple IO which will trigger read misses, which in turn should
    trigger backfill. Track all the data locked/copied for backfill and make
    sure OCF calls secure erase and unlock on them.
    """
    # Fresh OCF context wired with DataCopyTracer as the Data implementation
    # so every buffer lock/copy is recorded in its class-level sets.
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        MetadataUpdater,
        Cleaner,
    )

    ctx.register_volume_type(Volume)

    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)
    core_device = Volume(S.from_MiB(50))
    core = Core.using_device(core_device)
    cache.add_core(core)

    # One small synchronous write so the core holds some known data.
    write_data = Data.from_string("This is test data")
    io = core.new_io()
    io.set_data(write_data)
    io.configure(20, write_data.size, IoDir.WRITE, 0, 0)
    io.set_queue(cache.get_default_queue())

    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    # Fire 100 async reads scattered across the core device (stride 1259,
    # wrapped at device size) to generate read misses and backfill.
    cmpls = []
    for i in range(100):
        read_data = Data(500)
        io = core.new_io()
        io.set_data(read_data)
        io.configure(
            (i * 1259) % int(core_device.size), read_data.size, IoDir.READ, 0, 0
        )
        io.set_queue(cache.get_default_queue())

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()

    for c in cmpls:
        c.wait()

    # A second, larger write mixed in after the reads.
    write_data = Data.from_string("TEST DATA" * 100)
    io = core.new_io()
    io.set_data(write_data)
    io.configure(500, write_data.size, IoDir.WRITE, 0, 0)
    io.set_queue(cache.get_default_queue())
    cmpl = OcfCompletion([("err", c_int)])
    io.callback = cmpl.callback
    io.submit()
    cmpl.wait()

    # Snapshot stats before tearing the context down.
    stats = cache.get_stats()

    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    # Sanity: the workload must actually have produced read misses,
    # otherwise the erase/unlock assertions above prove nothing.
    assert (
        stats["req"]["rd_partial_misses"]["value"]
        + stats["req"]["rd_full_misses"]["value"]
    ) > 0
@pytest.mark.security
def test_secure_erase_simple_io_cleaning():
    """
    Perform simple IO which will trigger WB cleaning. Track all the data from
    cleaner (locked) and make sure they are erased and unlocked after use.
    """
    # Context wired with DataCopyTracer so cleaner-held buffers are tracked.
    ctx = OcfCtx(
        OcfLib.getInstance(),
        b"Security tests ctx",
        DefaultLogger(LogLevel.WARN),
        DataCopyTracer,
        MetadataUpdater,
        Cleaner,
    )

    ctx.register_volume_type(Volume)

    # Write-back cache (30 MiB) deliberately smaller than the core (100 MiB)
    # -- presumably to force evictions and hence cleaning; the final assert
    # verifies clean lines actually appeared.
    cache_device = Volume(S.from_MiB(30))
    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WB)
    core_device = Volume(S.from_MiB(100))
    core = Core.using_device(core_device)
    cache.add_core(core)

    # 10000 async 120 KiB writes scattered across the core device
    # (stride 1259, wrapped at device size).
    cmpls = []
    for i in range(10000):
        read_data = Data(S.from_KiB(120))
        io = core.new_io()
        io.set_data(read_data)
        io.configure(
            (i * 1259) % int(core_device.size), read_data.size, IoDir.WRITE, 0, 0
        )
        io.set_queue(cache.get_default_queue())

        cmpl = OcfCompletion([("err", c_int)])
        io.callback = cmpl.callback
        cmpls.append(cmpl)
        io.submit()

    for c in cmpls:
        c.wait()

    # Snapshot stats before tearing the context down.
    stats = cache.get_stats()

    ctx.exit()

    assert (
        len(DataCopyTracer.needs_erase) == 0
    ), "Not all locked Data instances were secure erased!"
    assert (
        len(DataCopyTracer.locked_instances) == 0
    ), "Not all locked Data instances were unlocked!"
    # Sanity: cleaning must actually have happened for this test to mean
    # anything.
    assert (stats["usage"]["clean"]["value"]) > 0, "Cleaner didn't run!"