switch to volume I/O interface in functional tests

... instead of core::new_io()

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
parent 9e88afad62
commit 05f5d49be9
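For orientation, below is a minimal sketch of the volume I/O path the tests move to, composed from calls that appear in the hunks that follow (CoreVolume, vol.new_io(), OcfCompletion). The device sizes, payload and test name are illustrative only and are not taken from any single test in this diff.

    from ctypes import c_int

    from pyocf.types.cache import Cache
    from pyocf.types.core import Core
    from pyocf.types.data import Data
    from pyocf.types.io import IoDir
    from pyocf.types.shared import OcfCompletion
    from pyocf.types.volume import RamVolume
    from pyocf.types.volume_core import CoreVolume
    from pyocf.utils import Size as S


    def test_write_one_sector(pyocf_ctx):
        # pyocf_ctx is the usual fixture that sets up the OCF context for the test.
        cache = Cache.start_on_device(RamVolume(S.from_MiB(50)))
        core = Core.using_device(RamVolume(S.from_MiB(10)))
        cache.add_core(core)

        # New interface: submit I/O through the core's front volume instead of core.new_io().
        vol = CoreVolume(core, open=True)
        queue = cache.get_default_queue()

        data = Data.from_string("This is test data")
        io = vol.new_io(queue, S.from_sector(1).B, data.size, IoDir.WRITE, 0, 0)
        io.set_data(data)

        completion = OcfCompletion([("err", c_int)])
        io.callback = completion.callback
        io.submit()
        completion.wait()
        assert completion.results["err"] == 0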
@@ -9,6 +9,7 @@ from ctypes import c_int
from pyocf.types.cache import Cache
from pyocf.types.core import Core
from pyocf.types.volume import RamVolume, ErrorDevice
from pyocf.types.volume_core import CoreVolume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size as S
@@ -26,13 +27,15 @@ def test_simple_wt_write(pyocf_ctx):

cache = Cache.start_on_device(cache_device)
core = Core.using_device(core_device)
queue = cache.get_default_queue()

cache.add_core(core)
vol = CoreVolume(core, open=True)

cache_device.reset_stats()
core_device.reset_stats()

r = Rio().target(core).readwrite(ReadWrite.WRITE).size(S.from_sector(1)).run()
r = Rio().target(vol).readwrite(ReadWrite.WRITE).size(S.from_sector(1)).run([queue])
assert cache_device.get_stats()[IoDir.WRITE] == 1
cache.settle()
stats = cache.get_stats()
@@ -87,9 +90,10 @@ def test_load_cache_with_cores(pyocf_ctx, open_cores):
core = Core.using_device(core_device, name="test_core")

cache.add_core(core)
vol = CoreVolume(core, open=True)

write_data = Data.from_string("This is test data")
io = core.new_io(cache.get_default_queue(), S.from_sector(3).B,
io = vol.new_io(cache.get_default_queue(), S.from_sector(3).B,
write_data.size, IoDir.WRITE, 0, 0)
io.set_data(write_data)

@@ -107,7 +111,7 @@ def test_load_cache_with_cores(pyocf_ctx, open_cores):
core = cache.get_core_by_name("test_core")

read_data = Data(write_data.size)
io = core.new_io(cache.get_default_queue(), S.from_sector(3).B,
io = vol.new_io(cache.get_default_queue(), S.from_sector(3).B,
read_data.size, IoDir.READ, 0, 0)
io.set_data(read_data)

@@ -13,6 +13,7 @@ import pytest
from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size
@@ -28,8 +29,10 @@ def __io(io, queue, address, size, data, direction):
return int(completion.results["err"])


def _io(new_io, queue, address, size, data, offset, direction, flags):
io = new_io(queue, address, size, direction, 0, flags)
def io_to_exp_obj(core, address, size, data, offset, direction, flags):
vol = core.get_front_volume()
queue = core.cache.get_default_queue()
io = vol.new_io(queue, address, size, direction, 0, flags)
if direction == IoDir.READ:
_data = Data.from_bytes(bytes(size))
else:
@@ -40,19 +43,6 @@ def _io(new_io, queue, address, size, data, offset, direction, flags):
return ret


def io_to_exp_obj(core, address, size, data, offset, direction, flags):
return _io(
core.new_io,
core.cache.get_default_queue(),
address,
size,
data,
offset,
direction,
flags,
)


class FlagsValVolume(RamVolume):
def __init__(self, size, flags):
self.flags = flags
@@ -91,6 +81,7 @@ def test_io_flags(pyocf_ctx, cache_mode):
core = Core.using_device(core_device)

cache.add_core(core)
vol = CoreVolume(core, open=True)

cache_device.set_check(True)
core_device.set_check(True)

@@ -11,6 +11,7 @@ from datetime import timedelta
from pyocf.types.cache import Cache, PromotionPolicy, NhitParams
from pyocf.types.core import Core
from pyocf.types.volume import RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size
@@ -62,11 +63,13 @@ def test_change_to_nhit_and_back_io_in_flight(pyocf_ctx):
core = Core.using_device(core_device)

cache.add_core(core)
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

# Step 2
r = (
Rio()
.target(core)
.target(vol)
.njobs(10)
.bs(Size.from_KiB(4))
.readwrite(ReadWrite.RANDWRITE)
@@ -74,7 +77,7 @@ def test_change_to_nhit_and_back_io_in_flight(pyocf_ctx):
.time_based()
.time(timedelta(minutes=1))
.qd(10)
.run_async()
.run_async([queue])
)

# Step 3
@@ -85,7 +88,7 @@ def test_change_to_nhit_and_back_io_in_flight(pyocf_ctx):
assert r.error_count == 0, "No IO's should fail when turning NHIT policy on"

# Step 5
r.run_async()
r.run_async([queue])

# Step 6
cache.set_promotion_policy(PromotionPolicy.ALWAYS)
@@ -107,15 +110,17 @@ def fill_cache(cache, fill_ratio):
bytes_to_fill = Size(round(cache_lines.bytes * fill_ratio))

core = cache.cores[0]
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

r = (
Rio()
.target(core)
.target(vol)
.readwrite(ReadWrite.RANDWRITE)
.size(bytes_to_fill)
.bs(Size(512))
.qd(10)
.run()
.run([queue])
)


@@ -143,6 +148,8 @@ def test_promoted_after_hits_various_thresholds(
cache = Cache.start_on_device(cache_device, promotion_policy=PromotionPolicy.NHIT)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

# Step 2
cache.set_promotion_policy_param(
@@ -167,12 +174,12 @@ def test_promoted_after_hits_various_thresholds(
.readwrite(ReadWrite.WRITE)
.bs(Size(4096))
.offset(last_core_line)
.target(core)
.target(vol)
.size(Size(4096) + last_core_line)
)

for i in range(insertion_threshold - 1):
r.run()
r.run([queue])

cache.settle()
stats = cache.get_stats()
@@ -183,7 +190,7 @@ def test_promoted_after_hits_various_thresholds(
)

# Step 5
r.run()
r.run([queue])

cache.settle()
stats = cache.get_stats()
@@ -213,9 +220,11 @@ def test_partial_hit_promotion(pyocf_ctx):
cache = Cache.start_on_device(cache_device)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

# Step 2
r = Rio().readwrite(ReadWrite.READ).bs(Size(512)).size(Size(512)).target(core).run()
r = Rio().readwrite(ReadWrite.READ).bs(Size(512)).size(Size(512)).target(vol).run([queue])

stats = cache.get_stats()
cache_lines = stats["conf"]["size"]
@@ -232,7 +241,7 @@ def test_partial_hit_promotion(pyocf_ctx):

# Step 4
req_size = Size(2 * cache_lines.line_size)
r.size(req_size).bs(req_size).readwrite(ReadWrite.WRITE).run()
r.size(req_size).bs(req_size).readwrite(ReadWrite.WRITE).run([queue])

cache.settle()
stats = cache.get_stats()

@@ -15,6 +15,7 @@ from datetime import datetime
from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size
@@ -38,8 +39,8 @@ def __io(io, queue, address, size, data, direction):
return int(completion.results["err"])


def _io(new_io, queue, address, size, data, offset, direction):
io = new_io(queue, address, size, direction, 0, 0)
def io_to_exp_obj(vol, queue, address, size, data, offset, direction):
io = vol.new_io(queue, address, size, direction, 0, 0)
if direction == IoDir.READ:
_data = Data.from_bytes(bytes(size))
else:
@@ -50,30 +51,6 @@ def _io(new_io, queue, address, size, data, offset, direction):
return ret


def io_to_core(core, address, size, data, offset, direction):
return _io(
core.new_core_io,
core.cache.get_default_queue(),
address,
size,
data,
offset,
direction,
)


def io_to_exp_obj(core, address, size, data, offset, direction):
return _io(
core.new_io,
core.cache.get_default_queue(),
address,
size,
data,
offset,
direction,
)


def sector_to_region(sector, region_start):
num_regions = len(region_start)
i = 0
@@ -266,9 +243,11 @@ def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode, rand_seed)
cache = Cache.start_on_device(
cache_device, cache_mode=CacheMode.WO, cache_line_size=cacheline_size
)
core = Core.using_device(core_device)

core = Core.using_device(core_device)
cache.add_core(core)
queue = cache.get_default_queue()
vol = CoreVolume(core, open=True)

insert_order = list(range(CACHELINE_COUNT))

@@ -311,7 +290,8 @@ def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode, rand_seed)
# write data to core and invalidate all CL and write data pattern to core
cache.change_cache_mode(cache_mode=CacheMode.PT)
io_to_exp_obj(
core,
vol,
queue,
WORKSET_OFFSET,
len(data[SectorStatus.INVALID]),
data[SectorStatus.INVALID],
@@ -332,7 +312,8 @@ def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode, rand_seed)
region = sector_to_region(sec, region_start)
if region_state[region] != SectorStatus.INVALID:
io_to_exp_obj(
core,
vol,
queue,
WORKSET_OFFSET + SECTOR_SIZE * sec,
SECTOR_SIZE,
data[SectorStatus.CLEAN],
@@ -346,7 +327,8 @@ def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode, rand_seed)
region = sector_to_region(sec, region_start)
if region_state[region] == SectorStatus.DIRTY:
io_to_exp_obj(
core,
vol,
queue,
WORKSET_OFFSET + SECTOR_SIZE * sec,
SECTOR_SIZE,
data[SectorStatus.DIRTY],
@@ -373,7 +355,7 @@ def test_read_data_consistency(pyocf_ctx, cacheline_size, cache_mode, rand_seed)
END = end * SECTOR_SIZE
size = (end - start + 1) * SECTOR_SIZE
assert 0 == io_to_exp_obj(
core, WORKSET_OFFSET + START, size, result_b, START, IoDir.READ
vol, queue, WORKSET_OFFSET + START, size, result_b, START, IoDir.READ
), "error reading in {}: region_state={}, start={}, end={}, insert_order={}".format(
cache_mode, region_state, start, end, insert_order
)

@@ -11,6 +11,7 @@ import pytest
from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size
@@ -27,11 +28,11 @@ class Stream:
return f"{self.last} {self.length} {self.direction}"


def _io(core, addr, size, direction, context):
def _io(vol, queue, addr, size, direction, context):
comp = OcfCompletion([("error", c_int)], context=context)
data = Data(size)

io = core.new_io(core.cache.get_default_queue(), addr, size, direction, 0, 0)
io = vol.new_io(queue, addr, size, direction, 0, 0)
io.set_data(data)
io.callback = comp.callback
io.submit()
@@ -39,11 +40,11 @@ def _io(core, addr, size, direction, context):
return comp


def io_to_streams(core, streams, io_size):
def io_to_streams(vol, queue, streams, io_size):
completions = []
for stream in streams:
completions.append(
_io(core, stream.last, io_size, stream.direction, context=(io_size, stream))
_io(vol, queue, stream.last, io_size, stream.direction, context=(io_size, stream))
)

for c in completions:
@@ -94,6 +95,8 @@ def test_seq_cutoff_max_streams(pyocf_ctx):
core = Core.using_device(RamVolume(core_size), seq_cutoff_promotion_count=1)

cache.add_core(core)
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

cache.set_seq_cut_off_policy(SeqCutOffPolicy.ALWAYS)
cache.set_seq_cut_off_threshold(threshold)
@@ -101,7 +104,7 @@ def test_seq_cutoff_max_streams(pyocf_ctx):
# STEP 1
shuffle(streams)
io_size = threshold - Size.from_sector(1)
io_to_streams(core, streams, io_size)
io_to_streams(vol, queue, streams, io_size)

stats = cache.get_stats()
assert (
@@ -115,7 +118,7 @@ def test_seq_cutoff_max_streams(pyocf_ctx):
streams.remove(lru_stream)

shuffle(streams)
io_to_streams(core, streams, Size.from_sector(1))
io_to_streams(vol, queue, streams, Size.from_sector(1))

stats = cache.get_stats()
assert (
@@ -126,7 +129,7 @@ def test_seq_cutoff_max_streams(pyocf_ctx):
), "All streams should be handled in PT - cutoff engaged for all streams"

# STEP 3
io_to_streams(core, [non_active_stream], Size.from_sector(1))
io_to_streams(vol, queue, [non_active_stream], Size.from_sector(1))

stats = cache.get_stats()
assert (
@@ -134,7 +137,7 @@ def test_seq_cutoff_max_streams(pyocf_ctx):
), "This request should be serviced by cache - no cutoff for inactive stream"

# STEP 4
io_to_streams(core, [lru_stream], Size.from_sector(1))
io_to_streams(vol, queue, [lru_stream], Size.from_sector(1))

stats = cache.get_stats()
assert (

@@ -15,6 +15,7 @@ from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.types.shared import OcfCompletion, CacheLineSize, SeqCutOffPolicy, CacheLines
from pyocf.types.volume import RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.utils import Size

logger = logging.getLogger(__name__)
@@ -31,18 +32,20 @@ def test_eviction_two_cores(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
cache_size = cache.get_stats()["conf"]["size"]
core_exported1 = Core.using_device(core_device1, name="core1")
core_exported2 = Core.using_device(core_device2, name="core2")
cache.add_core(core_exported1)
cache.add_core(core_exported2)
core1 = Core.using_device(core_device1, name="core1")
core2 = Core.using_device(core_device2, name="core2")
cache.add_core(core1)
vol1 = CoreVolume(core1, open=True)
cache.add_core(core2)
vol2 = CoreVolume(core2, open=True)

valid_io_size = Size.from_B(cache_size.B)
test_data = Data(valid_io_size)
send_io(core_exported1, test_data)
send_io(core_exported2, test_data)
send_io(core1, test_data)
send_io(core2, test_data)

stats1 = core_exported1.get_stats()
stats2 = core_exported2.get_stats()
stats1 = core1.get_stats()
stats2 = core2.get_stats()
# IO to the second core should evict all the data from the first core
assert stats1["usage"]["occupancy"]["value"] == 0
assert stats2["usage"]["occupancy"]["value"] == valid_io_size.blocks_4k
@@ -57,15 +60,16 @@ def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLin
core_device = RamVolume(Size.from_MiB(200))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
cache_size = cache.get_stats()["conf"]["size"]
core_exported = Core.using_device(core_device)
cache.add_core(core_exported)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)

valid_io_size = Size.from_B(cache_size.B // 2)
test_data = Data(valid_io_size)
send_io(core_exported, test_data)
send_io(core, test_data)

stats = core_exported.cache.get_stats()
stats = core.cache.get_stats()
first_block_sts = stats["block"]
first_usage_sts = stats["usage"]
pt_writes_first = stats["req"]["wr_pt"]
@@ -80,12 +84,12 @@ def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLin
io_size_bigger_than_cache = Size.from_MiB(100)
io_offset = valid_io_size
test_data = Data(io_size_bigger_than_cache)
send_io(core_exported, test_data, io_offset)
send_io(core, test_data, io_offset)

if mode is not CacheMode.WT:
# Flush first write
cache.flush()
stats = core_exported.cache.get_stats()
stats = core.cache.get_stats()
second_block_sts = stats["block"]
second_usage_sts = stats["usage"]
pt_writes_second = stats["req"]["wr_pt"]
@@ -113,6 +117,7 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)

test_ioclass_id = 1
pinned_ioclass_id = 2
@@ -176,9 +181,10 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
), "Overflown part has not been evicted"


def send_io(exported_obj: Core, data: Data, addr: int = 0, target_ioclass: int = 0):
io = exported_obj.new_io(
exported_obj.cache.get_default_queue(),
def send_io(core: Core, data: Data, addr: int = 0, target_ioclass: int = 0):
vol = core.get_front_volume()
io = vol.new_io(
core.cache.get_default_queue(),
addr,
data.size,
IoDir.WRITE,

@@ -13,6 +13,7 @@ import pytest
from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size
@@ -28,8 +29,8 @@ def __io(io, queue, address, size, data, direction):
return int(completion.results["err"])


def _io(new_io, queue, address, size, data, offset, direction, flags):
io = new_io(queue, address, size, direction, 0, flags)
def io_to_exp_obj(vol, queue, address, size, data, offset, direction, flags):
io = vol.new_io(queue, address, size, direction, 0, flags)
if direction == IoDir.READ:
_data = Data.from_bytes(bytes(size))
else:
@@ -40,19 +41,6 @@ def _io(new_io, queue, address, size, data, offset, direction, flags):
return ret


def io_to_exp_obj(core, address, size, data, offset, direction, flags):
return _io(
core.new_io,
core.cache.get_default_queue(),
address,
size,
data,
offset,
direction,
flags,
)


class FlushValVolume(RamVolume):
def __init__(self, size):
self.flush_last = False
@@ -87,12 +75,15 @@ def test_flush_after_mngmt(pyocf_ctx):
cache.add_core(core)
assert cache_device.flush_last

vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

# WT I/O to write data to core and cache VC
io_to_exp_obj(core, block_size * 0, block_size, data, 0, IoDir.WRITE, 0)
io_to_exp_obj(vol, queue, block_size * 0, block_size, data, 0, IoDir.WRITE, 0)

# WB I/O to produce dirty cachelines in CAS
cache.change_cache_mode(CacheMode.WB)
io_to_exp_obj(core, block_size * 1, block_size, data, 0, IoDir.WRITE, 0)
io_to_exp_obj(vol, queue, block_size * 1, block_size, data, 0, IoDir.WRITE, 0)

# after cache flush VCs are expected to be cleared
cache.flush()
@@ -100,14 +91,14 @@ def test_flush_after_mngmt(pyocf_ctx):
assert core_device.flush_last

# I/O to write data to cache device VC
io_to_exp_obj(core, block_size * 0, block_size, data, 0, IoDir.WRITE, 0)
io_to_exp_obj(vol, queue, block_size * 0, block_size, data, 0, IoDir.WRITE, 0)

# cache save must flush VC
cache.save()
assert cache_device.flush_last

# I/O to write data to cache device VC
io_to_exp_obj(core, block_size * 0, block_size, data, 0, IoDir.WRITE, 0)
io_to_exp_obj(vol, queue, block_size * 0, block_size, data, 0, IoDir.WRITE, 0)

# cache stop must flush VC
cache.stop()

@@ -9,9 +9,11 @@ from ctypes import c_int
from random import randint
from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import RamVolume
from pyocf.types.volume import RamVolume, Volume
from pyocf.types.volume_core import CoreVolume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.types.queue import Queue
from pyocf.utils import Size as S
from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize

@@ -79,11 +81,14 @@ def test_remove_dirty_no_flush(pyocf_ctx, cache_mode, cls):
core = Core.using_device(core_device)
cache.add_core(core)

vol = CoreVolume(core, open=True)
queue = core.cache.get_default_queue()

# Prepare data
core_size = core.get_stats()["size"]
data = Data(core_size.B)

_io_to_core(core, data)
_io_to_core(vol, queue, data)

# Remove core from cache
cache.remove_core(core)
@@ -122,11 +127,12 @@ def test_10add_remove_with_io(pyocf_ctx):
# Add and remove core 10 times in a loop with io in between
for i in range(0, 10):
cache.add_core(core)
vol = CoreVolume(core, open=True)
stats = cache.get_stats()
assert stats["conf"]["core_count"] == 1

write_data = Data.from_string("Test data")
io = core.new_io(
io = vol.new_io(
cache.get_default_queue(), S.from_sector(1).B, write_data.size,
IoDir.WRITE, 0, 0
)
@@ -303,8 +309,8 @@ def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
assert stats["conf"]["core_count"] == core_amount


def _io_to_core(exported_obj: Core, data: Data):
io = exported_obj.new_io(exported_obj.cache.get_default_queue(), 0, data.size,
def _io_to_core(vol: Volume, queue: Queue, data: Data):
io = vol.new_io(queue, 0, data.size,
IoDir.WRITE, 0, 0)
io.set_data(data)

@@ -28,6 +28,7 @@ from pyocf.types.shared import (
SeqCutOffPolicy,
)
from pyocf.types.volume import RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.utils import Size

logger = logging.getLogger(__name__)
@@ -49,6 +50,9 @@ def test_attach_different_size(
core = Core.using_device(core_device)
cache.add_core(core)

vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

cache.configure_partition(
part_id=1, name="test_part", max_size=50, priority=1
)
@@ -61,7 +65,7 @@ def test_attach_different_size(
data = bytes(block_size)

for i in range(cache_size.blocks_4k):
io_to_exp_obj(core, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
io_to_exp_obj(vol, queue, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)

part_current_size = CacheLines(
cache.get_partition_info(part_id=1)["_curr_size"], cls
@@ -76,7 +80,7 @@ def test_attach_different_size(
cache_size = cache.get_stats()["conf"]["size"]

for i in range(cache_size.blocks_4k):
io_to_exp_obj(core, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)
io_to_exp_obj(vol, queue, block_size * i, block_size, data, 0, IoDir.WRITE, 1, 0)

part_current_size = CacheLines(
cache.get_partition_info(part_id=1)["_curr_size"], cls
@@ -85,22 +89,8 @@ def test_attach_different_size(
assert part_current_size.blocks_4k == cache_size.blocks_4k * 0.5


def io_to_exp_obj(core, address, size, data, offset, direction, target_ioclass, flags):
return _io(
core.new_io,
core.cache.get_default_queue(),
address,
size,
data,
offset,
direction,
target_ioclass,
flags,
)


def _io(new_io, queue, address, size, data, offset, direction, target_ioclass, flags):
io = new_io(queue, address, size, direction, target_ioclass, flags)
def io_to_exp_obj(vol, queue, address, size, data, offset, direction, target_ioclass, flags):
io = vol.new_io(queue, address, size, direction, target_ioclass, flags)
if direction == IoDir.READ:
_data = Data.from_bytes(bytes(size))
else:

@@ -24,8 +24,10 @@ from pyocf.types.core import Core
from pyocf.types.ctx import OcfCtx
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.types.queue import Queue
from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize, SeqCutOffPolicy
from pyocf.types.volume import Volume, RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.utils import Size

logger = logging.getLogger(__name__)
@@ -62,21 +64,23 @@ def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: Cache
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(10))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
core_exported = Core.using_device(core_device)
core = Core.using_device(core_device)

cache.add_core(core_exported)
cache.add_core(core)
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

logger.info("[STAGE] Initial write to exported object")
cache_device.reset_stats()
core_device.reset_stats()

test_data = Data.from_string("This is test data")
io_to_core(core_exported, test_data, Size.from_sector(1).B)
check_stats_write_empty(core_exported, mode, cls)
io_to_core(vol, queue, test_data, Size.from_sector(1).B)
check_stats_write_empty(core, mode, cls)

logger.info("[STAGE] Read from exported object after initial write")
io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
check_stats_read_after_write(core_exported, mode, cls, True)
io_from_exported_object(vol, queue, test_data.size, Size.from_sector(1).B)
check_stats_read_after_write(core, mode, cls, True)

logger.info("[STAGE] Write to exported object after read")
cache_device.reset_stats()
@@ -84,10 +88,10 @@ def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: Cache

test_data = Data.from_string("Changed test data")

io_to_core(core_exported, test_data, Size.from_sector(1).B)
check_stats_write_after_read(core_exported, mode, cls)
io_to_core(vol, queue, test_data, Size.from_sector(1).B)
check_stats_write_after_read(core, mode, cls)

check_md5_sums(core_exported, mode)
check_md5_sums(core, mode)


@pytest.mark.parametrize("cls", CacheLineSize)
@@ -100,20 +104,23 @@ def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheL
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(5))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
core_exported = Core.using_device(core_device)
core = Core.using_device(core_device)

cache.add_core(core_exported)
cache.add_core(core)
front_vol = CoreVolume(core, open=True)
bottom_vol = core.get_volume()
queue = cache.get_default_queue()

logger.info("[STAGE] Initial write to core device")
test_data = Data.from_string("This is test data")
io_to_core(core_exported, test_data, Size.from_sector(1).B, True)
io_to_core(bottom_vol, queue, test_data, Size.from_sector(1).B)

cache_device.reset_stats()
core_device.reset_stats()

logger.info("[STAGE] Initial read from exported object")
io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
check_stats_read_empty(core_exported, mode, cls)
io_from_exported_object(front_vol, queue, test_data.size, Size.from_sector(1).B)
check_stats_read_empty(core, mode, cls)

logger.info("[STAGE] Write to exported object after initial read")
cache_device.reset_stats()
@@ -121,15 +128,15 @@ def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheL

test_data = Data.from_string("Changed test data")

io_to_core(core_exported, test_data, Size.from_sector(1).B)
io_to_core(front_vol, queue, test_data, Size.from_sector(1).B)

check_stats_write_after_read(core_exported, mode, cls, True)
check_stats_write_after_read(core, mode, cls, True)

logger.info("[STAGE] Read from exported object after write")
io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
check_stats_read_after_write(core_exported, mode, cls)
io_from_exported_object(front_vol, queue, test_data.size, Size.from_sector(1).B)
check_stats_read_after_write(core, mode, cls)

check_md5_sums(core_exported, mode)
check_md5_sums(core, mode)


@pytest.mark.parametrize("cls", CacheLineSize)
@@ -180,17 +187,21 @@ def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(5))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
core_exported = Core.using_device(core_device)
cache.add_core(core_exported)
core = Core.using_device(core_device)

cache.add_core(core)
front_vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

cls_no = 10

run_io_and_cache_data_if_possible(core_exported, mode, cls, cls_no)
run_io_and_cache_data_if_possible(core, mode, cls, cls_no)

stats = cache.get_stats()
assert int(stats["conf"]["dirty"]) == (cls_no if mode.lazy_write() else 0),\
"Dirty data before MD5"

md5_exported_core = core_exported.exp_obj_md5()
md5_exported_core = core.exp_obj_md5()

if with_flush:
cache.flush()
@@ -417,27 +428,29 @@ def test_start_stop_noqueue(pyocf_ctx):
assert not c.results["error"], "Failed to stop cache: {}".format(c.results["error"])


def run_io_and_cache_data_if_possible(exported_obj, mode, cls, cls_no):
def run_io_and_cache_data_if_possible(core, mode, cls, cls_no):
front_vol = core.get_front_volume()
bottom_vol = core.get_volume()
queue = core.cache.get_default_queue()

test_data = Data(cls_no * cls)

if mode in {CacheMode.WI, CacheMode.WA}:
logger.info("[STAGE] Write to core device")
io_to_core(exported_obj, test_data, 0, True)
io_to_core(bottom_vol, queue, test_data, 0)
logger.info("[STAGE] Read from exported object")
io_from_exported_object(exported_obj, test_data.size, 0)
io_from_exported_object(front_vol, queue, test_data.size, 0)
else:
logger.info("[STAGE] Write to exported object")
io_to_core(exported_obj, test_data, 0)
io_to_core(front_vol, queue, test_data, 0)

stats = exported_obj.cache.get_stats()
stats = core.cache.get_stats()
assert stats["usage"]["occupancy"]["value"] == \
((cls_no * cls / CacheLineSize.LINE_4KiB) if mode != CacheMode.PT else 0), "Occupancy"


def io_to_core(exported_obj: Core, data: Data, offset: int, to_core_device=False):
new_io = exported_obj.new_core_io if to_core_device else exported_obj.new_io
io = new_io(exported_obj.cache.get_default_queue(), offset, data.size,
IoDir.WRITE, 0, 0)
def io_to_core(vol: Volume, queue: Queue, data: Data, offset: int):
io = vol.new_io(queue, offset, data.size, IoDir.WRITE, 0, 0)
io.set_data(data)

completion = OcfCompletion([("err", c_int)])
@@ -448,10 +461,9 @@ def io_to_core(exported_obj: Core, data: Data, offset: int, to_core_device=False
assert completion.results["err"] == 0, "IO to exported object completion"


def io_from_exported_object(exported_obj: Core, buffer_size: int, offset: int):
def io_from_exported_object(vol: Volume, queue: Queue, buffer_size: int, offset: int):
read_buffer = Data(buffer_size)
io = exported_obj.new_io(exported_obj.cache.get_default_queue(), offset,
read_buffer.size, IoDir.READ, 0, 0)
io = vol.new_io(queue, offset, read_buffer.size, IoDir.READ, 0, 0)
io.set_data(read_buffer)

completion = OcfCompletion([("err", c_int)])
@@ -463,28 +475,28 @@ def io_from_exported_object(exported_obj: Core, buffer_size: int, offset: int):
return read_buffer


def check_stats_read_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineSize):
exported_obj.cache.settle()
stats = exported_obj.cache.get_stats()
def check_stats_read_empty(core: Core, mode: CacheMode, cls: CacheLineSize):
core.cache.settle()
stats = core.cache.get_stats()
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == (1 if mode.read_insert() else 0), \
assert core.cache.device.get_stats()[IoDir.WRITE] == (1 if mode.read_insert() else 0), \
"Writes to cache device"
assert exported_obj.device.get_stats()[IoDir.READ] == 1, "Reads from core device"
assert core.device.get_stats()[IoDir.READ] == 1, "Reads from core device"
assert stats["req"]["rd_full_misses"]["value"] == (0 if mode == CacheMode.PT else 1), \
"Read full misses"
assert stats["usage"]["occupancy"]["value"] == \
((cls / CacheLineSize.LINE_4KiB) if mode.read_insert() else 0), "Occupancy"


def check_stats_write_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineSize):
exported_obj.cache.settle()
stats = exported_obj.cache.get_stats()
def check_stats_write_empty(core: Core, mode: CacheMode, cls: CacheLineSize):
core.cache.settle()
stats = core.cache.get_stats()
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
# TODO(ajrutkow): why 1 for WT ??
assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
assert core.cache.device.get_stats()[IoDir.WRITE] == \
(2 if mode.lazy_write() else (1 if mode == CacheMode.WT else 0)), \
"Writes to cache device"
assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
assert core.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
"Writes to core device"
assert stats["req"]["wr_full_misses"]["value"] == (1 if mode.write_insert() else 0), \
"Write full misses"
@@ -493,17 +505,17 @@ def check_stats_write_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineS
"Occupancy"


def check_stats_write_after_read(exported_obj: Core,
def check_stats_write_after_read(core: Core,
mode: CacheMode,
cls: CacheLineSize,
read_from_empty=False):
exported_obj.cache.settle()
stats = exported_obj.cache.get_stats()
assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
core.cache.settle()
stats = core.cache.get_stats()
assert core.cache.device.get_stats()[IoDir.WRITE] == \
(0 if mode in {CacheMode.WI, CacheMode.PT} else
(2 if read_from_empty and mode.lazy_write() else 1)), \
"Writes to cache device"
assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
assert core.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
"Writes to core device"
assert stats["req"]["wr_hits"]["value"] == \
(1 if (mode.read_insert() and mode != CacheMode.WI)
@@ -514,17 +526,17 @@ def check_stats_write_after_read(exported_obj: Core,
"Occupancy"


def check_stats_read_after_write(exported_obj, mode, cls, write_to_empty=False):
exported_obj.cache.settle()
stats = exported_obj.cache.get_stats()
assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
def check_stats_read_after_write(core, mode, cls, write_to_empty=False):
core.cache.settle()
stats = core.cache.get_stats()
assert core.cache.device.get_stats()[IoDir.WRITE] == \
(2 if mode.lazy_write() else (0 if mode == CacheMode.PT else 1)), \
"Writes to cache device"
assert exported_obj.cache.device.get_stats()[IoDir.READ] == \
assert core.cache.device.get_stats()[IoDir.READ] == \
(1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
or (mode == CacheMode.WA and not write_to_empty) else 0), \
"Reads from cache device"
assert exported_obj.device.get_stats()[IoDir.READ] == \
assert core.device.get_stats()[IoDir.READ] == \
(0 if mode in {CacheMode.WB, CacheMode.WO, CacheMode.WT}
or (mode == CacheMode.WA and not write_to_empty) else 1), \
"Reads from core device"
@@ -540,15 +552,15 @@ def check_stats_read_after_write(exported_obj, mode, cls, write_to_empty=False):
(0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)), "Occupancy"


def check_md5_sums(exported_obj: Core, mode: CacheMode):
def check_md5_sums(core: Core, mode: CacheMode):
if mode.lazy_write():
assert exported_obj.device.md5() != exported_obj.exp_obj_md5(), \
assert core.device.md5() != core.exp_obj_md5(), \
"MD5 check: core device vs exported object without flush"
exported_obj.cache.flush()
assert exported_obj.device.md5() == exported_obj.exp_obj_md5(), \
core.cache.flush()
assert core.device.md5() == core.exp_obj_md5(), \
"MD5 check: core device vs exported object after flush"
else:
assert exported_obj.device.md5() == exported_obj.exp_obj_md5(), \
assert core.device.md5() == core.exp_obj_md5(), \
"MD5 check: core device vs exported object"

@@ -11,8 +11,10 @@ import pytest
from pyocf.types.cache import Cache, Core
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.types.queue import Queue
from pyocf.types.shared import OcfCompletion
from pyocf.types.volume import RamVolume
from pyocf.types.volume import Volume, RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.utils import Size


@@ -22,9 +24,9 @@ def test_neg_write_too_long_data(pyocf_ctx, c_uint16_randomize):
Check if writing data larger than exported object size is properly blocked
"""

core = prepare_cache_and_core(Size.from_MiB(1))
vol, queue = prepare_cache_and_core(Size.from_MiB(1))
data = Data(int(Size.from_KiB(c_uint16_randomize)))
completion = io_operation(core, data, IoDir.WRITE)
completion = io_operation(vol, queue, data, IoDir.WRITE)

if c_uint16_randomize > 1024:
assert completion.results["err"] != 0
@@ -38,9 +40,9 @@ def test_neg_read_too_long_data(pyocf_ctx, c_uint16_randomize):
Check if reading data larger than exported object size is properly blocked
"""

core = prepare_cache_and_core(Size.from_MiB(1))
vol, queue = prepare_cache_and_core(Size.from_MiB(1))
data = Data(int(Size.from_KiB(c_uint16_randomize)))
completion = io_operation(core, data, IoDir.READ)
completion = io_operation(vol, queue, data, IoDir.READ)

if c_uint16_randomize > 1024:
assert completion.results["err"] != 0
@@ -56,9 +58,9 @@ def test_neg_write_too_far(pyocf_ctx, c_uint16_randomize):
"""

limited_size = c_uint16_randomize % (int(Size.from_KiB(4)) + 1)
core = prepare_cache_and_core(Size.from_MiB(4))
vol, queue = prepare_cache_and_core(Size.from_MiB(4))
data = Data(int(Size.from_KiB(limited_size)))
completion = io_operation(core, data, IoDir.WRITE, int(Size.from_MiB(3)))
completion = io_operation(vol, queue, data, IoDir.WRITE, int(Size.from_MiB(3)))

if limited_size > 1024:
assert completion.results["err"] != 0
@@ -74,9 +76,9 @@ def test_neg_read_too_far(pyocf_ctx, c_uint16_randomize):
"""

limited_size = c_uint16_randomize % (int(Size.from_KiB(4)) + 1)
core = prepare_cache_and_core(Size.from_MiB(4))
vol, queue = prepare_cache_and_core(Size.from_MiB(4))
data = Data(int(Size.from_KiB(limited_size)))
completion = io_operation(core, data, IoDir.READ, offset=(Size.from_MiB(3)))
completion = io_operation(vol, queue, data, IoDir.READ, offset=(Size.from_MiB(3)))

if limited_size > 1024:
assert completion.results["err"] != 0
@@ -91,9 +93,9 @@ def test_neg_write_offset_outside_of_device(pyocf_ctx, c_int_sector_randomize):
IO offset is located outside of device range
"""

core = prepare_cache_and_core(Size.from_MiB(2))
vol, queue = prepare_cache_and_core(Size.from_MiB(2))
data = Data(int(Size.from_KiB(1)))
completion = io_operation(core, data, IoDir.WRITE, offset=c_int_sector_randomize)
completion = io_operation(vol, queue, data, IoDir.WRITE, offset=c_int_sector_randomize)

if 0 <= c_int_sector_randomize <= int(Size.from_MiB(2)) - int(Size.from_KiB(1)):
assert completion.results["err"] == 0
@@ -108,9 +110,9 @@ def test_neg_read_offset_outside_of_device(pyocf_ctx, c_int_sector_randomize):
IO offset is located outside of device range
"""

core = prepare_cache_and_core(Size.from_MiB(2))
vol, queue = prepare_cache_and_core(Size.from_MiB(2))
data = Data(int(Size.from_KiB(1)))
completion = io_operation(core, data, IoDir.READ, offset=c_int_sector_randomize)
completion = io_operation(vol, queue, data, IoDir.READ, offset=c_int_sector_randomize)

if 0 <= c_int_sector_randomize <= int(Size.from_MiB(2)) - int(Size.from_KiB(1)):
assert completion.results["err"] == 0
@@ -125,11 +127,11 @@ def test_neg_offset_unaligned(pyocf_ctx, c_int_randomize):
IO offset is not aligned
"""

core = prepare_cache_and_core(Size.from_MiB(2))
vol, queue = prepare_cache_and_core(Size.from_MiB(2))
data = Data(int(Size.from_KiB(1)))
if c_int_randomize % 512 != 0:
with pytest.raises(Exception, match="Failed to create io!"):
core.new_io(core.cache.get_default_queue(), c_int_randomize, data.size,
vol.new_io(queue, c_int_randomize, data.size,
IoDir.WRITE, 0, 0)


@@ -140,11 +142,11 @@ def test_neg_size_unaligned(pyocf_ctx, c_uint16_randomize):
IO size is not aligned
"""

core = prepare_cache_and_core(Size.from_MiB(2))
vol, queue = prepare_cache_and_core(Size.from_MiB(2))
data = Data(int(Size.from_B(c_uint16_randomize)))
if c_uint16_randomize % 512 != 0:
with pytest.raises(Exception, match="Failed to create io!"):
core.new_io(core.cache.get_default_queue(), 0, data.size,
vol.new_io(queue, 0, data.size,
IoDir.WRITE, 0, 0)


@@ -155,9 +157,9 @@ def test_neg_io_class(pyocf_ctx, c_int_randomize):
number is not in allowed values {0, ..., 32}
"""

core = prepare_cache_and_core(Size.from_MiB(2))
vol, queue = prepare_cache_and_core(Size.from_MiB(2))
data = Data(int(Size.from_MiB(1)))
completion = io_operation(core, data, randrange(0, 2), io_class=c_int_randomize)
completion = io_operation(vol, queue, data, randrange(0, 2), io_class=c_int_randomize)

if 0 <= c_int_randomize <= 32:
assert completion.results["err"] == 0
@@ -172,9 +174,9 @@ def test_neg_io_direction(pyocf_ctx, c_int_randomize):
that is when IO direction value is not in allowed values {0, 1}
"""

core = prepare_cache_and_core(Size.from_MiB(2))
vol, queue = prepare_cache_and_core(Size.from_MiB(2))
data = Data(int(Size.from_MiB(1)))
completion = io_operation(core, data, c_int_randomize)
completion = io_operation(vol, queue, data, c_int_randomize)

if c_int_randomize in [0, 1]:
assert completion.results["err"] == 0
@@ -190,12 +192,21 @@ def prepare_cache_and_core(core_size: Size, cache_size: Size = Size.from_MiB(50)
core = Core.using_device(core_device)

cache.add_core(core)
return core
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

return vol, queue


def io_operation(core: Core, data: Data, io_direction: int, offset: int = 0, io_class: int = 0):
io = core.new_io(core.cache.get_default_queue(), offset, data.size,
io_direction, io_class, 0)
def io_operation(
vol: Volume,
queue: Queue,
data: Data,
io_direction: int,
offset: int = 0,
io_class: int = 0,
):
io = vol.new_io(queue, offset, data.size, io_direction, io_class, 0)
io.set_data(data)

completion = OcfCompletion([("err", c_int)])

@@ -8,7 +8,8 @@ from ctypes import c_int

from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import RamVolume
from pyocf.types.volume import Volume, RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.utils import Size as S
from pyocf.types.data import Data, DataOps
from pyocf.types.ctx import OcfCtx
@@ -83,10 +84,12 @@ def test_secure_erase_simple_io_read_misses(cache_mode):
core_device = RamVolume(S.from_MiB(50))
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

write_data = DataCopyTracer(S.from_sector(1))
io = core.new_io(
cache.get_default_queue(),
io = vol.new_io(
queue,
S.from_sector(1).B,
write_data.size,
IoDir.WRITE,
@@ -103,8 +106,8 @@ def test_secure_erase_simple_io_read_misses(cache_mode):
cmpls = []
for i in range(100):
read_data = DataCopyTracer(S.from_sector(1))
io = core.new_io(
cache.get_default_queue(),
io = vol.new_io(
queue,
i * S.from_sector(1).B,
read_data.size,
IoDir.READ,
@@ -122,9 +125,7 @@ def test_secure_erase_simple_io_read_misses(cache_mode):
c.wait()

write_data = DataCopyTracer.from_string("TEST DATA" * 100)
io = core.new_io(
cache.get_default_queue(), S.from_sector(1), write_data.size, IoDir.WRITE, 0, 0
)
io = vol.new_io(queue, S.from_sector(1), write_data.size, IoDir.WRITE, 0, 0)
io.set_data(write_data)

cmpl = OcfCompletion([("err", c_int)])
@@ -147,7 +148,6 @@ def test_secure_erase_simple_io_read_misses(cache_mode):
+ stats["req"]["rd_full_misses"]["value"]
) > 0


@pytest.mark.security
def test_secure_erase_simple_io_cleaning():
"""
@@ -176,11 +176,11 @@ def test_secure_erase_simple_io_cleaning():
core_device = RamVolume(S.from_MiB(100))
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

read_data = Data(S.from_sector(1).B)
io = core.new_io(
cache.get_default_queue(), S.from_sector(1).B, read_data.size, IoDir.WRITE, 0, 0
)
io = vol.new_io(queue, S.from_sector(1).B, read_data.size, IoDir.WRITE, 0, 0)
io.set_data(read_data)

cmpl = OcfCompletion([("err", c_int)])
@@ -189,9 +189,7 @@ def test_secure_erase_simple_io_cleaning():
cmpl.wait()

read_data = Data(S.from_sector(8).B)
io = core.new_io(
cache.get_default_queue(), S.from_sector(1).B, read_data.size, IoDir.READ, 0, 0
)
io = vol.new_io(queue, S.from_sector(1).B, read_data.size, IoDir.READ, 0, 0)
io.set_data(read_data)

cmpl = OcfCompletion([("err", c_int)])

@@ -18,6 +18,7 @@ from pyocf.types.cache import (
from pyocf.types.data import Data
from pyocf.types.core import Core
from pyocf.types.volume import ErrorDevice, RamVolume, VOLUME_POISON
from pyocf.types.volume_core import CoreVolume
from pyocf.types.io import IoDir
from pyocf.types.ioclass import IoClassesInfo, IoClassInfo
from pyocf.utils import Size as S
@@ -34,20 +35,20 @@ mngmt_op_surprise_shutdown_test_cache_size = S.from_MiB(40)
mngmt_op_surprise_shutdown_test_io_offset = S.from_MiB(4).B


def ocf_write(cache, core, val, offset):
def ocf_write(vol, queue, val, offset):
data = Data.from_bytes(bytes([val] * 512))
comp = OcfCompletion([("error", c_int)])
io = core.new_io(cache.get_default_queue(), offset, 512, IoDir.WRITE, 0, 0)
io = vol.new_io(queue, offset, 512, IoDir.WRITE, 0, 0)
io.set_data(data)
io.callback = comp.callback
io.submit()
comp.wait()


def ocf_read(cache, core, offset):
def ocf_read(vol, queue, offset):
data = Data(byte_count=512)
comp = OcfCompletion([("error", c_int)])
io = core.new_io(cache.get_default_queue(), offset, 512, IoDir.READ, 0, 0)
io = vol.new_io(queue, offset, 512, IoDir.READ, 0, 0)
io.set_data(data)
io.callback = comp.callback
io.submit()
@@ -163,7 +164,8 @@ def test_surprise_shutdown_remove_core_with_data(pyocf_ctx):

def prepare_func(cache):
cache.add_core(core)
ocf_write(cache, core, 0xAA, io_offset)
vol = CoreVolume(core, open=True)
ocf_write(vol, cache.get_default_queue(), 0xAA, io_offset)

def tested_func(cache):
cache.flush()
@@ -175,7 +177,8 @@ def test_surprise_shutdown_remove_core_with_data(pyocf_ctx):
assert core_device.get_bytes()[io_offset] == 0xAA
else:
core = cache.get_core_by_name("core1")
assert ocf_read(cache, core, io_offset) == 0xAA
vol = CoreVolume(core, open=True)
assert ocf_read(vol, cache.get_default_queue(), io_offset) == 0xAA

mngmt_op_surprise_shutdown_test(pyocf_ctx, tested_func, prepare_func, check_func)

@@ -226,8 +229,9 @@ def test_surprise_shutdown_swap_core_with_data(pyocf_ctx):

def prepare(cache):
cache.add_core(core1)
vol = CoreVolume(core1, open=True)
cache.save()
ocf_write(cache, core1, 0xAA, mngmt_op_surprise_shutdown_test_io_offset)
ocf_write(vol, cache.get_default_queue(), 0xAA, mngmt_op_surprise_shutdown_test_io_offset)
cache.remove_core(core1)
cache.save()

@@ -249,9 +253,10 @@ def test_surprise_shutdown_swap_core_with_data(pyocf_ctx):
core2 = cache.get_core_by_name("core2")

if core2 is not None:
vol2 = CoreVolume(core2, open=True)
assert core2.device.uuid == "dev2"
assert (
ocf_read(cache, core2, mngmt_op_surprise_shutdown_test_io_offset)
ocf_read(vol2, cache.get_default_queue(), mngmt_op_surprise_shutdown_test_io_offset)
== VOLUME_POISON
)

@@ -328,7 +333,8 @@ def test_surprise_shutdown_stop_cache(pyocf_ctx):
cache = Cache.start_on_device(device, cache_mode=CacheMode.WB)
core = Core(device=core_device)
cache.add_core(core)
ocf_write(cache, core, 0xAA, io_offset)
vol = CoreVolume(core, open=True)
ocf_write(vol, cache.get_default_queue(), 0xAA, io_offset)

# start error injection
device.arm()
@@ -361,7 +367,8 @@ def test_surprise_shutdown_stop_cache(pyocf_ctx):
assert stats["usage"]["occupancy"]["value"] == 1
core = Core(device=core_device)
cache.add_core(core, try_add=True)
assert ocf_read(cache, core, io_offset) == 0xAA
vol = CoreVolume(core, open=True)
assert ocf_read(vol, cache.get_default_queue(), io_offset) == 0xAA

cache.stop()

@@ -388,9 +395,11 @@ def test_surprise_shutdown_cache_reinit(pyocf_ctx):
cache = Cache.start_on_device(device, cache_mode=CacheMode.WB)
core = Core(device=core_device)
cache.add_core(core)
vol = CoreVolume(core, open=True)
queue = cache.get_default_queue()

# insert dirty cacheline
ocf_write(cache, core, 0xAA, io_offset)
ocf_write(vol, queue, 0xAA, io_offset)

cache.stop()

@@ -432,7 +441,8 @@ def test_surprise_shutdown_cache_reinit(pyocf_ctx):
if stats["conf"]["core_count"] == 0:
assert stats["usage"]["occupancy"]["value"] == 0
cache.add_core(core)
assert ocf_read(cache, core, io_offset) == VOLUME_POISON
vol = CoreVolume(core, open=True)
assert ocf_read(vol, cache.get_default_queue(), io_offset) == VOLUME_POISON

cache.stop()