Adapt all python code to PEP8 style standards
Signed-off-by: Kamil Lepek <kamil.lepek94@gmail.com>
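The changes below boil down to a handful of recurring PEP8 fixes: no spaces around "=" in keyword arguments, implicit continuation inside parentheses instead of trailing backslashes, wrapping over-long lines, and reflowing long docstrings. A minimal, self-contained sketch of the before/after pattern follows; the function and values are illustrative only and are not part of the patched test code.

def start_cache(cache_mode=None, cache_line_size=None, cache_id=None):
    """Illustrative stand-in for the wrapped calls seen in this diff."""
    return (cache_mode, cache_line_size, cache_id)


# Before: spaces around '=' in keyword arguments and a backslash continuation.
# cache = start_cache(cache_mode = "WT", cache_line_size = 4096, \
#     cache_id = 1)

# After: PEP8-style keyword arguments with implicit continuation,
# one argument per line, aligned with the opening parenthesis.
cache = start_cache(cache_mode="WT",
                    cache_line_size=4096,
                    cache_id=1)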
@@ -3,7 +3,6 @@
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

import pytest
from ctypes import c_int, memmove, cast, c_void_p
from enum import IntEnum
from itertools import product
@@ -11,11 +10,12 @@ import random

from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import Volume, ErrorDevice
from pyocf.types.volume import Volume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size
from pyocf.types.shared import OcfError, OcfCompletion
from pyocf.types.shared import OcfCompletion


def __io(io, queue, address, size, data, direction):
io.set_data(data, 0)
@@ -38,13 +38,16 @@ def _io(io, queue, address, size, data, offset, direction):
memmove(cast(data, c_void_p).value + offset, _data.handle, size)
return ret


def io_to_core(core, address, size, data, offset, direction):
return _io(core.new_core_io(), core.cache.get_default_queue(), address, size,
data, offset, direction)
data, offset, direction)


def io_to_exp_obj(core, address, size, data, offset, direction):
return _io(core.new_io(), core.cache.get_default_queue(), address, size, data,
offset, direction)
offset, direction)


def sector_to_region(sector, region_start):
i = 0
@@ -52,10 +55,12 @@ def sector_to_region(sector, region_start):
i += 1
return i


class SectorStatus(IntEnum):
DIRTY = 0,
CLEAN = 1,
INVALID = 2,
DIRTY = 0,
CLEAN = 1,
INVALID = 2,


I = SectorStatus.INVALID
D = SectorStatus.DIRTY
@@ -85,6 +90,8 @@ C = SectorStatus.CLEAN
# - if clean, exported object sector no @n is filled with 100 + @n
# - if dirty, exported object sector no @n is filled with 200 + @n
#


def test_wo_read_data_consistency(pyocf_ctx):
# start sector for each region
region_start = [0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
@@ -114,11 +121,11 @@ def test_wo_read_data_consistency(pyocf_ctx):

data = {}
# memset n-th sector of core data with n
data[SectorStatus.INVALID] = bytes([x // SECTOR_SIZE for x in range (WORKSET_SIZE)])
data[SectorStatus.INVALID] = bytes([x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
# memset n-th sector of clean data with n + 100
data[SectorStatus.CLEAN] = bytes([100 + x // SECTOR_SIZE for x in range (WORKSET_SIZE)])
data[SectorStatus.CLEAN] = bytes([100 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
# memset n-th sector of dirty data with n + 200
data[SectorStatus.DIRTY] = bytes([200 + x // SECTOR_SIZE for x in range (WORKSET_SIZE)])
data[SectorStatus.DIRTY] = bytes([200 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)])

result_b = bytes(WORKSET_SIZE)

@@ -137,30 +144,30 @@ def test_wo_read_data_consistency(pyocf_ctx):
combinations.append(S)
random.shuffle(combinations)

# add fixed test cases at the beginnning
# add fixed test cases at the beginning
combinations = fixed_combinations + combinations

for S in combinations[:ITRATION_COUNT]:
# write data to core and invalidate all CL
cache.change_cache_mode(cache_mode = CacheMode.PT)
io_to_exp_obj(core, WORKSET_OFFSET, len(data[SectorStatus.INVALID]), \
data[SectorStatus.INVALID], 0, IoDir.WRITE)
cache.change_cache_mode(cache_mode=CacheMode.PT)
io_to_exp_obj(core, WORKSET_OFFSET, len(data[SectorStatus.INVALID]),
data[SectorStatus.INVALID], 0, IoDir.WRITE)

# insert clean sectors
cache.change_cache_mode(cache_mode = CacheMode.WT)
cache.change_cache_mode(cache_mode=CacheMode.WT)
for sec in range(SECTOR_COUNT):
region = sector_to_region(sec, region_start)
if S[region] == SectorStatus.CLEAN:
io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE, \
data[SectorStatus.CLEAN], sec * SECTOR_SIZE, IoDir.WRITE)
io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE,
data[SectorStatus.CLEAN], sec * SECTOR_SIZE, IoDir.WRITE)

# write dirty sectors
cache.change_cache_mode(cache_mode = CacheMode.WO)
cache.change_cache_mode(cache_mode=CacheMode.WO)
for sec in range(SECTOR_COUNT):
region = sector_to_region(sec, region_start)
if S[region] == SectorStatus.DIRTY:
io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE, \
data[SectorStatus.DIRTY], sec * SECTOR_SIZE, IoDir.WRITE)
io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE,
data[SectorStatus.DIRTY], sec * SECTOR_SIZE, IoDir.WRITE)

for s in start_sec:
for e in end_sec:
@@ -171,10 +178,9 @@ def test_wo_read_data_consistency(pyocf_ctx):
START = s * SECTOR_SIZE
END = e * SECTOR_SIZE
size = (e - s + 1) * SECTOR_SIZE
assert(0 == io_to_exp_obj(core, WORKSET_OFFSET + START, size, \
result_b, START, IoDir.READ)), \
"error reading in WO mode: S={}, start={}, end={}".format( \
S, s, e)
assert(0 == io_to_exp_obj(core, WORKSET_OFFSET + START, size,
result_b, START, IoDir.READ)),\
"error reading in WO mode: S={}, start={}, end={}".format(S, s, e)

# verify read data
for sec in range(s, e + 1):
@@ -182,6 +188,4 @@ def test_wo_read_data_consistency(pyocf_ctx):
region = sector_to_region(sec, region_start)
check_byte = sec * SECTOR_SIZE
assert(result_b[check_byte] == data[S[region]][check_byte]), \
"unexpected data in sector {}, S={}, s={}, e={}\n".format( \
sec, S, s, e)

"unexpected data in sector {}, S={}, s={}, e={}\n".format(sec, S, s, e)
@@ -111,8 +111,9 @@ def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheL
core_device.reset_stats()

test_data = Data.from_string("Changed test data")


io_to_core(core_exported, test_data, Size.from_sector(1).B)

check_stats_write_after_read(core_exported, mode, cls, True)

logger.info("[STAGE] Read from exported object after write")
@@ -159,7 +160,8 @@ def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: Me
assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT, "Eviction policy"
assert stats["conf"]["cache_id"] == cache_id, "Cache id"
assert cache.get_name() == name, "Cache name"
# TODO: metadata_layout, metadata_volatile, max_queue_size, queue_unblock_size, pt_unaligned_io, use_submit_fast
# TODO: metadata_layout, metadata_volatile, max_queue_size,
# queue_unblock_size, pt_unaligned_io, use_submit_fast
# TODO: test in functional tests


@@ -254,8 +256,9 @@ def test_100_start_stop(pyocf_ctx):

def test_start_stop_incrementally(pyocf_ctx):
"""Starting/stopping multiple caches incrementally.
Check whether OCF behaves correctly when few caches at a time are in turns added and removed (#added > #removed)
until their number reaches limit, and then proportions are reversed and number of caches gradually falls to 0.
Check whether OCF behaves correctly when few caches at a time are
in turns added and removed (#added > #removed) until their number reaches limit,
and then proportions are reversed and number of caches gradually falls to 0.
"""

caches = []
@@ -292,7 +295,8 @@ def test_start_stop_incrementally(pyocf_ctx):
stats = cache.get_stats()
cache_id = stats["conf"]["cache_id"]
cache.stop()
assert get_cache_by_id(pyocf_ctx, cache_id) != 0, "Try getting cache after stopping it"
assert get_cache_by_id(pyocf_ctx, cache_id) !=\
0, "Try getting cache after stopping it"
add = not add


@@ -306,11 +310,17 @@ def test_start_cache_same_id(pyocf_ctx, mode, cls):
cache_device1 = Volume(Size.from_MiB(20))
cache_device2 = Volume(Size.from_MiB(20))
cache_id = randrange(1, 16385)
cache = Cache.start_on_device(cache_device1, cache_mode=mode, cache_line_size=cls, cache_id=cache_id)
cache = Cache.start_on_device(cache_device1,
cache_mode=mode,
cache_line_size=cls,
cache_id=cache_id)
cache.get_stats()

with pytest.raises(OcfError, match="OCF_ERR_CACHE_EXIST"):
cache = Cache.start_on_device(cache_device2, cache_mode=mode, cache_line_size=cls, cache_id=cache_id)
cache = Cache.start_on_device(cache_device2,
cache_mode=mode,
cache_line_size=cls,
cache_id=cache_id)
cache.get_stats()


@@ -418,14 +428,20 @@ def check_stats_write_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineS
"Occupancy"


def check_stats_write_after_read(exported_obj: Core, mode: CacheMode, cls: CacheLineSize, read_from_empty=False):
def check_stats_write_after_read(exported_obj: Core,
mode: CacheMode,
cls: CacheLineSize,
read_from_empty=False):
stats = exported_obj.cache.get_stats()
assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
(0 if mode in {CacheMode.WI, CacheMode.PT} else (2 if read_from_empty and mode.lazy_write() else 1)), \
(0 if mode in {CacheMode.WI, CacheMode.PT} else
(2 if read_from_empty and mode.lazy_write() else 1)), \
"Writes to cache device"
assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
"Writes to core device"
assert stats["req"]["wr_hits"]["value"] == (1 if (mode.read_insert() and mode != CacheMode.WI) or (mode.write_insert() and not read_from_empty) else 0), \
assert stats["req"]["wr_hits"]["value"] == \
(1 if (mode.read_insert() and mode != CacheMode.WI)
or (mode.write_insert() and not read_from_empty) else 0), \
"Write hits"
assert stats["usage"]["occupancy"]["value"] == \
(0 if mode in {CacheMode.WI, CacheMode.PT} else (cls / CacheLineSize.LINE_4KiB)), \
@@ -438,16 +454,20 @@ def check_stats_read_after_write(exported_obj, mode, cls, write_to_empty=False):
(2 if mode.lazy_write() else (0 if mode == CacheMode.PT else 1)), \
"Writes to cache device"
assert exported_obj.cache.device.get_stats()[IoDir.READ] == \
(1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO} or (mode == CacheMode.WA and not write_to_empty) else 0), \
(1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
or (mode == CacheMode.WA and not write_to_empty) else 0), \
"Reads from cache device"
assert exported_obj.device.get_stats()[IoDir.READ] == \
(0 if mode in {CacheMode.WB, CacheMode.WO, CacheMode.WT} or (mode == CacheMode.WA and not write_to_empty) else 1), \
(0 if mode in {CacheMode.WB, CacheMode.WO, CacheMode.WT}
or (mode == CacheMode.WA and not write_to_empty) else 1), \
"Reads from core device"
assert stats["req"]["rd_full_misses"]["value"] == (1 if mode in {CacheMode.WA, CacheMode.WI} else 0) \
assert stats["req"]["rd_full_misses"]["value"] == \
(1 if mode in {CacheMode.WA, CacheMode.WI} else 0) \
+ (0 if write_to_empty or mode in {CacheMode.PT, CacheMode.WA} else 1), \
"Read full misses"
assert stats["req"]["rd_hits"]["value"] == \
(1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO} or (mode == CacheMode.WA and not write_to_empty) else 0), \
(1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO}
or (mode == CacheMode.WA and not write_to_empty) else 0), \
"Read hits"
assert stats["usage"]["occupancy"]["value"] == \
(0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)), "Occupancy"
@@ -467,4 +487,6 @@ def check_md5_sums(exported_obj: Core, mode: CacheMode):

def get_cache_by_id(ctx, cache_id):
cache_pointer = c_void_p()
return OcfLib.getInstance().ocf_mngt_cache_get_by_id(ctx.ctx_handle, cache_id, byref(cache_pointer))
return OcfLib.getInstance().ocf_mngt_cache_get_by_id(ctx.ctx_handle,
cache_id,
byref(cache_pointer))
@@ -12,7 +12,6 @@ from pyocf.utils import Size
from pyocf.types.shared import OcfError, CacheLineSize
from ctypes import c_uint32


logger = logging.getLogger(__name__)


@@ -51,7 +50,8 @@ def test_fuzzy_start_cache_line_size(pyocf_ctx, c_uint64_randomize, cm):
with pytest.raises(OcfError, match="OCF_ERR_INVALID_CACHE_LINE_SIZE"):
try_start_cache(cache_mode=cm, cache_line_size=c_uint64_randomize)
else:
logger.warning(f"Test skipped for valid cache line size enum value: '{c_uint64_randomize}'. ")
logger.warning(
f"Test skipped for valid cache line size enum value: '{c_uint64_randomize}'. ")


@pytest.mark.security
@@ -67,8 +67,9 @@ def test_fuzzy_start_name(pyocf_ctx, string_randomize, cm, cls):
"""
cache_device = Volume(Size.from_MiB(30))
try:
cache = Cache.start_on_device(cache_device, name=string_randomize, cache_mode=cm, cache_line_size=cls)
except:
cache = Cache.start_on_device(cache_device, name=string_randomize, cache_mode=cm,
cache_line_size=cls)
except OcfError:
logger.error(f"Cache did not start properly with correct name value: {string_randomize}")
cache.stop()

@@ -107,7 +108,8 @@ def test_fuzzy_start_eviction_policy(pyocf_ctx, c_uint32_randomize, cm, cls):
with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
try_start_cache(eviction_policy=c_uint32_randomize, cache_mode=cm, cache_line_size=cls)
else:
logger.warning(f"Test skipped for valid eviction policy enum value: '{c_uint32_randomize}'. ")
logger.warning(
f"Test skipped for valid eviction policy enum value: '{c_uint32_randomize}'. ")


@pytest.mark.security
@@ -125,7 +127,8 @@ def test_fuzzy_start_metadata_layout(pyocf_ctx, c_uint32_randomize, cm, cls):
with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
try_start_cache(metadata_layout=c_uint32_randomize, cache_mode=cm, cache_line_size=cls)
else:
logger.warning(f"Test skipped for valid metadata layout enum value: '{c_uint32_randomize}'. ")
logger.warning(
f"Test skipped for valid metadata layout enum value: '{c_uint32_randomize}'. ")


@pytest.mark.security
@@ -133,7 +136,8 @@ def test_fuzzy_start_metadata_layout(pyocf_ctx, c_uint32_randomize, cm, cls):
@pytest.mark.parametrize('max_wb_queue_size', generate_random_numbers(c_uint32, 10))
def test_fuzzy_start_max_queue_size(pyocf_ctx, max_wb_queue_size, c_uint32_randomize, cls):
"""
Test whether it is impossible to start cache with invalid dependence between max queue size and queue unblock size.
Test whether it is impossible to start cache with invalid dependence between max queue size
and queue unblock size.
:param pyocf_ctx: basic pyocf context fixture
:param max_wb_queue_size: max queue size value to start cache with
:param c_uint32_randomize: queue unblock size value to start cache with
@@ -148,4 +152,5 @@ def test_fuzzy_start_max_queue_size(pyocf_ctx, max_wb_queue_size, c_uint32_rando
cache_line_size=cls)
else:
logger.warning(f"Test skipped for valid values: "
f"'max_queue_size={max_wb_queue_size}, queue_unblock_size={c_uint32_randomize}'.")
f"'max_queue_size={max_wb_queue_size}, "
f"queue_unblock_size={c_uint32_randomize}'.")
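As a follow-up note (not part of the original commit): one way to check that the adapted files stay within the targeted style is to run pycodestyle over the test tree. The path and the 100-character limit below are assumptions, since neither is stated in the diff.

# Hypothetical conformance check; the path and max_line_length are assumptions.
import pycodestyle

style = pycodestyle.StyleGuide(max_line_length=100)
report = style.check_files(["tests/functional"])
print(f"PEP8 violations found: {report.total_errors}")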