diff --git a/tests/functional/pyocf/types/cache.py b/tests/functional/pyocf/types/cache.py
index d1506c6..80c2048 100644
--- a/tests/functional/pyocf/types/cache.py
+++ b/tests/functional/pyocf/types/cache.py
@@ -154,7 +154,6 @@ class Cache:
         self.cache_handle = c_void_p()
         self._as_parameter_ = self.cache_handle
         self.io_queues = []
-        self.device = None
         self.cores = []
 
     def start_cache(
@@ -425,6 +424,7 @@ class Cache:
             raise OcfError("Failed getting stats", status)
 
         line_size = CacheLineSize(cache_info.cache_line_size)
+        cache_id = self.owner.lib.ocf_cache_get_id(self)
 
         self.put_and_read_unlock()
         return {
@@ -455,6 +455,7 @@ class Cache:
                 "core_count": cache_info.core_count,
                 "metadata_footprint": Size(cache_info.metadata_footprint),
                 "metadata_end_offset": Size(cache_info.metadata_end_offset),
+                "cache_id": cache_id,
             },
             "block": struct_to_dict(block),
             "req": struct_to_dict(req),
diff --git a/tests/functional/pyocf/types/ctx.py b/tests/functional/pyocf/types/ctx.py
index b757cb5..775eae5 100644
--- a/tests/functional/pyocf/types/ctx.py
+++ b/tests/functional/pyocf/types/ctx.py
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: BSD-3-Clause-Clear
 #
 
-from ctypes import c_void_p, Structure, c_char_p, cast, pointer, byref
+from ctypes import c_void_p, Structure, c_char_p, cast, pointer, byref, c_int
 
 from .logger import LoggerOps, Logger
 from .data import DataOps, Data
@@ -119,3 +119,8 @@ def get_default_ctx(logger):
         MetadataUpdater,
         Cleaner,
     )
+
+
+lib = OcfLib.getInstance()
+lib.ocf_mngt_cache_get_by_id.argtypes = [c_void_p, c_void_p, c_void_p]
+lib.ocf_mngt_cache_get_by_id.restype = c_int
diff --git a/tests/functional/tests/management/test_start_stop.py b/tests/functional/tests/management/test_start_stop.py
new file mode 100644
index 0000000..a907b57
--- /dev/null
+++ b/tests/functional/tests/management/test_start_stop.py
@@ -0,0 +1,447 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import logging
+from ctypes import c_int, c_void_p, byref
+from random import randrange
+
+import pytest
+
+from pyocf.ocf import OcfLib
+from pyocf.types.cache import Cache, CacheMode, MetadataLayout, EvictionPolicy
+from pyocf.types.core import Core
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.types.shared import OcfError, OcfCompletion, CacheLineSize
+from pyocf.types.volume import Volume
+from pyocf.utils import Size
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
+    """Test starting cache in different modes with different cache line sizes.
+    After start check proper cache mode behaviour, starting with write operation.
+    """
+
+    cache_device = Volume(Size.from_MiB(40))
+    core_device = Volume(Size.from_MiB(10))
+    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+    core_exported = Core.using_device(core_device)
+
+    cache.add_core(core_exported)
+
+    logger.info("[STAGE] Initial write to exported object")
+    cache_device.reset_stats()
+    core_device.reset_stats()
+
+    test_data = Data.from_string("This is test data")
+    io_to_core(core_exported, test_data, 20)
+    check_stats_write_empty(core_exported, mode, cls)
+
+    logger.info("[STAGE] Read from exported object after initial write")
+    io_from_exported_object(core_exported, test_data.size, 20)
+    check_stats_read_after_write(core_exported, mode, cls, True)
+
+    logger.info("[STAGE] Write to exported object after read")
+    cache_device.reset_stats()
+    core_device.reset_stats()
+
+    test_data = Data.from_string("Changed test data")
+
+    io_to_core(core_exported, test_data, 20)
+    check_stats_write_after_read(core_exported, mode, cls)
+
+    check_md5_sums(core_exported, mode)
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
+    """Starting cache in different modes with different cache line sizes.
+    After start check proper cache mode behaviour, starting with read operation.
+    """
+
+    cache_device = Volume(Size.from_MiB(20))
+    core_device = Volume(Size.from_MiB(5))
+    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+    core_exported = Core.using_device(core_device)
+
+    cache.add_core(core_exported)
+
+    logger.info("[STAGE] Initial write to core device")
+    test_data = Data.from_string("This is test data")
+    io_to_core(core_exported, test_data, 20, True)
+
+    cache_device.reset_stats()
+    core_device.reset_stats()
+
+    logger.info("[STAGE] Initial read from exported object")
+    io_from_exported_object(core_exported, test_data.size, 20)
+    check_stats_read_empty(core_exported, mode, cls)
+
+    logger.info("[STAGE] Write to exported object after initial read")
+    cache_device.reset_stats()
+    core_device.reset_stats()
+
+    test_data = Data.from_string("Changed test data")
+
+    io_to_core(core_exported, test_data, 20)
+    check_stats_write_after_read(core_exported, mode, cls, True)
+
+    logger.info("[STAGE] Read from exported object after write")
+    io_from_exported_object(core_exported, test_data.size, 20)
+    check_stats_read_after_write(core_exported, mode, cls)
+
+    check_md5_sums(core_exported, mode)
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("layout", MetadataLayout)
+def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: MetadataLayout):
+    """Starting cache with different parameters.
+    Check if cache starts without errors.
+    If possible check whether cache reports properly set parameters.
+    """
+    cache_device = Volume(Size.from_MiB(20))
+    queue_size = randrange(60000, 2**32)
+    unblock_size = randrange(1, queue_size)
+    cache_id = randrange(1, 16385)
+    volatile_metadata = randrange(2) == 1
+    unaligned_io = randrange(2) == 1
+    submit_fast = randrange(2) == 1
+    name = "test"
+
+    logger.info("[STAGE] Start cache")
+    cache = Cache.start_on_device(
+        cache_device,
+        cache_mode=mode,
+        cache_line_size=cls,
+        cache_id=cache_id,
+        name=name,
+        metadata_layout=MetadataLayout.SEQUENTIAL,
+        metadata_volatile=volatile_metadata,
+        max_queue_size=queue_size,
+        queue_unblock_size=unblock_size,
+        pt_unaligned_io=unaligned_io,
+        use_submit_fast=submit_fast)
+
+    stats = cache.get_stats()
+    assert stats["conf"]["cache_mode"] == mode, "Cache mode"
+    assert stats["conf"]["cache_line_size"] == cls, "Cache line size"
+    assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT, "Eviction policy"
+    assert stats["conf"]["cache_id"] == cache_id, "Cache id"
+    assert cache.get_name() == name, "Cache name"
+    # TODO: metadata_layout, metadata_volatile, max_queue_size, queue_unblock_size, pt_unaligned_io, use_submit_fast
+    # TODO: test in functional tests
+
+
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("with_flush", {True, False})
+def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
+    """Stopping cache.
+    Check if cache is stopped properly in different modes with or without preceding flush operation.
+    """
+
+    cache_device = Volume(Size.from_MiB(20))
+    core_device = Volume(Size.from_MiB(5))
+    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+    core_exported = Core.using_device(core_device)
+    cache.add_core(core_exported)
+
+    run_io_and_cache_data_if_possible(core_exported, mode, cls)
+
+    stats = cache.get_stats()
+    assert int(stats["conf"]["dirty"]) == (1 if mode == CacheMode.WB else 0), "Dirty data before MD5"
+
+    md5_exported_core = core_exported.exp_obj_md5()
+
+    if with_flush:
+        cache.flush()
+    cache.stop()
+
+    if mode == CacheMode.WB and not with_flush:
+        pytest.xfail("MD5 sums equal without flush with dirty data")  # TODO: remove after WB fixed
+        assert core_device.md5() != md5_exported_core, \
+            "MD5 check: core device vs exported object with dirty data"
+    else:
+        assert core_device.md5() == md5_exported_core, \
+            "MD5 check: core device vs exported object with clean data"
+
+
+def test_start_stop_multiple(pyocf_ctx):
+    """Starting/stopping multiple caches.
+    Check whether OCF allows for starting multiple caches and stopping them in random order
+    """
+
+    caches = []
+    caches_no = randrange(6, 11)
+    for i in range(1, caches_no):
+        cache_device = Volume(Size.from_MiB(20))
+        cache_mode = CacheMode(randrange(0, len(CacheMode)))
+        size = 4096 * 2**randrange(0, len(CacheLineSize))
+        cache_line_size = CacheLineSize(size)
+
+        cache = Cache.start_on_device(
+            cache_device,
+            cache_mode=cache_mode,
+            cache_line_size=cache_line_size)
+        caches.append(cache)
+        stats = cache.get_stats()
+        assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
+        assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
+        assert stats["conf"]["cache_id"] == i, "Cache id"
+
+    caches.sort(key=lambda e: randrange(1000))
+    for cache in caches:
+        logger.info("Getting stats before stopping cache")
+        stats = cache.get_stats()
+        cache_id = stats["conf"]["cache_id"]
+        cache.stop()
+        assert get_cache_by_id(pyocf_ctx, cache_id) != 0, "Try getting cache after stopping it"
+
+
+def test_100_start_stop(pyocf_ctx):
+    """Starting/stopping stress test.
+    Check OCF behaviour when cache is started and stopped continuously
+    """
+
+    for i in range(1, 101):
+        cache_device = Volume(Size.from_MiB(20))
+        cache_mode = CacheMode(randrange(0, len(CacheMode)))
+        size = 4096 * 2**randrange(0, len(CacheLineSize))
+        cache_line_size = CacheLineSize(size)
+
+        cache = Cache.start_on_device(
+            cache_device,
+            cache_mode=cache_mode,
+            cache_line_size=cache_line_size)
+        stats = cache.get_stats()
+        assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
+        assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
+        assert stats["conf"]["cache_id"] == 1, "Cache id"
+        cache.stop()
+        assert get_cache_by_id(pyocf_ctx, 1) != 0, "Try getting cache after stopping it"
+
+
+def test_start_stop_incrementally(pyocf_ctx):
+    """Starting/stopping multiple caches incrementally.
+    Check whether OCF behaves correctly when a few caches at a time are in turn added and removed (#added > #removed)
+    until their number reaches the limit, then the proportions are reversed and the number of caches gradually falls to 0.
+    """
+
+    caches = []
+    caches_limit = 10
+    add = True
+    run = True
+    increase = True
+    while run:
+        if add:
+            for i in range(0, randrange(3, 5) if increase else randrange(1, 3)):
+                cache_device = Volume(Size.from_MiB(20))
+                cache_mode = CacheMode(randrange(0, len(CacheMode)))
+                size = 4096 * 2**randrange(0, len(CacheLineSize))
+                cache_line_size = CacheLineSize(size)
+
+                cache = Cache.start_on_device(
+                    cache_device,
+                    cache_mode=cache_mode,
+                    cache_line_size=cache_line_size)
+                caches.append(cache)
+                stats = cache.get_stats()
+                assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
+                assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
+                assert stats["conf"]["cache_id"] == len(caches), "Cache id"
+                if len(caches) == caches_limit:
+                    increase = False
+        else:
+            for i in range(0, randrange(1, 3) if increase else randrange(3, 5)):
+                if len(caches) == 0:
+                    run = False
+                    break
+                cache = caches.pop()
+                logger.info("Getting stats before stopping cache")
+                stats = cache.get_stats()
+                cache_id = stats["conf"]["cache_id"]
+                cache.stop()
+                assert get_cache_by_id(pyocf_ctx, cache_id) != 0, "Try getting cache after stopping it"
+        add = not add
+
+
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_cache_same_id(pyocf_ctx, mode, cls):
+    """Adding two caches with the same cache_id
+    Check that OCF does not allow for 2 caches to be started with the same cache_id
+    """
+
+    cache_device1 = Volume(Size.from_MiB(20))
+    cache_device2 = Volume(Size.from_MiB(20))
+    cache_id = randrange(1, 16385)
+    cache = Cache.start_on_device(cache_device1, cache_mode=mode, cache_line_size=cls, cache_id=cache_id)
+    cache.get_stats()
+
+    with pytest.raises(OcfError, match="OCF_ERR_CACHE_EXIST"):
+        cache = Cache.start_on_device(cache_device2, cache_mode=mode, cache_line_size=cls, cache_id=cache_id)
+        cache.get_stats()
+
+
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_cache_same_device(pyocf_ctx, mode, cls):
+    """Adding two caches using the same cache device
+    Check that OCF does not allow for 2 caches using the same cache device to be started
+    """
+
+    cache_device = Volume(Size.from_MiB(20))
+    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+    cache.get_stats()
+
+    with pytest.raises(OcfError, match="OCF_ERR_NOT_OPEN_EXC"):
+        cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+        cache.get_stats()
+
+
+@pytest.mark.parametrize("mode", CacheMode)
+@pytest.mark.parametrize("cls", CacheLineSize)
+def test_start_too_small_device(pyocf_ctx, mode, cls):
+    """Starting cache with device below minimum size
+    Check if starting cache with device below minimum size is blocked
+    """
+
+    cache_device = Volume(Size.from_B(20 * 1024 * 1024 - 1))
+
+    with pytest.raises(OcfError, match="OCF_ERR_START_CACHE_FAIL"):
+        Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+
+
+def run_io_and_cache_data_if_possible(exported_obj, mode, cls):
+    test_data = Data.from_string("This is test data")
+
+    if mode in {CacheMode.WI, CacheMode.WA}:
+        logger.info("[STAGE] Write to core device")
+        io_to_core(exported_obj, test_data, 20, True)
+        logger.info("[STAGE] Read from exported object")
+        io_from_exported_object(exported_obj, test_data.size, 20)
+    else:
+        logger.info("[STAGE] Write to exported object")
+        io_to_core(exported_obj, test_data, 20)
+
+    stats = exported_obj.cache.get_stats()
+    assert stats["usage"]["occupancy"]["value"] == \
+        ((cls / CacheLineSize.LINE_4KiB) if mode != CacheMode.PT else 0), "Occupancy"
+
+
+def io_to_core(exported_obj: Core, data: Data, offset: int, to_core_device=False):
+    io = exported_obj.new_core_io() if to_core_device else exported_obj.new_io()
+    io.set_data(data)
+    io.configure(offset, data.size, IoDir.WRITE, 0, 0)
+    io.set_queue(exported_obj.cache.get_default_queue())
+
+    completion = OcfCompletion([("err", c_int)])
+    io.callback = completion.callback
+    io.submit()
+    completion.wait()
+
+    assert completion.results["err"] == 0, "IO to exported object completion"
+
+
+def io_from_exported_object(exported_obj: Core, buffer_size: int, offset: int):
+    read_buffer = Data(buffer_size)
+    io = exported_obj.new_io()
+    io.configure(offset, read_buffer.size, IoDir.READ, 0, 0)
+    io.set_data(read_buffer)
+    io.set_queue(exported_obj.cache.get_default_queue())
+
+    completion = OcfCompletion([("err", c_int)])
+    io.callback = completion.callback
+    io.submit()
+    completion.wait()
+
+    assert completion.results["err"] == 0, "IO from exported object completion"
+    return read_buffer
+
+
+def check_stats_read_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineSize):
+    stats = exported_obj.cache.get_stats()
+    assert stats["conf"]["cache_mode"] == mode, "Cache mode"
+    assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == (0 if mode == CacheMode.PT else 1), \
+        "Writes to cache device"
+    assert exported_obj.device.get_stats()[IoDir.READ] == 1, "Reads from core device"
+    assert stats["req"]["rd_full_misses"]["value"] == (0 if mode == CacheMode.PT else 1), \
+        "Read full misses"
+    assert stats["usage"]["occupancy"]["value"] == \
+        (0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)), "Occupancy"
+
+
+def check_stats_write_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineSize):
+    stats = exported_obj.cache.get_stats()
+    assert stats["conf"]["cache_mode"] == mode, "Cache mode"
+    assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
+        (2 if mode == CacheMode.WB else (1 if mode == CacheMode.WT else 0)), \
+        "Writes to cache device"
+    assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode == CacheMode.WB else 1), \
+        "Writes to core device"
+    assert stats["req"]["wr_full_misses"]["value"] == (1 if mode in {CacheMode.WT, CacheMode.WB} else 0), \
+        "Write full misses"
+    assert stats["usage"]["occupancy"]["value"] == \
+        ((cls / CacheLineSize.LINE_4KiB) if mode in {CacheMode.WB, CacheMode.WT} else 0), \
+        "Occupancy"
+
+
+def check_stats_write_after_read(exported_obj: Core, mode: CacheMode, cls: CacheLineSize, read_from_empty=False):
+    stats = exported_obj.cache.get_stats()
+    assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
+        (0 if mode in {CacheMode.WI, CacheMode.PT} else (2 if read_from_empty and mode == CacheMode.WB else 1)), \
+        "Writes to cache device"
+    assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode == CacheMode.WB else 1), \
+        "Writes to core device"
+    assert stats["req"]["wr_hits"]["value"] == (0 if mode in {CacheMode.WI, CacheMode.PT} else 1), \
+        "Write hits"
+    assert stats["usage"]["occupancy"]["value"] == \
+        (0 if mode in {CacheMode.WI, CacheMode.PT} else (cls / CacheLineSize.LINE_4KiB)), \
+        "Occupancy"
+
+
+def check_stats_read_after_write(exported_obj, mode, cls, write_to_empty=False):
+    stats = exported_obj.cache.get_stats()
+    assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
+        (2 if mode == CacheMode.WB else (0 if mode == CacheMode.PT else 1)), \
+        "Writes to cache device"
+    assert exported_obj.cache.device.get_stats()[IoDir.READ] == \
+        (1 if mode in {CacheMode.WT, CacheMode.WB} or (mode == CacheMode.WA and not write_to_empty) else 0), \
+        "Reads from cache device"
+    assert exported_obj.device.get_stats()[IoDir.READ] == \
+        (0 if mode in {CacheMode.WB, CacheMode.WT} or (mode == CacheMode.WA and not write_to_empty) else 1), \
+        "Reads from core device"
+    assert stats["req"]["rd_full_misses"]["value"] == (1 if mode in {CacheMode.WA, CacheMode.WI} else 0) \
+        + (0 if write_to_empty or mode in {CacheMode.PT, CacheMode.WA} else 1), \
+        "Read full misses"
+    assert stats["req"]["rd_hits"]["value"] == \
+        (1 if mode in {CacheMode.WT, CacheMode.WB} or (mode == CacheMode.WA and not write_to_empty) else 0), \
+        "Read hits"
+    assert stats["usage"]["occupancy"]["value"] == \
+        (0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)), "Occupancy"
+
+
+def check_md5_sums(exported_obj: Core, mode: CacheMode):
+    if mode == CacheMode.WB:
+        assert exported_obj.device.md5() != exported_obj.exp_obj_md5(), \
+            "MD5 check: core device vs exported object without flush"
+        exported_obj.cache.flush()
+        assert exported_obj.device.md5() == exported_obj.exp_obj_md5(), \
+            "MD5 check: core device vs exported object after flush"
+    else:
+        assert exported_obj.device.md5() == exported_obj.exp_obj_md5(), \
+            "MD5 check: core device vs exported object"
+
+
+def get_cache_by_id(ctx, cache_id):
+    cache_pointer = c_void_p()
+    return OcfLib.getInstance().ocf_mngt_cache_get_by_id(ctx.ctx_handle, cache_id, byref(cache_pointer))