diff --git a/src/metadata/metadata_hash.c b/src/metadata/metadata_hash.c
index 3c2352d..377b0b8 100644
--- a/src/metadata/metadata_hash.c
+++ b/src/metadata/metadata_hash.c
@@ -10,6 +10,7 @@
 #include "metadata_status.h"
 #include "../concurrency/ocf_concurrency.h"
 #include "../utils/utils_cache_line.h"
+#include "../utils/utils_io.h"
 #include "../utils/utils_pipeline.h"
 #include "../ocf_def_priv.h"
 #include "../ocf_priv.h"
@@ -1580,6 +1581,30 @@ static void ocf_metadata_hash_flush_superblock_finish(ocf_pipeline_t pipeline,
 	ocf_pipeline_destroy(pipeline);
 }
 
+static void ocf_metadata_hash_flush_disk_end(void *priv, int error)
+{
+	struct ocf_metadata_hash_context *context = priv;
+	ocf_pipeline_t pipeline = context->pipeline;
+
+	/* OCF_PL_FINISH_RET() terminates the pipeline and returns. */
+	if (error)
+		OCF_PL_FINISH_RET(pipeline, error);
+
+	ocf_pipeline_next(pipeline);
+}
+
+/* Flush the cache device's volatile write cache so the superblock
+ * segments written by the previous steps are durable on media. */
+static void ocf_metadata_hash_flush_disk(ocf_pipeline_t pipeline,
+		void *priv, ocf_pipeline_arg_t arg)
+{
+	struct ocf_metadata_hash_context *context = priv;
+	ocf_cache_t cache = context->cache;
+
+	ocf_submit_volume_flush(ocf_cache_get_volume(cache),
+			ocf_metadata_hash_flush_disk_end, context);
+}
+
 struct ocf_pipeline_arg ocf_metadata_hash_flush_sb_calculate_crc_args[] = {
 	OCF_PL_ARG_INT(metadata_segment_part_config),
 	OCF_PL_ARG_INT(metadata_segment_core_config),
@@ -1605,6 +1630,7 @@ struct ocf_pipeline_properties ocf_metadata_hash_flush_sb_pipeline_props = {
 			ocf_metadata_hash_flush_sb_calculate_crc_args),
 		OCF_PL_STEP_FOREACH(ocf_medatata_hash_flush_segment,
 			ocf_metadata_hash_flush_sb_flush_segment_args),
+		OCF_PL_STEP(ocf_metadata_hash_flush_disk),
 		OCF_PL_STEP_TERMINATOR(),
 	},
 };
diff --git a/tests/functional/pyocf/types/cache.py b/tests/functional/pyocf/types/cache.py
index a040f7e..d067c0f 100644
--- a/tests/functional/pyocf/types/cache.py
+++ b/tests/functional/pyocf/types/cache.py
@@ -522,12 +522,12 @@ class Cache:
         if not self.started:
             raise Exception("Not started!")
 
-        self.get_and_write_lock()
+        self.write_lock()
 
         c = OcfCompletion([("cache", c_void_p), ("priv", c_void_p), ("error", c_int)])
         self.owner.lib.ocf_mngt_cache_save(self.cache_handle, c, None)
         c.wait()
-        self.put_and_write_unlock()
+        self.write_unlock()
 
         if c.results["error"]:
             raise OcfError("Failed saving cache", c.results["error"])
diff --git a/tests/functional/tests/flush/test_flush_after_mngmt.py b/tests/functional/tests/flush/test_flush_after_mngmt.py
new file mode 100644
index 0000000..28df50b
--- /dev/null
+++ b/tests/functional/tests/flush/test_flush_after_mngmt.py
@@ -0,0 +1,114 @@
+#
+# Copyright(c) 2020 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+from ctypes import c_int, memmove, cast, c_void_p
+from enum import IntEnum
+from itertools import product
+import random
+
+import pytest
+
+from pyocf.types.cache import Cache, CacheMode
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.utils import Size
+from pyocf.types.shared import OcfCompletion
+
+
+def __io(io, queue, address, size, data, direction):
+    io.set_data(data, 0)
+    completion = OcfCompletion([("err", c_int)])
+    io.callback = completion.callback
+    io.submit()
+    completion.wait()
+    return int(completion.results["err"])
+
+
+def _io(new_io, queue, address, size, data, offset, direction, flags):
+    io = new_io(queue, address, size, direction, 0, flags)
+    if direction == IoDir.READ:
+        _data = Data.from_bytes(bytes(size))
+    else:
+        _data = Data.from_bytes(data, offset, size)
+    ret = __io(io, queue, address, size, _data, direction)
+    if not ret and direction == IoDir.READ:
+        memmove(cast(data, c_void_p).value + offset, _data.handle, size)
+    return ret
+
+
+def io_to_exp_obj(core, address, size, data, offset, direction, flags):
+    return _io(
+        core.new_io,
+        core.cache.get_default_queue(),
+        address,
+        size,
+        data,
+        offset,
+        direction,
+        flags,
+    )
+
+
+class FlushValVolume(Volume):
+    def __init__(self, size):
+        self.flush_last = False
+        super().__init__(size)
+
+    def submit_io(self, io):
+        self.flush_last = False
+        super().submit_io(io)
+
+    def submit_flush(self, flush):
+        self.flush_last = True
+        super().submit_flush(flush)
+
+
+def test_flush_after_mngmt(pyocf_ctx):
+    """
+    Check whether underlying volumes volatile caches (VC) are flushed after management operation
+    """
+    block_size = 4096
+
+    data = bytes(block_size)
+
+    cache_device = FlushValVolume(Size.from_MiB(30))
+    core_device = FlushValVolume(Size.from_MiB(30))
+
+    # after start cache VC must be cleared
+    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WT)
+    assert cache_device.flush_last
+
+    # adding core must flush VC
+    core = Core.using_device(core_device)
+    cache.add_core(core)
+    assert cache_device.flush_last
+
+    # WT I/O to write data to core and cache VC
+    io_to_exp_obj(core, block_size * 0, block_size, data, 0, IoDir.WRITE, 0)
+
+    # WB I/O to produce dirty cachelines in CAS
+    cache.change_cache_mode(CacheMode.WB)
+    io_to_exp_obj(core, block_size * 1, block_size, data, 0, IoDir.WRITE, 0)
+
+    # after cache flush VCs are expected to be cleared
+    cache.flush()
+    assert cache_device.flush_last
+    assert core_device.flush_last
+
+    # I/O to write data to cache device VC
+    io_to_exp_obj(core, block_size * 0, block_size, data, 0, IoDir.WRITE, 0)

+    # cache save must flush VC
+    cache.save()
+    assert cache_device.flush_last
+
+    # I/O to write data to cache device VC
+    io_to_exp_obj(core, block_size * 0, block_size, data, 0, IoDir.WRITE, 0)
+
+    # cache stop must flush VC
+    cache.stop()
+    assert cache_device.flush_last