Merge pull request #853 from mmichal10/repart

Repart
Robert Baldyga 2025-02-04 16:39:49 +01:00 committed by GitHub
commit be068df400
4 changed files with 259 additions and 13 deletions

@@ -228,6 +228,12 @@ void ocf_engine_lookup(struct ocf_request *req)
ocf_engine_lookup_map_entry(cache, entry, core_id,
core_line);
/*
* The flag could have been set by a previous lookup, but the
* cache line might have been remapped in the meantime.
*/
entry->re_part = false;
if (entry->status != LOOKUP_HIT) {
/* On a miss, look up the next map entry */
OCF_DEBUG_PARAM(cache, "Miss, core line = %llu",
@@ -267,6 +273,11 @@ int ocf_engine_check(struct ocf_request *req)
core_line <= req->core_line_last; core_line++, i++) {
struct ocf_map_info *entry = &(req->map[i]);
/*
* The flag could have been set by a previous traverse, but the
* cache line might have been remapped in the meantime.
*/
entry->re_part = false;
if (entry->status == LOOKUP_MISS) {
continue;
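
For context, here is a minimal Python model of why the flag is reset on every pass. This is an illustrative sketch with invented names (the entry dicts and mapping tables stand in for struct ocf_map_info and the OCF metadata), not the OCF implementation: a request can be looked up more than once, and a line that was a hit on the first pass may be remapped before the next one, so a stale re_part flag must never survive into the next lookup.

# Illustrative sketch only; names and data structures are invented.
def lookup(entries, mapping, partition_of, req_part_id):
    for entry in entries:
        # Reset unconditionally: the flag may be stale from a previous
        # traverse if the cache line was remapped in the meantime.
        entry["re_part"] = False
        coll_idx = mapping.get(entry["core_line"])
        if coll_idx is None:
            entry["status"] = "MISS"
            continue
        entry["status"] = "HIT"
        entry["coll_idx"] = coll_idx
        # Only HIT lines sitting in a different partition than the one
        # the request targets are marked for repartitioning.
        if partition_of[coll_idx] != req_part_id:
            entry["re_part"] = True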

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -107,16 +108,6 @@ void ocf_user_part_move(struct ocf_request *req)
continue;
}
/* Moving cachelines to another partition is needed only
* for those already mapped before this request, since remapped
* cachelines are assigned to the target partition during eviction.
* So only HIT cachelines are interesting.
*/
if (entry->status != LOOKUP_HIT) {
/* No HIT */
continue;
}
line = entry->coll_idx;
id_old = ocf_metadata_get_partition_id(cache, line);
id_new = req->part_id;
@@ -124,6 +115,13 @@ void ocf_user_part_move(struct ocf_request *req)
ENV_BUG_ON(id_old >= OCF_USER_IO_CLASS_MAX ||
id_new >= OCF_USER_IO_CLASS_MAX);
if (unlikely(entry->status == LOOKUP_MISS)) {
ocf_cache_log(cache, log_err, "Attempt to remap "
"an unmapped cache line from ioclass "
"%hu to ioclass %hu\n", id_old, id_new);
ENV_BUG();
}
if (id_old == id_new) {
/* Partition of the request and cache line is the same,
* no need to change partition
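
The behavioral change in this hunk: the old code silently skipped every entry that was not a hit, while after this commit an unmapped (LOOKUP_MISS) entry trips ENV_BUG() instead, and the remaining entries are processed. A hedged Python model of the new invariant (invented names, not OCF code):

# Illustrative sketch: every entry reaching the move step must already
# be mapped; a MISS here indicates a logic error upstream.
def move_to_partition(entries, partition_of, target_part):
    for entry in entries:
        assert entry["status"] != "MISS", \
            "attempt to remap an unmapped cache line"
        line = entry["coll_idx"]
        if partition_of[line] == target_part:
            continue  # already in the requested partition
        partition_of[line] = target_part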

@@ -956,6 +956,28 @@ class Cache:
"errors": struct_to_dict(errors),
}

def get_ioclass_stats(self, io_class_id):
usage = UsageStats()
req = RequestsStats()
block = BlocksStats()
self.read_lock()
status = self.owner.lib.ocf_stats_collect_part_cache(
self.cache_handle, io_class_id, byref(usage), byref(req), byref(block)
)
self.read_unlock()
if status:
raise OcfError(f"Failed to retrieve ioclass {io_class_id} stats", status)
return {
"usage": struct_to_dict(usage),
"block": struct_to_dict(block),
"req": struct_to_dict(req),
}
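
As a usage note, a hedged sketch of calling the new accessor (not part of this commit; assumes a started pyocf Cache instance named cache with ioclass 1 configured):

# Hypothetical usage: fetch per-ioclass stats and read the occupancy
# counter, which is the field the new eviction tests below rely on.
stats = cache.get_ioclass_stats(1)
occupancy_4k = stats["usage"]["occupancy"]["value"]
print(f"ioclass 1 occupancy: {occupancy_4k} 4KiB blocks")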

def reset_stats(self):
self.owner.lib.ocf_core_stats_initialize_all(self.cache_handle)
@@ -1081,6 +1103,14 @@ lib.ocf_stats_collect_cache.argtypes = [
c_void_p,
]
lib.ocf_stats_collect_cache.restype = c_int
lib.ocf_stats_collect_part_cache.argtypes = [
c_void_p,
c_uint16,
c_void_p,
c_void_p,
c_void_p,
]
lib.ocf_stats_collect_part_cache.restype = c_int
lib.ocf_cache_get_info.argtypes = [c_void_p, c_void_p]
lib.ocf_cache_get_info.restype = c_int
lib.ocf_mngt_cache_cleaning_set_param.argtypes = [

@@ -108,6 +108,204 @@ def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize
)

@pytest.mark.parametrize("io_dir", IoDir)
@pytest.mark.parametrize(
"cls", [CacheLineSize.LINE_4KiB, CacheLineSize.LINE_16KiB, CacheLineSize.LINE_64KiB]
)
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB])
def test_eviction_priority_1(pyocf_ctx, cls: CacheLineSize, cache_mode: CacheMode, io_dir: IoDir):
"""Verify if data of higher priority is not evicted by low priority data"""
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(200))
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core)
high_prio_ioclass = 1
low_prio_ioclass = 2
cache.configure_partition(
part_id=high_prio_ioclass,
name="high_prio",
max_size=100,
priority=1,
)
cache.configure_partition(
part_id=low_prio_ioclass,
name="low_prio",
max_size=100,
priority=2,
)
def get_ioclass_occupancy(cache, ioclass_id):
return cache.get_ioclass_stats(ioclass_id)["usage"]["occupancy"]["value"]
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
cache_size_4k = cache.get_stats()["conf"]["size"].blocks_4k
cache_line_size_4k = Size(cls).blocks_4k
data = Data(4096)
# Populate cache with high priority data
for i in range(cache_size_4k):
send_io(vol, data, i * 4096, high_prio_ioclass, io_dir)
high_prio_ioclass_occupancy = get_ioclass_occupancy(cache, high_prio_ioclass)
assert isclose(
high_prio_ioclass_occupancy, cache_size_4k, abs_tol=cache_line_size_4k
), "High priority data should occupy the whole cache"
# Write data of lower priority
for i in range(cache_size_4k, 2 * cache_size_4k):
send_io(vol, data, i * 4096, low_prio_ioclass, io_dir)
high_prio_ioclass_occupancy = get_ioclass_occupancy(cache, high_prio_ioclass)
low_prio_ioclass_occupancy = get_ioclass_occupancy(cache, low_prio_ioclass)
assert isclose(
high_prio_ioclass_occupancy, cache_size_4k, abs_tol=cache_line_size_4k
), "High priority data shouldn't be evicted"
assert low_prio_ioclass_occupancy == 0

@pytest.mark.parametrize(
("cache_mode", "io_dir"),
[
(CacheMode.WB, IoDir.READ),
(CacheMode.WT, IoDir.WRITE),
(CacheMode.WT, IoDir.READ),
],
)
@pytest.mark.parametrize("cls", [CacheLineSize.LINE_16KiB, CacheLineSize.LINE_64KiB])
def test_eviction_priority_2(pyocf_ctx, cls: CacheLineSize, cache_mode: CacheMode, io_dir: IoDir):
"""Verify if data of low priority gets evicted by high priority data"""
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(200))
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core)
high_prio_ioclass = 1
low_prio_ioclass = 2
cache.configure_partition(
part_id=high_prio_ioclass,
name="high_prio",
max_size=100,
priority=1,
)
cache.configure_partition(
part_id=low_prio_ioclass,
name="low_prio",
max_size=100,
priority=2,
)
def get_ioclass_occupancy(cache, ioclass_id):
return cache.get_ioclass_stats(ioclass_id)["usage"]["occupancy"]["value"]
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
cache_size_4k = cache.get_stats()["conf"]["size"].blocks_4k
cache_line_size_4k = Size(cls).blocks_4k
data = Data(4096)
# Populate cache with low priority data
for i in range(cache_size_4k):
send_io(vol, data, i * 4096, low_prio_ioclass, io_dir)
low_prio_ioclass_occupancy = get_ioclass_occupancy(cache, low_prio_ioclass)
assert isclose(
low_prio_ioclass_occupancy, cache_size_4k, abs_tol=cache_line_size_4k
), "Low priority data should occupy the whole cache"
# Write data of higher priority
for i in range(cache_size_4k, 2 * cache_size_4k):
send_io(vol, data, i * 4096, high_prio_ioclass, io_dir)
high_prio_ioclass_occupancy = get_ioclass_occupancy(cache, high_prio_ioclass)
low_prio_ioclass_occupancy = get_ioclass_occupancy(cache, low_prio_ioclass)
assert low_prio_ioclass_occupancy == 0, "Low priority data should be evicted from cache"
assert isclose(
high_prio_ioclass_occupancy, cache_size_4k, abs_tol=cache_line_size_4k
), "High priority data should occupy the whole cache"
@pytest.mark.parametrize("io_dir", IoDir)
@pytest.mark.parametrize("cls", [CacheLineSize.LINE_16KiB, CacheLineSize.LINE_64KiB])
@pytest.mark.parametrize("cache_mode", [CacheMode.WT, CacheMode.WB])
def test_eviction_freelist(pyocf_ctx, cls: CacheLineSize, cache_mode: CacheMode, io_dir: IoDir):
"""Verify that no eviction from low priority ioclass occurs if free cachelines are avaliable"""
cache_device = RamVolume(Size.from_MiB(50))
core_device = RamVolume(Size.from_MiB(200))
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode, cache_line_size=cls)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core)
high_prio_ioclass = 1
low_prio_ioclasses = list(range(2, 33))
cache.configure_partition(
part_id=high_prio_ioclass,
name="high_prio",
max_size=100,
priority=1,
)
for low_prio_ioclass in low_prio_ioclasses:
cache.configure_partition(
part_id=low_prio_ioclass,
name=f"low_prio_{low_prio_ioclass}",
max_size=100,
priority=low_prio_ioclass * 5,
)
def get_ioclass_occupancy(cache, ioclass_id):
return cache.get_ioclass_stats(ioclass_id)["usage"]["occupancy"]["value"]
cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
cache_size_4k = cache.get_stats()["conf"]["size"].blocks_4k
cache_line_size_4k = Size(cls).blocks_4k
cache_lines_written = 5
data = Data(4096 * cache_line_size_4k)
expected_occupancy_4k = (cache_lines_written * data.size) / 4096
for i, ioclass in enumerate([high_prio_ioclass] + low_prio_ioclasses):
for j in range(cache_lines_written):
addr = (cache_lines_written * i + j) * data.size
send_io(vol, data, addr, ioclass, io_dir)
assert (
get_ioclass_occupancy(cache, ioclass) == expected_occupancy_4k
), f"Occupancy mismatch for ioclass {ioclass}"
for ioclass in [high_prio_ioclass] + low_prio_ioclasses:
assert (
get_ioclass_occupancy(cache, ioclass) == expected_occupancy_4k
), f"Occupancy mismatch for ioclass {ioclass}"
while cache.get_stats()["usage"]["free"]["value"] > 0:
addr += data.size
send_io(vol, data, addr, high_prio_ioclass, io_dir)
assert cache.get_stats()["usage"]["occupancy"]["value"] == cache_size_4k
for ioclass in low_prio_ioclasses:
assert (
get_ioclass_occupancy(cache, ioclass) == expected_occupancy_4k
), f"Occupancy mismatch for ioclass {ioclass}"

@pytest.mark.parametrize("cls", CacheLineSize)
def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
""" Verify if overflown pinned ioclass is evicted """
@@ -169,14 +367,23 @@ def test_evict_overflown_pinned(pyocf_ctx, cls: CacheLineSize):
cache.get_partition_info(part_id=pinned_ioclass_id)["_curr_size"], cls
)
assert isclose(
part_current_size.blocks_4k, ceil(cache_size.blocks_4k * 0.1), abs_tol=Size(cls).blocks_4k,
part_current_size.blocks_4k,
ceil(cache_size.blocks_4k * 0.1),
abs_tol=Size(cls).blocks_4k,
), "Overflown part has not been evicted"
def send_io(vol: CoreVolume, data: Data, addr: int = 0, target_ioclass: int = 0):
def send_io(
vol: CoreVolume, data: Data, addr: int = 0, target_ioclass: int = 0, io_dir: IoDir = IoDir.WRITE
):
vol.open()
io = vol.new_io(
vol.parent.get_default_queue(), addr, data.size, IoDir.WRITE, target_ioclass, 0,
vol.parent.get_default_queue(),
addr,
data.size,
io_dir,
target_ioclass,
0,
)
io.set_data(data)
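
To close, a hedged usage sketch of the extended helper (illustrative, not part of this commit): the new io_dir parameter lets the eviction tests drive both reads and writes through a given ioclass.

# Hypothetical call site, assuming the vol fixture from the tests above.
data = Data(4096)
send_io(vol, data, addr=0, target_ioclass=2, io_dir=IoDir.READ)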