Merge pull request #485 from arutk/core_stats_fix

Fix eviction occupancy stats decrement
Robert Baldyga 2021-04-01 17:19:11 +02:00 committed by GitHub
commit 6603e958bf
2 changed files with 55 additions and 24 deletions


@@ -11,6 +11,7 @@
 #include "../concurrency/ocf_concurrency.h"
 #include "../mngt/ocf_mngt_common.h"
 #include "../engine/engine_zero.h"
+#include "../ocf_cache_priv.h"
 #include "../ocf_request.h"
 #include "../engine/engine_common.h"
@@ -615,6 +616,7 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
         ocf_cache_line_t cline;
         uint64_t core_line;
         ocf_core_id_t core_id;
+        ocf_core_t core;
         ocf_cache_t cache = req->cache;
         bool cl_write_lock =
                 (req->engine_cbs->get_lock_type(req) == ocf_engine_lock_write);
@@ -666,12 +668,13 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
                 ocf_metadata_end_collision_shared_access(
                                 cache, cline);
 
-                _lru_unlock_hash(&iter, core_id, core_line);
-                env_atomic_dec(&req->core->runtime_meta->cached_clines);
-                env_atomic_dec(&req->core->runtime_meta->
-                        part_counters[part->id].cached_clines);
+                core = ocf_cache_get_core(cache, core_id);
+                env_atomic_dec(&core->runtime_meta->cached_clines);
+                env_atomic_dec(&core->runtime_meta->
+                                part_counters[part->id].cached_clines);
+                _lru_unlock_hash(&iter, core_id, core_line);
 
                 ocf_map_cache_line(req, req_idx, cline);
                 req->map[req_idx].status = LOOKUP_REMAPPED;
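
For illustration, a minimal, self-contained C sketch of the accounting issue this change addresses. The structures and function names below are simplified stand-ins, not the OCF API: a request issued through one core may evict a cache line that currently belongs to another core, so decrementing req->core's counters charges the wrong core; the fix instead resolves the owning core from core_id (via ocf_cache_get_core()) and decrements that core's counters.

/*
 * Simplified model of per-core occupancy accounting (hypothetical types,
 * not the OCF API). The counters of the cache line's owning core must be
 * decremented on eviction, not those of the core that issued the request.
 */
#include <stdio.h>

#define NUM_CORES 2

struct core_meta {
        int cached_clines;              /* per-core occupancy counter */
};

static struct core_meta cores[NUM_CORES];

/* Buggy variant: always charges the core that issued the request. */
static void evict_buggy(int req_core_id, int owner_core_id)
{
        (void)owner_core_id;
        cores[req_core_id].cached_clines--;
}

/* Fixed variant: charges the core that owns the evicted line, analogous to
 * looking it up with ocf_cache_get_core(cache, core_id) in the commit. */
static void evict_fixed(int req_core_id, int owner_core_id)
{
        (void)req_core_id;
        cores[owner_core_id].cached_clines--;
}

int main(void)
{
        /* Core 0 owns 4 cached lines, core 1 owns none. */
        cores[0].cached_clines = 4;
        cores[1].cached_clines = 0;

        /* A request from core 1 evicts a line owned by core 0. */
        evict_buggy(1, 0);
        printf("buggy: core0=%d core1=%d\n",
                        cores[0].cached_clines, cores[1].cached_clines);
        /* prints "buggy: core0=4 core1=-1" - occupancy leaks on core 0 */

        cores[0].cached_clines = 4;
        cores[1].cached_clines = 0;
        evict_fixed(1, 0);
        printf("fixed: core0=%d core1=%d\n",
                        cores[0].cached_clines, cores[1].cached_clines);
        /* prints "fixed: core0=3 core1=0" */

        return 0;
}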


@@ -20,17 +20,43 @@ from pyocf.utils import Size
 logger = logging.getLogger(__name__)
 
 
+@pytest.mark.parametrize("cls", CacheLineSize)
+@pytest.mark.parametrize("mode", [CacheMode.WT])
+def test_eviction_two_cores(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
+    """Test if eviction works correctly when remapping cachelines between distinct cores."""
+    cache_device = Volume(Size.from_MiB(20))
+
+    core_device1 = Volume(Size.from_MiB(40))
+    core_device2 = Volume(Size.from_MiB(40))
+    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
+    cache_size = cache.get_stats()["conf"]["size"]
+    core_exported1 = Core.using_device(core_device1, name="core1")
+    core_exported2 = Core.using_device(core_device2, name="core2")
+    cache.add_core(core_exported1)
+    cache.add_core(core_exported2)
+
+    valid_io_size = Size.from_B(cache_size.B)
+    test_data = Data(valid_io_size)
+    send_io(core_exported1, test_data)
+    send_io(core_exported2, test_data)
+
+    stats1 = core_exported1.get_stats()
+    stats2 = core_exported2.get_stats()
+
+    # IO to the second core should evict all the data from the first core
+    assert stats1["usage"]["occupancy"]["value"] == 0
+    assert stats2["usage"]["occupancy"]["value"] == valid_io_size.blocks_4k
+
+
 @pytest.mark.parametrize("cls", CacheLineSize)
 @pytest.mark.parametrize("mode", [CacheMode.WT, CacheMode.WB, CacheMode.WO])
 def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
-    """Test if eviction does not occur when IO greater than cache size is submitted.
-    """
+    """Test if eviction does not occur when IO greater than cache size is submitted."""
     cache_device = Volume(Size.from_MiB(20))
 
     core_device = Volume(Size.from_MiB(5))
-    cache = Cache.start_on_device(cache_device, cache_mode=mode,
-                                  cache_line_size=cls)
-    cache_size = cache.get_stats()['conf']['size']
+    cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
+    cache_size = cache.get_stats()["conf"]["size"]
     core_exported = Core.using_device(core_device)
     cache.add_core(core_exported)
     cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
@@ -40,11 +66,12 @@ def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLin
     send_io(core_exported, test_data)
 
     stats = core_exported.cache.get_stats()
-    first_block_sts = stats['block']
-    first_usage_sts = stats['usage']
-    pt_writes_first = stats['req']['wr_pt']
-    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
-        "Occupancy after first IO"
+    first_block_sts = stats["block"]
+    first_usage_sts = stats["usage"]
+    pt_writes_first = stats["req"]["wr_pt"]
+    assert stats["usage"]["occupancy"]["value"] == (
+        valid_io_size.B / Size.from_KiB(4).B
+    ), "Occupancy after first IO"
     prev_writes_to_core = stats["block"]["core_volume_wr"]["value"]
 
     # Anything below 5 MiB is a valid size (less than core device size)
@@ -59,20 +86,21 @@ def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLin
     # Flush first write
     cache.flush()
     stats = core_exported.cache.get_stats()
-    second_block_sts = stats['block']
-    second_usage_sts = stats['usage']
-    pt_writes_second = stats['req']['wr_pt']
+    second_block_sts = stats["block"]
+    second_usage_sts = stats["usage"]
+    pt_writes_second = stats["req"]["wr_pt"]
 
     # Second write shouldn't affect cache and should go directly to core.
     # Cache occupancy shouldn't change
     # Second IO should go in PT
-    assert first_usage_sts['occupancy'] == \
-        second_usage_sts['occupancy']
-    assert pt_writes_first['value'] == 0
-    assert pt_writes_second['value'] == 1
-    assert second_block_sts['cache_volume_wr']['value'] == valid_io_size.blocks_4k
-    assert second_block_sts['core_volume_wr']['value'] == valid_io_size.blocks_4k + \
-        io_size_bigger_than_cache.blocks_4k
+    assert first_usage_sts["occupancy"] == second_usage_sts["occupancy"]
+    assert pt_writes_first["value"] == 0
+    assert pt_writes_second["value"] == 1
+    assert second_block_sts["cache_volume_wr"]["value"] == valid_io_size.blocks_4k
+    assert (
+        second_block_sts["core_volume_wr"]["value"]
+        == valid_io_size.blocks_4k + io_size_bigger_than_cache.blocks_4k
+    )
 
 
 @pytest.mark.parametrize("cls", CacheLineSize)