Merge pull request #364 from micrakow/big_req

Force pass-through for requests bigger than cache
Robert Baldyga, 2020-05-06 11:58:09 +02:00 (committed by GitHub)
commit 5d4da8e26c
5 changed files with 43 additions and 23 deletions
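
In short: a request that spans more core lines than the cache has cache lines in total can never be fully cached, so it is now downgraded to pass-through (PT) up front instead of churning through eviction. A minimal sketch of that decision with simplified stand-in types (only the core_line_count vs. conf_meta->cachelines comparison comes from the diff below; every other name here is illustrative):

#include <stdint.h>

enum req_cache_mode { REQ_MODE_AS_CONFIGURED, REQ_MODE_PT };

struct cache_info {
	uint64_t cachelines;            /* total cache lines in the cache */
};

struct request_info {
	uint64_t core_line_count;       /* core lines touched by this request */
	enum req_cache_mode cache_mode;
};

/* Force pass-through when the request cannot possibly fit in the cache. */
static void resolve_effective_cache_mode(const struct cache_info *cache,
		struct request_info *req)
{
	if (req->core_line_count > cache->cachelines)
		req->cache_mode = REQ_MODE_PT;
}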

View File

@@ -181,6 +181,11 @@ void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
 		return;
 	}
 
+	if (req->core_line_count > cache->conf_meta->cachelines) {
+		req->cache_mode = ocf_req_cache_mode_pt;
+		return;
+	}
+
 	if (ocf_core_seq_cutoff_check(core, req)) {
 		req->cache_mode = ocf_req_cache_mode_pt;
 		req->seq_cutoff = 1;
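
For context, core_line_count is the number of cache-line-sized chunks of the core address space that the request touches, so the comparison above asks whether this I/O could ever fit in the cache at all. A rough sketch of how a byte-addressed I/O maps to that count (helper name and exact arithmetic are assumptions, not the OCF internals):

#include <stdint.h>

/* Illustrative only: cache lines spanned by [addr, addr + bytes), bytes > 0. */
static uint64_t bytes_to_core_lines(uint64_t addr, uint64_t bytes,
		uint64_t cache_line_size)
{
	uint64_t first_line = addr / cache_line_size;
	uint64_t last_line = (addr + bytes - 1) / cache_line_size;

	return last_line - first_line + 1;
}

With a 4 KiB cache line, a 2 MiB write needs at least 512 cache lines, which is why the functional test further down can trigger the new path with a modestly sized cache device.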

View File

@@ -7,17 +7,19 @@
 #include "ocf_cache_priv.h"
 #include "ocf_priv.h"
 #include "ocf/ocf_debug.h"
+#include "utils/utils_cache_line.h"
 
 #define SEQ_CUTOFF_FULL_MARGIN \
 		(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
 
-static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
+static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache,
+		struct ocf_request *req)
 {
 	if (!ocf_cache_is_device_attached(cache))
 		return false;
 
 	return (ocf_freelist_num_free(cache->freelist) <=
-			SEQ_CUTOFF_FULL_MARGIN);
+			SEQ_CUTOFF_FULL_MARGIN + req->core_line_count);
 }
 
 static int ocf_seq_cutoff_stream_cmp(struct ocf_rb_node *n1,
@@ -101,9 +103,8 @@ bool ocf_core_seq_cutoff_check(ocf_core_t core, struct ocf_request *req)
 	switch (policy) {
 	case ocf_seq_cutoff_policy_always:
 		break;
 	case ocf_seq_cutoff_policy_full:
-		if (ocf_seq_cutoff_is_on(cache))
+		if (ocf_seq_cutoff_is_on(cache, req))
 			break;
 		return false;
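
The sequential cutoff change has the same intent: under the full policy, cutoff used to engage only when the free list dropped to a fixed margin, and it now also accounts for the size of the incoming request, so a request that the remaining free lines could not cover goes pass-through as well. A sketch of the predicate with assumed types (the margin constant and freelist counter mirror the diff; the concrete number and struct are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Placeholder for OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT. */
#define SEQ_CUTOFF_FULL_MARGIN 512

struct freelist_info {
	uint64_t num_free;      /* cache lines currently free for allocation */
};

/* Treat the cache as "full" for this request when the free lines would not
 * cover the safety margin plus the whole request. */
static bool seq_cutoff_is_on(const struct freelist_info *freelist,
		uint64_t req_core_line_count)
{
	return freelist->num_free <=
			SEQ_CUTOFF_FULL_MARGIN + req_core_line_count;
}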

View File

@@ -204,7 +204,7 @@ static inline void _ocf_purge_cache_line_sec(struct ocf_cache *cache,
 }
 
 /**
- * @brief Purge cache line (remove completely, form collision, move to free
+ * @brief Purge cache line (remove completely, from collision, move to free
  * partition, from cleaning policy and eviction policy)
  *
  * @param req - OCF request to purge

View File

@@ -136,6 +136,10 @@ class Size:
     def sectors(self):
         return self.bytes // self._SECTOR_SIZE
 
+    @property
+    def blocks_4k(self):
+        return self.bytes // 4096
+
     def __str__(self):
         if self.bytes < self._KiB:
             return "{} B".format(self.B)

View File

@@ -21,53 +21,63 @@ logger = logging.getLogger(__name__)
 @pytest.mark.parametrize("cls", CacheLineSize)
 @pytest.mark.parametrize("mode", [CacheMode.WT, CacheMode.WB, CacheMode.WO])
-@pytest.mark.xfail  # TODO: remove when fixed
 def test_write_size_greater_than_cache(pyocf_ctx, mode: CacheMode, cls: CacheLineSize):
     """Test if eviction does not occur when IO greater than cache size is submitted.
     """
-    cache_device = Volume(Size.from_MiB(20))  # this gives about 1.375 MiB actual caching space
+    cache_device = Volume(Size.from_MiB(20))
 
     core_device = Volume(Size.from_MiB(5))
     cache = Cache.start_on_device(cache_device, cache_mode=mode,
                                   cache_line_size=cls)
+    cache_size = cache.get_stats()['conf']['size']
     core_exported = Core.using_device(core_device)
     cache.add_core(core_exported)
    cache.set_seq_cut_off_policy(SeqCutOffPolicy.NEVER)
 
-    valid_io_size = Size.from_KiB(512)
+    valid_io_size = Size.from_B(cache_size.B // 2)
     test_data = Data(valid_io_size)
     send_io(core_exported, test_data)
 
     stats = core_exported.cache.get_stats()
+    first_block_sts = stats['block']
+    first_usage_sts = stats['usage']
+    pt_writes_first = stats['req']['wr_pt']
     assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
         "Occupancy after first IO"
     prev_writes_to_core = stats["block"]["core_volume_wr"]["value"]
 
     # Anything below 5 MiB is a valid size (less than core device size)
-    # Writing over 1.375 MiB in this case should go directly to core and shouldn't trigger eviction
+    # Writing over cache size (to the offset above first io) in this case should go
+    # directly to core and shouldn't trigger eviction
     io_size_bigger_than_cache = Size.from_MiB(2)
+    io_offset = valid_io_size
     test_data = Data(io_size_bigger_than_cache)
-    send_io(core_exported, test_data)
+    send_io(core_exported, test_data, io_offset)
+
+    if mode is not CacheMode.WT:
+        # Flush first write
+        cache.flush()
     stats = core_exported.cache.get_stats()
+    second_block_sts = stats['block']
+    second_usage_sts = stats['usage']
+    pt_writes_second = stats['req']['wr_pt']
 
-    # Writes from IO greater than cache size should go directly to core
-    # Writes to core should equal the following:
-    # Previous writes to core + size written + size cleaned (reads from cache)
-    assert stats["block"]["core_volume_wr"]["value"] == \
-        stats["block"]["cache_volume_rd"]["value"] + \
-        prev_writes_to_core + io_size_bigger_than_cache.B / Size.from_KiB(4).B, \
-        "Writes to core after second IO"
-
-    # Occupancy shouldn't change (no eviction)
-    assert stats["usage"]["occupancy"]["value"] == (valid_io_size.B / Size.from_KiB(4).B),\
-        "Occupancy after second IO"
+    # Second write shouldn't affect cache and should go directly to core.
+    # Cache occupancy shouldn't change
+    # Second IO should go in PT
+    assert first_usage_sts['occupancy'] == \
+        second_usage_sts['occupancy']
+    assert pt_writes_first['value'] == 0
+    assert pt_writes_second['value'] == 1
+    assert second_block_sts['cache_volume_wr']['value'] == valid_io_size.blocks_4k
+    assert second_block_sts['core_volume_wr']['value'] == valid_io_size.blocks_4k + \
+        io_size_bigger_than_cache.blocks_4k
 
 
-def send_io(exported_obj: Core, data: Data):
+def send_io(exported_obj: Core, data: Data, addr: int = 0):
     io = exported_obj.new_io(
         exported_obj.cache.get_default_queue(),
-        0, data.size, IoDir.WRITE, 0, 0
+        addr, data.size, IoDir.WRITE, 0, 0
     )
     io.set_data(data)