commit be628b4088
@@ -162,6 +162,9 @@ typedef enum {
 	ocf_cache_mode_wi,
 		/*!< Write invalidate cache mode */
 
+	ocf_cache_mode_wo,
+		/*!< Write-only cache mode */
+
 	ocf_cache_mode_max,
 		/*!< Stopper of cache mode enumerator */
@@ -695,6 +695,17 @@ typedef void (*ocf_mngt_cache_save_end_t)(ocf_cache_t cache,
 void ocf_mngt_cache_save(ocf_cache_t cache,
 		ocf_mngt_cache_save_end_t cmpl, void *priv);
 
+/**
+ * @brief Determines whether given cache mode has write-back semantics, i.e. it
+ * allows for writes to be serviced in cache and lazily propagated to core.
+ *
+ * @param[in] mode input cache mode
+ */
+static inline bool ocf_mngt_cache_mode_has_lazy_write(ocf_cache_mode_t mode)
+{
+	return mode == ocf_cache_mode_wb || mode == ocf_cache_mode_wo;
+}
+
 /**
  * @brief Set cache mode in given cache
  *
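This predicate gives management code a single place to ask whether a mode can hold dirty data, instead of comparing against ocf_cache_mode_wb directly. A minimal usage sketch (the helper below is hypothetical; it mirrors the _cache_mngt_set_cache_mode() change later in this commit):

	/* Hypothetical helper: a mode switch only needs to recount dirty
	 * cache lines when leaving a lazy-write mode for a non-lazy one. */
	static bool switch_needs_dirty_recount(ocf_cache_mode_t from,
			ocf_cache_mode_t to)
	{
		return ocf_mngt_cache_mode_has_lazy_write(from) &&
				!ocf_mngt_cache_mode_has_lazy_write(to);
	}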
@@ -15,6 +15,7 @@
 #include "engine_wi.h"
 #include "engine_wa.h"
 #include "engine_wb.h"
+#include "engine_wo.h"
 #include "engine_fast.h"
 #include "engine_discard.h"
 #include "engine_d2c.h"
@@ -32,6 +33,7 @@ enum ocf_io_if_type {
 	OCF_IO_WA_IF,
 	OCF_IO_WI_IF,
 	OCF_IO_PT_IF,
+	OCF_IO_WO_IF,
 	OCF_IO_MAX_IF,
 
 	/* Private OCF interfaces */
@@ -68,6 +70,11 @@ static const struct ocf_io_if IO_IFS[OCF_IO_PRIV_MAX_IF] = {
 		.write = ocf_write_wi,
 		.name = "Pass Through",
 	},
+	[OCF_IO_WO_IF] = {
+		.read = ocf_read_wo,
+		.write = ocf_write_wb,
+		.name = "Write Only",
+	},
 	[OCF_IO_FAST_IF] = {
 		.read = ocf_read_fast,
 		.write = ocf_write_fast,
@@ -95,6 +102,7 @@ static const struct ocf_io_if *cache_mode_io_if_map[ocf_req_cache_mode_max] = {
 	[ocf_req_cache_mode_wb] = &IO_IFS[OCF_IO_WB_IF],
 	[ocf_req_cache_mode_wa] = &IO_IFS[OCF_IO_WA_IF],
 	[ocf_req_cache_mode_wi] = &IO_IFS[OCF_IO_WI_IF],
+	[ocf_req_cache_mode_wo] = &IO_IFS[OCF_IO_WO_IF],
 	[ocf_req_cache_mode_pt] = &IO_IFS[OCF_IO_PT_IF],
 	[ocf_req_cache_mode_fast] = &IO_IFS[OCF_IO_FAST_IF],
 	[ocf_req_cache_mode_d2c] = &IO_IFS[OCF_IO_D2C_IF],
@@ -20,6 +20,7 @@ typedef enum {
 	ocf_req_cache_mode_wa = ocf_cache_mode_wa,
 	ocf_req_cache_mode_pt = ocf_cache_mode_pt,
 	ocf_req_cache_mode_wi = ocf_cache_mode_wi,
+	ocf_req_cache_mode_wo = ocf_cache_mode_wo,
 
 	/* internal modes */
 	ocf_req_cache_mode_fast,
@@ -58,6 +59,13 @@ static inline bool ocf_cache_mode_is_valid(ocf_cache_mode_t mode)
 	return mode >= ocf_cache_mode_wt && mode < ocf_cache_mode_max;
 }
 
+static inline bool ocf_req_cache_mode_has_lazy_write(ocf_req_cache_mode_t mode)
+{
+	return ocf_cache_mode_is_valid((ocf_cache_mode_t)mode) &&
+			ocf_mngt_cache_mode_has_lazy_write(
+					(ocf_cache_mode_t)mode);
+}
+
 void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);
 
 bool ocf_fallback_pt_is_on(ocf_cache_t cache);
@@ -84,8 +84,8 @@ static int _ocf_backfill_do(struct ocf_request *req)
 
 	req->data = req->cp_data;
 
-	ocf_submit_cache_reqs(req->cache, req->map, req, OCF_WRITE, reqs_to_issue,
-			_ocf_backfill_complete);
+	ocf_submit_cache_reqs(req->cache, req, OCF_WRITE, 0, req->byte_length,
+			reqs_to_issue, _ocf_backfill_complete);
 
 	return 0;
 }
@@ -95,15 +95,8 @@ void ocf_engine_update_req_info(struct ocf_cache *cache,
 	uint8_t end_sector = ocf_line_end_sector(cache);
 	struct ocf_map_info *_entry = &(req->map[entry]);
 
-	if (entry == 0) {
-		start_sector = BYTES_TO_SECTORS(req->byte_position)
-				% ocf_line_sectors(cache);
-	}
-
-	if (entry == req->core_line_count - 1) {
-		end_sector = BYTES_TO_SECTORS(req->byte_position +
-				req->byte_length - 1)% ocf_line_sectors(cache);
-	}
+	start_sector = ocf_map_line_start_sector(req, entry);
+	end_sector = ocf_map_line_end_sector(req, entry);
 
 	/* Handle return value */
 	switch (_entry->status) {
@@ -7,6 +7,7 @@
 #define ENGINE_COMMON_H_
 
 #include "../ocf_request.h"
+#include "../utils/utils_cache_line.h"
 
 /**
  * @file engine_common.h
@@ -109,6 +110,37 @@ static inline uint32_t ocf_engine_io_count(struct ocf_request *req)
 	return req->info.seq_req ? 1 : req->core_line_count;
 }
 
+static inline
+bool ocf_engine_map_all_sec_dirty(struct ocf_request *req, uint32_t line)
+{
+	uint8_t start = ocf_map_line_start_sector(req, line);
+	uint8_t end = ocf_map_line_end_sector(req, line);
+
+	if (req->map[line].status != LOOKUP_HIT)
+		return false;
+
+	return metadata_test_dirty_all_sec(req->cache, req->map[line].coll_idx,
+			start, end);
+}
+
+static inline
+bool ocf_engine_map_all_sec_clean(struct ocf_request *req, uint32_t line)
+{
+	uint8_t start = ocf_map_line_start_sector(req, line);
+	uint8_t end = ocf_map_line_end_sector(req, line);
+
+	if (req->map[line].status != LOOKUP_HIT)
+		return false;
+
+	if (!metadata_test_valid_sec(req->cache, req->map[line].coll_idx,
+			start, end)) {
+		return false;
+	}
+
+	return !metadata_test_dirty_sec(req->cache, req->map[line].coll_idx,
+			start, end);
+}
+
 /**
  * @brief Clean request (flush dirty data to the core device)
  *
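Together these two predicates split a mapped line into three cases, and the write-only read engine added below uses exactly this classification to choose between whole-line IO and a sector-by-sector walk. A sketch (the classifier itself is hypothetical; the predicates are the ones above):

	enum wo_line_class { WO_LINE_CLEAN, WO_LINE_DIRTY, WO_LINE_MIXED };

	static enum wo_line_class wo_classify_line(struct ocf_request *req,
			uint32_t line)
	{
		/* A miss or an all-clean line: core data is current */
		if (req->map[line].status == LOOKUP_MISS ||
				ocf_engine_map_all_sec_clean(req, line))
			return WO_LINE_CLEAN;
		/* Every sector dirty: this range must come from cache */
		if (ocf_engine_map_all_sec_dirty(req, line))
			return WO_LINE_DIRTY;
		/* Mixed: inspect dirty bits sector by sector */
		return WO_LINE_MIXED;
	}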
@@ -87,7 +87,7 @@ static int _ocf_read_fast_do(struct ocf_request *req)
 	/* Submit IO */
 	OCF_DEBUG_RQ(req, "Submit");
 	env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
-	ocf_submit_cache_reqs(req->cache, req->map, req, OCF_READ,
+	ocf_submit_cache_reqs(req->cache, req, OCF_READ, 0, req->byte_length,
 			ocf_engine_io_count(req), _ocf_read_fast_complete);
@@ -51,7 +51,7 @@ int ocf_engine_ops(struct ocf_request *req)
 		ocf_submit_volume_req(&req->core->volume, req,
 				_ocf_engine_ops_complete);
 
-	ocf_submit_cache_reqs(cache, req->map, req, req->rw,
+	ocf_submit_cache_reqs(cache, req, req->rw, 0, req->byte_length,
 			1, _ocf_engine_ops_complete);
 
 	/* Put OCF request - decrease reference counter */
@@ -105,7 +105,7 @@ static inline void _ocf_read_generic_submit_hit(struct ocf_request *req)
 {
 	env_atomic_set(&req->req_remaining, ocf_engine_io_count(req));
 
-	ocf_submit_cache_reqs(req->cache, req->map, req, OCF_READ,
+	ocf_submit_cache_reqs(req->cache, req, OCF_READ, 0, req->byte_length,
 			ocf_engine_io_count(req), _ocf_read_generic_hit_complete);
 }
@@ -135,7 +135,7 @@ err_alloc:
 	_ocf_read_generic_miss_complete(req, -OCF_ERR_NO_MEM);
 }
 
-static int _ocf_read_generic_do(struct ocf_request *req)
+int ocf_read_generic_do(struct ocf_request *req)
 {
 	struct ocf_cache *cache = req->cache;
 
@@ -195,7 +195,7 @@ static int _ocf_read_generic_do(struct ocf_request *req)
 	else
 		_ocf_read_generic_submit_miss(req);
 
-	/* Updata statistics */
+	/* Update statistics */
 	ocf_engine_update_request_stats(req);
 	ocf_engine_update_block_stats(req);
 
@@ -206,8 +206,8 @@ static int _ocf_read_generic_do(struct ocf_request *req)
 }
 
 static const struct ocf_io_if _io_if_read_generic_resume = {
-	.read = _ocf_read_generic_do,
-	.write = _ocf_read_generic_do,
+	.read = ocf_read_generic_do,
+	.write = ocf_read_generic_do,
 	.resume = ocf_engine_on_resume,
 };
@@ -294,7 +294,7 @@ int ocf_read_generic(struct ocf_request *req)
 			OCF_DEBUG_RQ(req, "NO LOCK");
 		} else {
 			/* Lock was acquired can perform IO */
-			_ocf_read_generic_do(req);
+			ocf_read_generic_do(req);
 		}
 	} else {
 		OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
@@ -8,4 +8,6 @@
 
 int ocf_read_generic(struct ocf_request *req);
 
+int ocf_read_generic_do(struct ocf_request *req);
+
 #endif /* ENGINE_RD_H_ */
@@ -141,7 +141,7 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req)
 	OCF_DEBUG_RQ(req, "Submit Data");
 
 	/* Data IO */
-	ocf_submit_cache_reqs(cache, req->map, req, OCF_WRITE,
+	ocf_submit_cache_reqs(cache, req, OCF_WRITE, 0, req->byte_length,
 			ocf_engine_io_count(req), _ocf_write_wb_complete);
 }
--- /dev/null
+++ b/src/engine/engine_wo.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "cache_engine.h"
+#include "engine_common.h"
+#include "engine_rd.h"
+#include "engine_pt.h"
+#include "../metadata/metadata.h"
+#include "../utils/utils_io.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_part.h"
+#include "../concurrency/ocf_concurrency.h"
+
+#define OCF_ENGINE_DEBUG_IO_NAME "wo"
+#include "engine_debug.h"
+
+static void ocf_read_wo_cache_complete(struct ocf_request *req, int error)
+{
+	if (error) {
+		env_atomic_inc(&req->core->counters->cache_errors.read);
+		req->error |= error;
+	}
+
+	if (env_atomic_dec_return(&req->req_remaining))
+		return;
+
+	OCF_DEBUG_RQ(req, "Completion");
+
+	if (req->error)
+		ocf_engine_error(req, true, "Failed to read data from cache");
+
+	ocf_req_unlock_rd(req);
+
+	/* Complete request */
+	req->complete(req, req->error);
+
+	/* Release OCF request */
+	ocf_req_put(req);
+}
+
+static void ocf_read_wo_cache_io(struct ocf_request *req, uint64_t offset,
+		uint64_t size)
+{
+	OCF_DEBUG_RQ(req, "Submit cache");
+	env_atomic_inc(&req->req_remaining);
+	ocf_submit_cache_reqs(req->cache, req, OCF_READ, offset, size, 1,
+			ocf_read_wo_cache_complete);
+}
+
+static int ocf_read_wo_cache_do(struct ocf_request *req)
+{
+	ocf_cache_t cache = req->cache;
+	uint32_t s, e, i;
+	uint64_t line;
+	struct ocf_map_info *entry;
+	bool dirty = false;
+	bool io = false;
+	uint64_t phys_prev, phys_curr = 0;
+	uint64_t io_start = 0;
+	uint64_t offset = 0;
+	uint64_t increment;
+
+	env_atomic_set(&req->req_remaining, 1);
+
+	for (line = 0; line < req->core_line_count; ++line) {
+		entry = &req->map[line];
+		s = ocf_map_line_start_sector(req, line);
+		e = ocf_map_line_end_sector(req, line);
+
+		/* if cacheline mapping is not sequential, send cache IO to
+		 * previous cacheline(s) */
+		phys_prev = phys_curr;
+		if (entry->status != LOOKUP_MISS)
+			phys_curr = ocf_metadata_map_lg2phy(cache,
+					entry->coll_idx);
+		if (io && phys_prev + 1 != phys_curr) {
+			ocf_read_wo_cache_io(req, io_start, offset - io_start);
+			io = false;
+		}
+
+		/* try to seek directly to the last sector */
+		if (entry->status == LOOKUP_MISS ||
+				ocf_engine_map_all_sec_clean(req, line)) {
+			/* all sectors invalid or clean */
+			i = e + 1;
+			increment = SECTORS_TO_BYTES(e - s + 1);
+			dirty = false;
+		}
+		else if (ocf_engine_map_all_sec_dirty(req, line)) {
+			/* all sectors dirty */
+			i = e + 1;
+			increment = SECTORS_TO_BYTES(e - s + 1);
+			dirty = true;
+		} else {
+			/* need to iterate through CL sector by sector */
+			i = s;
+		}
+
+		do {
+			if (i <= e) {
+				dirty = metadata_test_dirty_one(cache,
+						entry->coll_idx, i);
+				increment = 0;
+				do {
+					++i;
+					increment += SECTORS_TO_BYTES(1);
+				} while (i <= e && metadata_test_dirty_one(
+						cache, entry->coll_idx, i)
+						== dirty);
+			}
+
+			if (io && !dirty) {
+				/* end of sequential dirty region */
+				ocf_read_wo_cache_io(req, io_start,
+						offset - io_start);
+				io = false;
+			}
+
+			if (!io && dirty) {
+				/* beginning of sequential dirty region */
+				io = true;
+				io_start = offset;
+			}
+
+			offset += increment;
+		} while (i <= e);
+	}
+
+	if (io)
+		ocf_read_wo_cache_io(req, io_start, offset - io_start);
+
+	ocf_read_wo_cache_complete(req, 0);
+
+	return 0;
+}
+
+static const struct ocf_io_if _io_if_wo_cache_read = {
+	.read = ocf_read_wo_cache_do,
+	.write = ocf_read_wo_cache_do,
+};
+
+static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
+{
+	if (error) {
+		req->error |= error;
+		req->info.core_error = 1;
+		env_atomic_inc(&req->core->counters->core_errors.read);
+	}
+
+	/* if all mapped cachelines are clean, the data we've read from core
+	 * is valid and we can complete the request */
+	if (!req->info.dirty_any || req->error) {
+		OCF_DEBUG_RQ(req, "Completion");
+		req->complete(req, req->error);
+		ocf_req_unlock_rd(req);
+		ocf_req_put(req);
+		return;
+	}
+
+	req->io_if = &_io_if_wo_cache_read;
+	ocf_engine_push_req_front(req, true);
+}
+
+int ocf_read_wo_do(struct ocf_request *req)
+{
+	ocf_cache_t cache = req->cache;
+
+	if (ocf_engine_is_hit(req)) {
+		/* read hit - just fetch the data from cache using standard
+		 * read path */
+		ocf_read_generic_do(req);
+		return 0;
+	}
+
+	ocf_req_get(req);
+
+	if (req->info.re_part) {
+		OCF_DEBUG_RQ(req, "Re-Part");
+
+		OCF_METADATA_LOCK_WR();
+
+		/* Probably some cache lines are assigned into wrong
+		 * partition. Need to move it to new one
+		 */
+		ocf_part_move(req);
+
+		OCF_METADATA_UNLOCK_WR();
+	}
+
+	OCF_DEBUG_RQ(req, "Submit core");
+	ocf_submit_volume_req(&req->core->volume, req, _ocf_read_wo_core_complete);
+
+	ocf_engine_update_request_stats(req);
+	ocf_engine_update_block_stats(req);
+
+	ocf_req_put(req);
+
+	return 0;
+}
+
+static const struct ocf_io_if _io_if_wo_resume = {
+	.read = ocf_read_wo_do,
+	.write = ocf_read_wo_do,
+	.resume = ocf_engine_on_resume,
+};
+
+int ocf_read_wo(struct ocf_request *req)
+{
+	ocf_cache_t cache = req->cache;
+	int lock = OCF_LOCK_ACQUIRED;
+
+	OCF_DEBUG_TRACE(req->cache);
+
+	ocf_io_start(req->io);
+
+	/* Get OCF request - increase reference counter */
+	ocf_req_get(req);
+
+	/* Set resume call backs */
+	req->io_if = &_io_if_wo_resume;
+
+	OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/
+
+	/* Traverse request to check if there are mapped cache lines */
+	ocf_engine_traverse(req);
+
+	if (ocf_engine_mapped_count(req)) {
+		/* There are mapped cache lines,
+		 * lock request for READ access
+		 */
+		lock = ocf_req_trylock_rd(req);
+	}
+
+	OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/
+
+	if (lock >= 0) {
+		if (lock != OCF_LOCK_ACQUIRED) {
+			/* Lock was not acquired, need to wait for resume */
+			OCF_DEBUG_RQ(req, "NO LOCK");
+		} else {
+			/* Lock was acquired can perform IO */
+			ocf_read_wo_do(req);
+		}
+	} else {
+		OCF_DEBUG_RQ(req, "LOCK ERROR %d", lock);
+		req->complete(req, lock);
+		ocf_req_put(req);
+	}
+
+	/* Put OCF request - decrease reference counter */
+	ocf_req_put(req);
+
+	return 0;
+}
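The control flow above reads the full range from the core device first and only then patches in dirty sectors from cache. A condensed outline as a reading aid (not code from this commit):

	/*
	 * ocf_read_wo()            take read lock, set resume io_if
	 *   -> ocf_read_wo_do()    full hit? delegate to ocf_read_generic_do();
	 *                          otherwise submit one core read for the
	 *                          whole request range
	 *   -> _ocf_read_wo_core_complete()
	 *                          no dirty lines? complete the request;
	 *                          else requeue with _io_if_wo_cache_read
	 *   -> ocf_read_wo_cache_do()
	 *                          walk sectors, coalesce runs that are dirty
	 *                          and physically contiguous in cache, overlay
	 *                          each run via ocf_submit_cache_reqs(cache,
	 *                          req, OCF_READ, offset, size, ...)
	 */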
--- /dev/null
+++ b/src/engine/engine_wo.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright(c) 2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef ENGINE_WO_H_
+#define ENGINE_WO_H_
+
+int ocf_read_wo(struct ocf_request *req);
+
+#endif /* ENGINE_WO_H_ */
@@ -87,7 +87,7 @@ static inline void _ocf_write_wt_submit(struct ocf_request *req)
 	}
 
 	/* To cache */
-	ocf_submit_cache_reqs(cache, req->map, req, OCF_WRITE,
+	ocf_submit_cache_reqs(cache, req, OCF_WRITE, 0, req->byte_length,
 			ocf_engine_io_count(req), _ocf_write_wt_cache_complete);
 
 	/* To core */
@@ -145,7 +145,7 @@ static int _ocf_write_wt_do(struct ocf_request *req)
 	/* Submit IO */
 	_ocf_write_wt_submit(req);
 
-	/* Updata statistics */
+	/* Update statistics */
 	ocf_engine_update_request_stats(req);
 	ocf_engine_update_block_stats(req);
 
@@ -1600,6 +1600,7 @@ static const char *_ocf_cache_mode_names[ocf_cache_mode_max] = {
 	[ocf_cache_mode_wa] = "wa",
 	[ocf_cache_mode_pt] = "pt",
 	[ocf_cache_mode_wi] = "wi",
+	[ocf_cache_mode_wo] = "wo",
 };
 
 static const char *_ocf_cache_mode_get_name(ocf_cache_mode_t cache_mode)
@@ -2146,8 +2147,10 @@ static int _cache_mngt_set_cache_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
 
 	cache->conf_meta->cache_mode = mode;
 
-	if (mode_old == ocf_cache_mode_wb)
+	if (ocf_mngt_cache_mode_has_lazy_write(mode_old) &&
+			!ocf_mngt_cache_mode_has_lazy_write(mode)) {
 		_cache_mngt_update_initial_dirty_clines(cache);
+	}
 
 	ocf_cache_log(cache, log_info, "Changing cache mode from '%s' to '%s' "
 			"successful\n", ocf_get_io_iface_name(mode_old),
@@ -251,7 +251,9 @@ void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
 
 	if (cache_mode == ocf_cache_mode_none)
 		req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
-	if (req_cache_mode == ocf_req_cache_mode_wb &&
+
+	if (io->dir == OCF_WRITE &&
+			ocf_req_cache_mode_has_lazy_write(req_cache_mode) &&
 			ocf_io_set_dirty(cache, core_io)) {
 		req_cache_mode = ocf_req_cache_mode_wt;
 	}
@@ -319,7 +321,9 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
 	}
 
 	req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
-	if (req_cache_mode == ocf_req_cache_mode_wb &&
+
+	if (io->dir == OCF_WRITE &&
+			ocf_req_cache_mode_has_lazy_write(req_cache_mode) &&
 			ocf_io_set_dirty(cache, core_io)) {
 		req_cache_mode = ocf_req_cache_mode_wt;
 	}
@@ -328,6 +332,7 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
 	case ocf_req_cache_mode_pt:
 		return -OCF_ERR_IO;
 	case ocf_req_cache_mode_wb:
+	case ocf_req_cache_mode_wo:
 		req_cache_mode = ocf_req_cache_mode_fast;
 		break;
 	default:
@@ -144,7 +144,7 @@ struct ocf_request {
 	/*!< Copy of request data */
 
 	uint64_t byte_position;
-	/*!< LBA byte position of request in code domain */
+	/*!< LBA byte position of request in core domain */
 
 	uint64_t core_line_first;
 	/*! First core line */
@@ -229,6 +229,29 @@ static inline void ocf_purge_map_info(struct ocf_request *req)
 	}
 }
 
+static inline
+uint8_t ocf_map_line_start_sector(struct ocf_request *req, uint32_t line)
+{
+	if (line == 0) {
+		return BYTES_TO_SECTORS(req->byte_position)
+				% ocf_line_sectors(req->cache);
+	}
+
+	return 0;
+}
+
+static inline
+uint8_t ocf_map_line_end_sector(struct ocf_request *req, uint32_t line)
+{
+	if (line == req->core_line_count - 1) {
+		return BYTES_TO_SECTORS(req->byte_position +
+				req->byte_length - 1) %
+				ocf_line_sectors(req->cache);
+	}
+
+	return ocf_line_end_sector(req->cache);
+}
+
 static inline void ocf_set_valid_map_info(struct ocf_request *req)
 {
 	uint32_t map_idx = 0;
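These two helpers centralize the first/last-line sector arithmetic that several call sites below previously open-coded. A worked example (assumed geometry: 512 B sectors and a 4 KiB cache line, so ocf_line_sectors() == 8):

	/* byte_position = 1536, byte_length = 9728 -> core lines 0..2:
	 *   line 0: start = (1536 / 512) % 8 = 3, end = 7   (tail of line)
	 *   line 1: start = 0, end = 7                      (whole line)
	 *   line 2: start = 0, end = (11263 / 512) % 8 = 5  (head of line)
	 */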
@@ -244,27 +267,11 @@ static inline void ocf_set_valid_map_info(struct ocf_request *req)
 	 * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
 	 * |   first  |           Middle          |   last   |
 	 */
 
 	for (map_idx = 0; map_idx < count; map_idx++) {
 		ENV_BUG_ON(map[map_idx].status == LOOKUP_MISS);
 
-		start_bit = 0;
-		end_bit = ocf_line_end_sector(cache);
-
-		if (map_idx == 0) {
-			/* First */
-
-			start_bit = BYTES_TO_SECTORS(req->byte_position)
-					% ocf_line_sectors(cache);
-		}
-
-		if (map_idx == (count - 1)) {
-			/* Last */
-
-			end_bit = BYTES_TO_SECTORS(req->byte_position +
-					req->byte_length - 1)
-					% ocf_line_sectors(cache);
-		}
+		start_bit = ocf_map_line_start_sector(req, map_idx);
+		end_bit = ocf_map_line_end_sector(req, map_idx);
 
 		set_cache_line_valid(cache, start_bit, end_bit, req, map_idx);
 	}
@@ -286,24 +293,8 @@ static inline void ocf_set_dirty_map_info(struct ocf_request *req)
 	 */
 
 	for (map_idx = 0; map_idx < count; map_idx++) {
-		start_bit = 0;
-		end_bit = ocf_line_end_sector(cache);
-
-		if (map_idx == 0) {
-			/* First */
-
-			start_bit = BYTES_TO_SECTORS(req->byte_position)
-					% ocf_line_sectors(cache);
-		}
-
-		if (map_idx == (count - 1)) {
-			/* Last */
-
-			end_bit = BYTES_TO_SECTORS(req->byte_position +
-					req->byte_length - 1) %
-					ocf_line_sectors(cache);
-		}
-
+		start_bit = ocf_map_line_start_sector(req, map_idx);
+		end_bit = ocf_map_line_end_sector(req, map_idx);
 		set_cache_line_dirty(cache, start_bit, end_bit, req, map_idx);
 	}
 }
@@ -324,25 +315,8 @@ static inline void ocf_set_clean_map_info(struct ocf_request *req)
 	 */
 
 	for (map_idx = 0; map_idx < count; map_idx++) {
-		start_bit = 0;
-		end_bit = ocf_line_end_sector(cache);
-
-		if (map_idx == 0) {
-			/* First */
-
-			start_bit = BYTES_TO_SECTORS(req->byte_position)
-					% ocf_line_sectors(cache);
-		}
-
-		if (map_idx == (count - 1)) {
-			/* Last */
-
-			end_bit = BYTES_TO_SECTORS(req->byte_position +
-					req->byte_length - 1) %
-					ocf_line_sectors(cache);
-
-		}
-
+		start_bit = ocf_map_line_start_sector(req, map_idx);
+		end_bit = ocf_map_line_end_sector(req, map_idx);
 		set_cache_line_clean(cache, start_bit, end_bit, req, map_idx);
 	}
 }
@@ -226,16 +226,22 @@ static void ocf_submit_volume_req_cmpl(struct ocf_io *io, int error)
 }
 
 void ocf_submit_cache_reqs(struct ocf_cache *cache,
-		struct ocf_map_info *map_info, struct ocf_request *req, int dir,
-		unsigned int reqs, ocf_req_end_t callback)
+		struct ocf_request *req, int dir, uint64_t offset,
+		uint64_t size, unsigned int reqs, ocf_req_end_t callback)
 {
 	struct ocf_counters_block *cache_stats;
 	uint64_t flags = req->io ? req->io->flags : 0;
 	uint32_t class = req->io ? req->io->io_class : 0;
 	uint64_t addr, bytes, total_bytes = 0;
 	struct ocf_io *io;
-	uint32_t i;
 	int err;
+	uint32_t i;
+	uint32_t entry = ocf_bytes_2_lines(cache, req->byte_position + offset) -
+			ocf_bytes_2_lines(cache, req->byte_position);
+	struct ocf_map_info *map_info = &req->map[entry];
+
+	ENV_BUG_ON(req->byte_length < offset + size);
+	ENV_BUG_ON(entry + reqs > req->core_line_count);
 
 	cache_stats = &req->core->counters->cache_blocks;
 
@@ -250,14 +256,14 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
 				map_info[0].coll_idx);
 		addr *= ocf_line_size(cache);
 		addr += cache->device->metadata_offset;
-		addr += (req->byte_position % ocf_line_size(cache));
-		bytes = req->byte_length;
+		addr += ((req->byte_position + offset) % ocf_line_size(cache));
+		bytes = size;
 
 		ocf_io_configure(io, addr, bytes, dir, class, flags);
 		ocf_io_set_queue(io, req->io_queue);
 		ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
 
-		err = ocf_io_set_data(io, req->data, 0);
+		err = ocf_io_set_data(io, req->data, offset);
 		if (err) {
 			ocf_io_put(io);
 			callback(req, err);
@@ -265,7 +271,7 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
 		}
 
 		ocf_volume_submit_io(io);
-		total_bytes = req->byte_length;
+		total_bytes = bytes;
 
 		goto update_stats;
 	}
@@ -288,24 +294,27 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
 		bytes = ocf_line_size(cache);
 
 		if (i == 0) {
-			uint64_t seek = (req->byte_position %
+			uint64_t seek = ((req->byte_position + offset) %
 					ocf_line_size(cache));
 
 			addr += seek;
 			bytes -= seek;
 		} else if (i == (reqs - 1)) {
 			uint64_t skip = (ocf_line_size(cache) -
-					((req->byte_position + req->byte_length) %
+					((req->byte_position + offset + size) %
 					ocf_line_size(cache))) % ocf_line_size(cache);
 
 			bytes -= skip;
 		}
 
+		bytes = OCF_MIN(bytes, size - total_bytes);
+		ENV_BUG_ON(bytes == 0);
+
 		ocf_io_configure(io, addr, bytes, dir, class, flags);
 		ocf_io_set_queue(io, req->io_queue);
 		ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
 
-		err = ocf_io_set_data(io, req->data, total_bytes);
+		err = ocf_io_set_data(io, req->data, offset + total_bytes);
 		if (err) {
 			ocf_io_put(io);
 			/* Finish all IOs which left with ERROR */
@@ -317,6 +326,8 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
 		total_bytes += bytes;
 	}
 
+	ENV_BUG_ON(total_bytes != size);
+
 update_stats:
 	if (dir == OCF_WRITE)
 		env_atomic64_add(total_bytes, &cache_stats->write_bytes);
@@ -60,10 +60,9 @@ void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
 void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
 		ocf_req_end_t callback);
 
-
 void ocf_submit_cache_reqs(struct ocf_cache *cache,
-		struct ocf_map_info *map_info, struct ocf_request *req, int dir,
-		unsigned int reqs, ocf_req_end_t callback);
+		struct ocf_request *req, int dir, uint64_t offset,
+		uint64_t size, unsigned int reqs, ocf_req_end_t callback);
 
 static inline struct ocf_io *ocf_new_cache_io(struct ocf_cache *cache)
 {
@@ -74,8 +74,17 @@ class CacheMode(IntEnum):
     WA = 2
     PT = 3
     WI = 4
+    WO = 5
     DEFAULT = WT
 
+    def lazy_write(self):
+        return self.value in [CacheMode.WB, CacheMode.WO]
+
+    def write_insert(self):
+        return self.value not in [CacheMode.PT, CacheMode.WA, CacheMode.WI]
+
+    def read_insert(self):
+        return self.value not in [CacheMode.PT, CacheMode.WO]
+
 
 class EvictionPolicy(IntEnum):
     LRU = 0
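The three predicates encode, per mode, whether writes may stay dirty in cache (lazy_write), whether writes allocate cache lines (write_insert), and whether reads do (read_insert). The table they produce, as a quick check (assumes the pyocf package from this tree is importable):

    from pyocf.types.cache import CacheMode

    # mode: (lazy_write, write_insert, read_insert)
    expected = {
        CacheMode.WT: (False, True, True),
        CacheMode.WB: (True, True, True),
        CacheMode.WA: (False, False, True),
        CacheMode.PT: (False, False, False),
        CacheMode.WI: (False, False, True),
        CacheMode.WO: (True, True, False),
    }
    for mode, flags in expected.items():
        assert (mode.lazy_write(), mode.write_insert(), mode.read_insert()) == flags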
@@ -21,7 +21,7 @@ from enum import IntEnum
 from hashlib import md5
 import weakref
 
-from ..utils import print_buffer
+from ..utils import print_buffer, Size as S
 
 
 class DataSeek(IntEnum):
@@ -96,16 +96,24 @@ class Data:
         return cls(pages * Data.PAGE_SIZE)
 
     @classmethod
-    def from_bytes(cls, source: bytes):
-        d = cls(len(source))
+    def from_bytes(cls, source: bytes, offset: int = 0, size: int = 0):
+        if size == 0:
+            size = len(source) - offset
+        d = cls(size)
 
-        memmove(d.handle, cast(source, c_void_p), len(source))
+        memmove(d.handle, cast(source, c_void_p).value + offset, size)
 
         return d
 
     @classmethod
     def from_string(cls, source: str, encoding: str = "ascii"):
-        return cls.from_bytes(bytes(source, encoding))
+        b = bytes(source, encoding)
+        # duplicate string to fill space up to sector boundary
+        padding_len = S.from_B(len(b), sector_aligned=True).B - len(b)
+        padding = b * (padding_len // len(b) + 1)
+        padding = padding[:padding_len]
+        b = b + padding
+        return cls.from_bytes(b)
 
     @staticmethod
     @DataOps.ALLOC
@@ -108,9 +108,9 @@ class Io(Structure):
             byref(self), addr, length, direction, io_class, flags
         )
 
-    def set_data(self, data: Data):
+    def set_data(self, data: Data, offset: int = 0):
         self.data = data
-        OcfLib.getInstance().ocf_io_set_data_wrapper(byref(self), data, 0)
+        OcfLib.getInstance().ocf_io_set_data_wrapper(byref(self), data, offset)
 
     def set_queue(self, queue: Queue):
         OcfLib.getInstance().ocf_io_set_queue_wrapper(byref(self), queue.handle)
@@ -70,7 +70,7 @@ class VolumeProperties(Structure):
 
 
 class VolumeIoPriv(Structure):
-    _fields_ = [("_data", c_void_p)]
+    _fields_ = [("_data", c_void_p), ("_offset", c_uint64)]
 
 
 class Volume(Structure):
@@ -220,7 +220,7 @@ class Volume(Structure):
             OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv)
         )
         data = Data.get_instance(data)
-        data.position = offset
+        io_priv.contents._offset = offset
         io_priv.contents._data = data.handle
 
         return 0
@@ -268,16 +268,22 @@ class Volume(Structure):
         try:
             self.stats[IoDir(io.contents._dir)] += 1
 
+            io_priv = cast(
+                OcfLib.getInstance().ocf_io_get_priv(io), POINTER(VolumeIoPriv)
+            )
+            offset = io_priv.contents._offset
+
             if io.contents._dir == IoDir.WRITE:
                 src_ptr = cast(io.contents._ops.contents._get_data(io), c_void_p)
-                src = Data.get_instance(src_ptr.value)
+                src = Data.get_instance(src_ptr.value).handle.value + offset
                 dst = self._storage + io.contents._addr
             elif io.contents._dir == IoDir.READ:
                 dst_ptr = cast(io.contents._ops.contents._get_data(io), c_void_p)
-                dst = Data.get_instance(dst_ptr.value)
+                dst = Data.get_instance(dst_ptr.value).handle.value + offset
                 src = self._storage + io.contents._addr
 
             memmove(dst, src, io.contents._bytes)
+            io_priv.contents._offset += io.contents._bytes
 
             io.contents._end(io, 0)
         except:
@@ -56,9 +56,13 @@ class Size:
     _MiB = _KiB * 1024
     _GiB = _MiB * 1024
     _TiB = _GiB * 1024
+    _SECTOR_SIZE = 512
 
-    def __init__(self, b: int):
-        self.bytes = b
+    def __init__(self, b: int, sector_aligned: bool = False):
+        if sector_aligned:
+            self.bytes = ((b + self._SECTOR_SIZE - 1) // self._SECTOR_SIZE) * self._SECTOR_SIZE
+        else:
+            self.bytes = b
 
     def __int__(self):
         return self.bytes
@@ -67,24 +71,28 @@ class Size:
         return self.bytes
 
     @classmethod
-    def from_B(cls, value):
-        return cls(value)
+    def from_B(cls, value, sector_aligned=False):
+        return cls(value, sector_aligned)
 
     @classmethod
-    def from_KiB(cls, value):
-        return cls(value * cls._KiB)
+    def from_KiB(cls, value, sector_aligned=False):
+        return cls(value * cls._KiB, sector_aligned)
 
     @classmethod
-    def from_MiB(cls, value):
-        return cls(value * cls._MiB)
+    def from_MiB(cls, value, sector_aligned=False):
+        return cls(value * cls._MiB, sector_aligned)
 
     @classmethod
-    def from_GiB(cls, value):
-        return cls(value * cls._GiB)
+    def from_GiB(cls, value, sector_aligned=False):
+        return cls(value * cls._GiB, sector_aligned)
 
     @classmethod
-    def from_TiB(cls, value):
-        return cls(value * cls._TiB)
+    def from_TiB(cls, value, sector_aligned=False):
+        return cls(value * cls._TiB, sector_aligned)
+
+    @classmethod
+    def from_sector(cls, value):
+        return cls(value * cls._SECTOR_SIZE)
 
     @property
     def B(self):
@@ -106,6 +114,10 @@ class Size:
     def TiB(self):
         return self.bytes / self._TiB
 
+    @property
+    def sectors(self):
+        return self.bytes // self._SECTOR_SIZE
+
     def __str__(self):
         if self.bytes < self._KiB:
             return "{} B".format(self.B)
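sector_aligned rounds a byte count up to the next 512 B multiple, which Data.from_string above relies on. A few checks that follow directly from the definitions:

    assert Size.from_B(17, sector_aligned=True).B == 512
    assert Size.from_B(513, sector_aligned=True).B == 1024
    assert Size.from_sector(3).B == 1536
    assert Size.from_KiB(4).sectors == 8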
--- /dev/null
+++ b/tests/functional/tests/engine/test_wo.py
@@ -0,0 +1,187 @@
+#
+# Copyright(c) 2019 Intel Corporation
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+#
+
+import pytest
+from ctypes import c_int, memmove, cast, c_void_p
+from enum import IntEnum
+from itertools import product
+import random
+
+from pyocf.types.cache import Cache, CacheMode
+from pyocf.types.core import Core
+from pyocf.types.volume import Volume, ErrorDevice
+from pyocf.types.data import Data
+from pyocf.types.io import IoDir
+from pyocf.utils import Size
+from pyocf.types.shared import OcfError, OcfCompletion
+
+
+def __io(io, queue, address, size, data, direction):
+    io.set_data(data, 0)
+    io.configure(address, size, direction, 0, 0)
+    io.set_queue(queue)
+    completion = OcfCompletion([("err", c_int)])
+    io.callback = completion.callback
+    io.submit()
+    completion.wait()
+    return int(completion.results['err'])
+
+
+def _io(io, queue, address, size, data, offset, direction):
+    if direction == IoDir.READ:
+        _data = Data.from_bytes(bytes(size))
+    else:
+        _data = Data.from_bytes(data, offset, size)
+    ret = __io(io, queue, address, size, _data, direction)
+    if not ret and direction == IoDir.READ:
+        memmove(cast(data, c_void_p).value + offset, _data.handle, size)
+    return ret
+
+
+def io_to_core(core, address, size, data, offset, direction):
+    return _io(core.new_core_io(), core.cache.get_default_queue(), address, size,
+               data, offset, direction)
+
+
+def io_to_exp_obj(core, address, size, data, offset, direction):
+    return _io(core.new_io(), core.cache.get_default_queue(), address, size, data,
+               offset, direction)
+
+
+def sector_to_region(sector, region_start):
+    i = 0
+    while i < len(region_start) - 1 and sector >= region_start[i + 1]:
+        i += 1
+    return i
+
+
+class SectorStatus(IntEnum):
+    DIRTY = 0,
+    CLEAN = 1,
+    INVALID = 2,
+
+
+I = SectorStatus.INVALID
+D = SectorStatus.DIRTY
+C = SectorStatus.CLEAN
+
+
+# Test reads with 4k cacheline and different combinations of sector statuses and
+# IO ranges. Three consecutive core lines are targeted, with the middle one (no 1)
+# having the status of each sector (clean, dirty, invalid) set independently. The
+# other two lines are either fully dirty/clean/invalid or have the single sector
+# neighbouring the middle core line set to a different status. This gives a total
+# of 12 regions with independent state, listed in the diagram below.
+#
+# cache line        | CL 0   | CL 1   | CL 2   |
+# sector no         |01234567|89ABCDEF|(ctd..) |
+#                   |........|........|........|
+# region no         |00000001|23456789|ABBBBBBB|
+# io start possible |        |        |        |
+#   values @START   |>     >>|>>>>>>>>|        |
+# io end possible   |        |        |        |
+#   values @END     |        |<<<<<<<<|<<     <|
+#
+# Each test iteration is described by region states and IO start/end sectors,
+# giving a total of 14 parameters.
+#
+# In order to determine data consistency, the cache is filled with data so that:
+# - core sector no @n is filled with @n
+# - if clean, exported object sector no @n is filled with 100 + @n
+# - if dirty, exported object sector no @n is filled with 200 + @n
+#
+def test_wo_read_data_consistency(pyocf_ctx):
+    # start sector for each region
+    region_start = [0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
+    # possible start sectors for test iteration
+    start_sec = [0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
+    # possible end sectors for test iteration
+    end_sec = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 23]
+
+    SECTOR_SIZE = Size.from_sector(1).B
+    CACHELINE_SIZE = 4096
+    WORKSET_SIZE = 3 * CACHELINE_SIZE
+    WORKSET_OFFSET = 1024 * CACHELINE_SIZE
+    SECTOR_COUNT = int(WORKSET_SIZE / SECTOR_SIZE)
+    ITERATION_COUNT = 200
+
+    # fixed test cases
+    fixed_combinations = [
+        [I, I, D, D, D, D, D, D, D, D, I, I],
+        [I, I, C, C, C, C, C, C, C, C, I, I],
+        [I, I, D, D, D, I, D, D, D, D, I, I],
+        [I, I, D, D, D, I, I, D, D, D, I, I],
+        [I, I, I, I, D, I, I, D, C, D, I, I],
+        [I, D, D, D, D, D, D, D, D, D, D, I],
+        [C, C, I, D, D, I, D, D, D, D, D, I],
+        [D, D, D, D, D, D, D, D, D, D, D, I],
+    ]
+
+    data = {}
+    # memset n-th sector of core data with n
+    data[SectorStatus.INVALID] = bytes([x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
+    # memset n-th sector of clean data with n + 100
+    data[SectorStatus.CLEAN] = bytes([100 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
+    # memset n-th sector of dirty data with n + 200
+    data[SectorStatus.DIRTY] = bytes([200 + x // SECTOR_SIZE for x in range(WORKSET_SIZE)])
+
+    result_b = bytes(WORKSET_SIZE)
+
+    cache_device = Volume(Size.from_MiB(30))
+    core_device = Volume(Size.from_MiB(30))
+
+    cache = Cache.start_on_device(cache_device, cache_mode=CacheMode.WO)
+    core = Core.using_device(core_device)
+
+    cache.add_core(core)
+
+    # generate region status combinations and shuffle them
+    combinations = []
+    state_combinations = product(SectorStatus, repeat=len(region_start))
+    for S in state_combinations:
+        combinations.append(S)
+    random.shuffle(combinations)
+
+    # add fixed test cases at the beginning
+    combinations = fixed_combinations + combinations
+
+    for S in combinations[:ITERATION_COUNT]:
+        # write data to core and invalidate all CL
+        cache.change_cache_mode(cache_mode=CacheMode.PT)
+        io_to_exp_obj(core, WORKSET_OFFSET, len(data[SectorStatus.INVALID]),
+                      data[SectorStatus.INVALID], 0, IoDir.WRITE)
+
+        # insert clean sectors
+        cache.change_cache_mode(cache_mode=CacheMode.WT)
+        for sec in range(SECTOR_COUNT):
+            region = sector_to_region(sec, region_start)
+            if S[region] == SectorStatus.CLEAN:
+                io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE,
+                              data[SectorStatus.CLEAN], sec * SECTOR_SIZE, IoDir.WRITE)
+
+        # write dirty sectors
+        cache.change_cache_mode(cache_mode=CacheMode.WO)
+        for sec in range(SECTOR_COUNT):
+            region = sector_to_region(sec, region_start)
+            if S[region] == SectorStatus.DIRTY:
+                io_to_exp_obj(core, WORKSET_OFFSET + SECTOR_SIZE * sec, SECTOR_SIZE,
+                              data[SectorStatus.DIRTY], sec * SECTOR_SIZE, IoDir.WRITE)
+
+        for s in start_sec:
+            for e in end_sec:
+                if s > e:
+                    continue
+
+                # issue WO read
+                START = s * SECTOR_SIZE
+                END = e * SECTOR_SIZE
+                size = (e - s + 1) * SECTOR_SIZE
+                assert 0 == io_to_exp_obj(core, WORKSET_OFFSET + START, size,
+                                          result_b, START, IoDir.READ), \
+                    "error reading in WO mode: S={}, start={}, end={}".format(S, s, e)
+
+                # verify read data
+                for sec in range(s, e + 1):
+                    # just check the first byte of each sector
+                    region = sector_to_region(sec, region_start)
+                    check_byte = sec * SECTOR_SIZE
+                    assert result_b[check_byte] == data[S[region]][check_byte], \
+                        "unexpected data in sector {}, S={}, s={}, e={}\n".format(sec, S, s, e)
@@ -62,11 +62,11 @@ def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: Cache
     core_device.reset_stats()
 
     test_data = Data.from_string("This is test data")
-    io_to_core(core_exported, test_data, 20)
+    io_to_core(core_exported, test_data, Size.from_sector(1).B)
     check_stats_write_empty(core_exported, mode, cls)
 
     logger.info("[STAGE] Read from exported object after initial write")
-    io_from_exported_object(core_exported, test_data.size, 20)
+    io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
     check_stats_read_after_write(core_exported, mode, cls, True)
 
     logger.info("[STAGE] Write to exported object after read")
@@ -75,7 +75,7 @@ def test_start_write_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: Cache
 
     test_data = Data.from_string("Changed test data")
 
-    io_to_core(core_exported, test_data, 20)
+    io_to_core(core_exported, test_data, Size.from_sector(1).B)
     check_stats_write_after_read(core_exported, mode, cls)
 
     check_md5_sums(core_exported, mode)
@@ -97,13 +97,13 @@ def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheL
 
     logger.info("[STAGE] Initial write to core device")
     test_data = Data.from_string("This is test data")
-    io_to_core(core_exported, test_data, 20, True)
+    io_to_core(core_exported, test_data, Size.from_sector(1).B, True)
 
     cache_device.reset_stats()
     core_device.reset_stats()
 
     logger.info("[STAGE] Initial read from exported object")
-    io_from_exported_object(core_exported, test_data.size, 20)
+    io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
     check_stats_read_empty(core_exported, mode, cls)
 
     logger.info("[STAGE] Write to exported object after initial read")
@@ -112,11 +112,11 @@ def test_start_read_first_and_check_mode(pyocf_ctx, mode: CacheMode, cls: CacheL
 
     test_data = Data.from_string("Changed test data")
 
-    io_to_core(core_exported, test_data, 20)
+    io_to_core(core_exported, test_data, Size.from_sector(1).B)
     check_stats_write_after_read(core_exported, mode, cls, True)
 
     logger.info("[STAGE] Read from exported object after write")
-    io_from_exported_object(core_exported, test_data.size, 20)
+    io_from_exported_object(core_exported, test_data.size, Size.from_sector(1).B)
     check_stats_read_after_write(core_exported, mode, cls)
 
     check_md5_sums(core_exported, mode)
@@ -180,7 +180,7 @@ def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
     run_io_and_cache_data_if_possible(core_exported, mode, cls)
 
     stats = cache.get_stats()
-    assert int(stats["conf"]["dirty"]) == (1 if mode == CacheMode.WB else 0), "Dirty data before MD5"
+    assert int(stats["conf"]["dirty"]) == (1 if mode.lazy_write() else 0), "Dirty data before MD5"
 
     md5_exported_core = core_exported.exp_obj_md5()
 
@@ -188,7 +188,7 @@ def test_stop(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, with_flush: bool):
         cache.flush()
     cache.stop()
 
-    if mode == CacheMode.WB and not with_flush:
+    if mode.lazy_write() and not with_flush:
         pytest.xfail("MD5 sums equal without flush with dirty data")  # TODO: remove after WB fixed
         assert core_device.md5() != md5_exported_core, \
             "MD5 check: core device vs exported object with dirty data"
@@ -393,38 +393,39 @@ def io_from_exported_object(exported_obj: Core, buffer_size: int, offset: int):
 def check_stats_read_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineSize):
     stats = exported_obj.cache.get_stats()
     assert stats["conf"]["cache_mode"] == mode, "Cache mode"
-    assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == (0 if mode == CacheMode.PT else 1), \
+    assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == (1 if mode.read_insert() else 0), \
         "Writes to cache device"
     assert exported_obj.device.get_stats()[IoDir.READ] == 1, "Reads from core device"
     assert stats["req"]["rd_full_misses"]["value"] == (0 if mode == CacheMode.PT else 1), \
         "Read full misses"
     assert stats["usage"]["occupancy"]["value"] == \
-        (0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)), "Occupancy"
+        ((cls / CacheLineSize.LINE_4KiB) if mode.read_insert() else 0), "Occupancy"
 
 
 def check_stats_write_empty(exported_obj: Core, mode: CacheMode, cls: CacheLineSize):
     stats = exported_obj.cache.get_stats()
     assert stats["conf"]["cache_mode"] == mode, "Cache mode"
+    # TODO(ajrutkow): why 1 for WT ??
     assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
-        (2 if mode == CacheMode.WB else (1 if mode == CacheMode.WT else 0)), \
+        (2 if mode.lazy_write() else (1 if mode == CacheMode.WT else 0)), \
         "Writes to cache device"
-    assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode == CacheMode.WB else 1), \
+    assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
         "Writes to core device"
-    assert stats["req"]["wr_full_misses"]["value"] == (1 if mode in {CacheMode.WT, CacheMode.WB} else 0), \
+    assert stats["req"]["wr_full_misses"]["value"] == (1 if mode.write_insert() else 0), \
        "Write full misses"
     assert stats["usage"]["occupancy"]["value"] == \
-        ((cls / CacheLineSize.LINE_4KiB) if mode in {CacheMode.WB, CacheMode.WT} else 0), \
+        ((cls / CacheLineSize.LINE_4KiB) if mode.write_insert() else 0), \
         "Occupancy"
 
 
 def check_stats_write_after_read(exported_obj: Core, mode: CacheMode, cls: CacheLineSize, read_from_empty=False):
     stats = exported_obj.cache.get_stats()
     assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
-        (0 if mode in {CacheMode.WI, CacheMode.PT} else (2 if read_from_empty and mode == CacheMode.WB else 1)), \
+        (0 if mode in {CacheMode.WI, CacheMode.PT} else (2 if read_from_empty and mode.lazy_write() else 1)), \
        "Writes to cache device"
-    assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode == CacheMode.WB else 1), \
+    assert exported_obj.device.get_stats()[IoDir.WRITE] == (0 if mode.lazy_write() else 1), \
        "Writes to core device"
-    assert stats["req"]["wr_hits"]["value"] == (0 if mode in {CacheMode.WI, CacheMode.PT} else 1), \
+    assert stats["req"]["wr_hits"]["value"] == (1 if (mode.read_insert() and mode != CacheMode.WI) or (mode.write_insert() and not read_from_empty) else 0), \
        "Write hits"
     assert stats["usage"]["occupancy"]["value"] == \
         (0 if mode in {CacheMode.WI, CacheMode.PT} else (cls / CacheLineSize.LINE_4KiB)), \
@@ -434,26 +435,26 @@ def check_stats_write_after_read(exported_obj: Core, mode: CacheMode, cls: Cache
 def check_stats_read_after_write(exported_obj, mode, cls, write_to_empty=False):
     stats = exported_obj.cache.get_stats()
     assert exported_obj.cache.device.get_stats()[IoDir.WRITE] == \
-        (2 if mode == CacheMode.WB else (0 if mode == CacheMode.PT else 1)), \
+        (2 if mode.lazy_write() else (0 if mode == CacheMode.PT else 1)), \
         "Writes to cache device"
     assert exported_obj.cache.device.get_stats()[IoDir.READ] == \
-        (1 if mode in {CacheMode.WT, CacheMode.WB} or (mode == CacheMode.WA and not write_to_empty) else 0), \
+        (1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO} or (mode == CacheMode.WA and not write_to_empty) else 0), \
         "Reads from cache device"
     assert exported_obj.device.get_stats()[IoDir.READ] == \
-        (0 if mode in {CacheMode.WB, CacheMode.WT} or (mode == CacheMode.WA and not write_to_empty) else 1), \
+        (0 if mode in {CacheMode.WB, CacheMode.WO, CacheMode.WT} or (mode == CacheMode.WA and not write_to_empty) else 1), \
         "Reads from core device"
     assert stats["req"]["rd_full_misses"]["value"] == (1 if mode in {CacheMode.WA, CacheMode.WI} else 0) \
         + (0 if write_to_empty or mode in {CacheMode.PT, CacheMode.WA} else 1), \
         "Read full misses"
     assert stats["req"]["rd_hits"]["value"] == \
-        (1 if mode in {CacheMode.WT, CacheMode.WB} or (mode == CacheMode.WA and not write_to_empty) else 0), \
+        (1 if mode in {CacheMode.WT, CacheMode.WB, CacheMode.WO} or (mode == CacheMode.WA and not write_to_empty) else 0), \
         "Read hits"
     assert stats["usage"]["occupancy"]["value"] == \
         (0 if mode == CacheMode.PT else (cls / CacheLineSize.LINE_4KiB)), "Occupancy"
 
 
 def check_md5_sums(exported_obj: Core, mode: CacheMode):
-    if mode == CacheMode.WB:
+    if mode.lazy_write():
         assert exported_obj.device.md5() != exported_obj.exp_obj_md5(), \
             "MD5 check: core device vs exported object without flush"
         exported_obj.cache.flush()