Parallel eviction
Eviction changes that allow evicting (remapping) cachelines while holding the hash bucket write lock instead of the global metadata write lock. As eviction (replacement) is now tightly coupled with the request, each request uses an eviction size equal to the number of its unmapped cachelines. Evicting without the global metadata write lock is possible because remapping is always performed while exclusively holding the cacheline (read or write) lock. So for a cacheline on an LRU list we acquire the cacheline lock, safely resolve its hash and consequently write-lock the hash bucket. Since the cacheline lock is everywhere else acquired under the hash bucket lock (the new eviction implementation is the only exception), we are certain that no one acquires the cacheline lock behind our back. Concurrent eviction threads are serialized by holding the eviction list lock for the duration of the critical locking operations.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
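The key to this scheme is a lock-ordering inversion: regular I/O takes the hash bucket lock first and the cacheline lock second, while the eviction path takes them in the opposite order. That is only safe because eviction never blocks on either lock. Below is a minimal sketch of the idea; every helper name in it (trylock_cacheline_exclusive, resolve_hash_bucket, and so on) is a hypothetical placeholder rather than the actual OCF API - in the patch itself the corresponding logic lives in _lru_iter_evition_lock() in lru.c.

    /* Sketch of the eviction-path locking described above. All helper
     * names are hypothetical placeholders, not the OCF API. */

    #include <stdbool.h>

    struct cacheline;

    /* Assumed non-blocking primitives. */
    bool trylock_cacheline_exclusive(struct cacheline *cl);
    void unlock_cacheline(struct cacheline *cl);
    bool trylock_hash_bucket_wr(unsigned bucket);
    void unlock_hash_bucket_wr(unsigned bucket);
    unsigned resolve_hash_bucket(struct cacheline *cl);

    /* Regular I/O locks the hash bucket first and the cacheline second.
     * Eviction inverts that order, so both acquisitions must be trylocks:
     * blocking here could deadlock against a request that already holds
     * the bucket and is waiting for the cacheline. */
    static bool evict_trylock_pair(struct cacheline *cl, unsigned *bucket)
    {
        if (!trylock_cacheline_exclusive(cl))
            return false;  /* line is in use - skip this victim */

        /* Holding the cacheline lock pins its mapping, so the hash
         * bucket can be resolved safely now. */
        *bucket = resolve_hash_bucket(cl);

        if (!trylock_hash_bucket_wr(*bucket)) {
            unlock_cacheline(cl);  /* back off, try the next victim */
            return false;
        }

        return true;  /* both locks held - safe to remap the line */
    }

When either trylock fails, the eviction iterator simply steps to the previous element of the LRU list instead of waiting; that retry loop is visible in lru_iter_eviction_next() in the diff below.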
--- a/src/eviction/eviction.c
+++ b/src/eviction/eviction.c
@@ -6,6 +6,7 @@
 #include "eviction.h"
 #include "ops.h"
 #include "../utils/utils_part.h"
+#include "../engine/engine_common.h"
 
 struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
 	[ocf_eviction_lru] = {
@@ -16,12 +17,13 @@ struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
 		.init_evp = evp_lru_init_evp,
 		.dirty_cline = evp_lru_dirty_cline,
 		.clean_cline = evp_lru_clean_cline,
+		.flush_dirty = evp_lru_clean,
 		.name = "lru",
 	},
 };
 
 static uint32_t ocf_evict_calculate(ocf_cache_t cache,
-		struct ocf_user_part *part, uint32_t to_evict, bool roundup)
+		struct ocf_user_part *part, uint32_t to_evict)
 {
 	uint32_t curr_part_size = ocf_part_get_occupancy(part);
@@ -35,33 +37,34 @@ static uint32_t ocf_evict_calculate(ocf_cache_t cache,
 		return 0;
 	}
 
-	if (roundup && to_evict < OCF_TO_EVICTION_MIN)
-		to_evict = OCF_TO_EVICTION_MIN;
-
 	if (to_evict > (curr_part_size - min_part_size))
 		to_evict = curr_part_size - min_part_size;
 
 	return to_evict;
 }
 
-static inline uint32_t ocf_evict_part_do(ocf_cache_t cache,
-		ocf_queue_t io_queue, const uint32_t evict_cline_no,
+static inline uint32_t ocf_evict_part_do(struct ocf_request *req,
 		struct ocf_user_part *target_part)
 {
+	uint32_t unmapped = ocf_engine_unmapped_count(req);
 	uint32_t to_evict = 0;
 
-	if (!evp_lru_can_evict(cache))
+	if (!evp_lru_can_evict(req->cache))
 		return 0;
 
-	to_evict = ocf_evict_calculate(cache, target_part, evict_cline_no,
-			false);
+	to_evict = ocf_evict_calculate(req->cache, target_part, unmapped);
 
-	return ocf_eviction_need_space(cache, io_queue,
-			target_part, to_evict);
+	if (to_evict < unmapped) {
+		/* cannot evict enough cachelines to map request,
+		   so no purpose in evicting anything */
+		return 0;
+	}
+
+	return ocf_eviction_need_space(req->cache, req, target_part, to_evict);
 }
 
 static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
-		ocf_queue_t io_queue, uint32_t evict_cline_no,
+		struct ocf_request *req, uint32_t evict_cline_no,
 		bool overflown_only, int16_t max_priority)
 {
 	uint32_t to_evict = 0, evicted = 0;
@@ -98,7 +101,7 @@ static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
 		}
 
 		to_evict = ocf_evict_calculate(cache, part,
-				evict_cline_no - evicted, true);
+				evict_cline_no - evicted);
 		if (to_evict == 0) {
 			/* No cache lines to evict for this partition */
 			continue;
@@ -107,8 +110,7 @@ static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
 		if (overflown_only)
 			to_evict = OCF_MIN(to_evict, overflow_size);
 
-		evicted += ocf_eviction_need_space(cache, io_queue,
-				part, to_evict);
+		evicted += ocf_eviction_need_space(cache, req, part, to_evict);
 
 		if (evicted >= evict_cline_no) {
 			/* Evicted requested number of cache line, stop
@@ -122,10 +124,12 @@ out:
 	return evicted;
 }
 
-static inline uint32_t ocf_evict_do(ocf_cache_t cache,
-		ocf_queue_t io_queue, uint32_t evict_cline_no,
-		struct ocf_user_part *target_part)
+static inline uint32_t ocf_evict_do(struct ocf_request *req)
 {
+	ocf_cache_t cache = req->cache;
+	ocf_part_id_t target_part_id = req->part_id;
+	struct ocf_user_part *target_part = &cache->user_parts[target_part_id];
+	uint32_t evict_cline_no = ocf_engine_unmapped_count(req);
 	uint32_t evicted;
 
 	/* First attempt to evict overflown partitions in order to
@@ -134,7 +138,7 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
 	 * free its cachelines regardless of destination partition
 	 * priority. */
-	evicted = ocf_evict_partitions(cache, io_queue, evict_cline_no,
+	evicted = ocf_evict_partitions(cache, req, evict_cline_no,
 			true, OCF_IO_CLASS_PRIO_PINNED);
 	if (evicted >= evict_cline_no)
 		return evicted;
@@ -142,35 +146,26 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
 	 * partitions with priority <= target partition and attempt
 	 * to evict from those. */
 	evict_cline_no -= evicted;
-	evicted += ocf_evict_partitions(cache, io_queue, evict_cline_no,
+	evicted += ocf_evict_partitions(cache, req, evict_cline_no,
 			false, target_part->config->priority);
 
 	return evicted;
 }
 
-int space_managment_evict_do(struct ocf_cache *cache,
-		struct ocf_request *req, uint32_t evict_cline_no)
+int space_managment_evict_do(struct ocf_request *req)
 {
+	uint32_t needed = ocf_engine_unmapped_count(req);
 	uint32_t evicted;
-	uint32_t free;
-	struct ocf_user_part *req_part = &cache->user_parts[req->part_id];
+	struct ocf_user_part *req_part = &req->cache->user_parts[req->part_id];
 
 	if (ocf_req_part_evict(req)) {
-		evicted = ocf_evict_part_do(cache, req->io_queue, evict_cline_no,
-				req_part);
+		evicted = ocf_evict_part_do(req, req_part);
 	} else {
-		free = ocf_freelist_num_free(cache->freelist);
-		if (evict_cline_no <= free)
-			return LOOKUP_INSERTED;
-
-		evict_cline_no -= free;
-
-		evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no, req_part);
+		evicted = ocf_evict_do(req);
 	}
 
-	if (evict_cline_no <= evicted)
+	if (needed <= evicted)
 		return LOOKUP_INSERTED;
 
 	ocf_req_set_mapping_error(req);
 	return LOOKUP_MISS;
 }
--- a/src/eviction/eviction.h
+++ b/src/eviction/eviction.h
@@ -10,7 +10,6 @@
 #include "lru.h"
 #include "lru_structs.h"
 
-#define OCF_TO_EVICTION_MIN 128UL
 #define OCF_PENDING_EVICTION_LIMIT 512UL
 
 #define OCF_NUM_EVICTION_LISTS 32
@@ -40,11 +39,9 @@ struct eviction_policy_ops {
 	void (*rm_cline)(ocf_cache_t cache,
 			ocf_cache_line_t cline);
 	bool (*can_evict)(ocf_cache_t cache);
-	uint32_t (*req_clines)(ocf_cache_t cache,
-			ocf_queue_t io_queue, struct ocf_user_part *part,
+	uint32_t (*req_clines)(struct ocf_request *req, struct ocf_user_part *part,
 			uint32_t cline_no);
-	void (*hot_cline)(ocf_cache_t cache,
-			ocf_cache_line_t cline);
+	void (*hot_cline)(ocf_cache_t cache, ocf_cache_line_t cline);
 	void (*init_evp)(ocf_cache_t cache, struct ocf_user_part *part);
 	void (*dirty_cline)(ocf_cache_t cache,
 			struct ocf_user_part *part,
@@ -52,6 +49,8 @@ struct eviction_policy_ops {
 	void (*clean_cline)(ocf_cache_t cache,
 			struct ocf_user_part *part,
 			uint32_t cline_no);
+	void (*flush_dirty)(ocf_cache_t cache, struct ocf_user_part *part,
+			ocf_queue_t io_queue, uint32_t count);
 	const char *name;
 };
 
@@ -64,8 +63,7 @@ extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];
  * 'LOOKUP_HIT' if evicted enough cachelines to serve @req
  * 'LOOKUP_MISS' otherwise
  */
-int space_managment_evict_do(ocf_cache_t cache,
-		struct ocf_request *req, uint32_t evict_cline_no);
+int space_managment_evict_do(struct ocf_request *req);
 
 int space_management_free(ocf_cache_t cache, uint32_t count);
--- a/src/eviction/lru.c
+++ b/src/eviction/lru.c
@@ -12,6 +12,7 @@
 #include "../mngt/ocf_mngt_common.h"
 #include "../engine/engine_zero.h"
 #include "../ocf_request.h"
+#include "../engine/engine_common.h"
 
 #define OCF_EVICTION_MAX_SCAN 1024
 
@@ -261,7 +262,8 @@ void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
 }
 
 static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
-		struct ocf_user_part *part, uint32_t start_evp, bool clean)
+		struct ocf_user_part *part, uint32_t start_evp, bool clean,
+		bool cl_lock_write, _lru_hash_locked_pfn hash_locked, void *context)
 {
 	uint32_t i;
 
@@ -275,11 +277,47 @@ static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
 	iter->evp = (start_evp + OCF_NUM_EVICTION_LISTS - 1) % OCF_NUM_EVICTION_LISTS;
 	iter->num_avail_evps = OCF_NUM_EVICTION_LISTS;
 	iter->next_avail_evp = ((1ULL << OCF_NUM_EVICTION_LISTS) - 1);
 	iter->clean = clean;
+	iter->cl_lock_write = cl_lock_write;
+	iter->hash_locked = hash_locked;
+	iter->context = context;
 
 	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
 		iter->curr_cline[i] = evp_lru_get_list(part, i, clean)->tail;
 }
 
+static inline void lru_iter_cleaning_init(struct ocf_lru_iter *iter,
+		ocf_cache_t cache, struct ocf_user_part *part,
+		uint32_t start_evp)
+{
+	/* Lock cachelines for read, non-exclusive access */
+	lru_iter_init(iter, cache, part, start_evp, false, false,
+			NULL, NULL);
+}
+
+static bool _evp_lru_evict_hash_locked(void *context,
+		ocf_core_id_t core_id, uint64_t core_line)
+{
+	struct ocf_request *req = context;
+
+	return ocf_req_hash_in_range(req, core_id, core_line);
+}
+
+static inline void lru_iter_eviction_init(struct ocf_lru_iter *iter,
+		ocf_cache_t cache, struct ocf_user_part *part,
+		uint32_t start_evp, bool cl_lock_write,
+		struct ocf_request *req)
+{
+	/* Lock hash buckets for write, cachelines according to user request,
+	 * however exclusive cacheline access is needed even in case of read
+	 * access. _evp_lru_evict_hash_locked tells whether given hash bucket
+	 * is already locked as part of request hash locking (to avoid attempt
+	 * to acquire the same hash bucket lock twice) */
+	lru_iter_init(iter, cache, part, start_evp, true, cl_lock_write,
+			_evp_lru_evict_hash_locked, req);
+}
+
+
 static inline uint32_t _lru_next_evp(struct ocf_lru_iter *iter)
 {
 	unsigned increment;
@@ -292,6 +330,8 @@ static inline uint32_t _lru_next_evp(struct ocf_lru_iter *iter)
 	return iter->evp;
 }
 
+
+
 static inline bool _lru_evp_is_empty(struct ocf_lru_iter *iter)
 {
 	return !(iter->next_avail_evp & (1ULL << (OCF_NUM_EVICTION_LISTS - 1)));
@@ -308,143 +348,253 @@ static inline bool _lru_evp_all_empty(struct ocf_lru_iter *iter)
 	return iter->num_avail_evps == 0;
 }
 
-/* get next non-empty lru list if available */
-static inline ocf_cache_line_t lru_iter_next(struct ocf_lru_iter *iter)
-{
-	struct lru_eviction_policy_meta *node;
-	uint32_t curr_evp;
-	ocf_cache_line_t ret;
-
-	curr_evp = _lru_next_evp(iter);
-
-	while (iter->curr_cline[curr_evp] == end_marker) {
-		if (!_lru_evp_is_empty(iter)) {
-			/* mark list as empty */
-			_lru_evp_set_empty(iter);
-		}
-		if (_lru_evp_all_empty(iter)) {
-			/* all lists empty */
-			return end_marker;
-		}
-		curr_evp = _lru_next_evp(iter);
-	}
-
-	node = &ocf_metadata_get_eviction_policy(iter->cache,
-			iter->curr_cline[curr_evp])->lru;
-	ret = iter->curr_cline[curr_evp];
-	iter->curr_cline[curr_evp] = node->prev;
-
-	return ret;
-}
+static bool inline _lru_trylock_cacheline(struct ocf_lru_iter *iter,
+		ocf_cache_line_t cline)
+{
+	struct ocf_cache_line_concurrency *c =
+		ocf_cache_line_concurrency(iter->cache);
+
+	return iter->cl_lock_write ?
+		ocf_cache_line_try_lock_wr(c, cline) :
+		ocf_cache_line_try_lock_rd(c, cline);
+}
+
+static void inline _lru_unlock_cacheline(struct ocf_lru_iter *iter,
+		ocf_cache_line_t cline)
+{
+	struct ocf_cache_line_concurrency *c =
+		ocf_cache_line_concurrency(iter->cache);
+
+	if (iter->cl_lock_write)
+		ocf_cache_line_unlock_wr(c, cline);
+	else
+		ocf_cache_line_unlock_rd(c, cline);
+}
+
+static bool inline _lru_trylock_hash(struct ocf_lru_iter *iter,
+		ocf_core_id_t core_id, uint64_t core_line)
+{
+	if (iter->hash_locked != NULL && iter->hash_locked(
+			iter->context,
+			core_id, core_line)) {
+		return true;
+	}
+
+	return ocf_hb_cline_naked_trylock_wr(
+			&iter->cache->metadata.lock,
+			core_id, core_line);
+}
+
+static void inline _lru_unlock_hash(struct ocf_lru_iter *iter,
+		ocf_core_id_t core_id, uint64_t core_line)
+{
+	if (iter->hash_locked != NULL && iter->hash_locked(
+			iter->context,
+			core_id, core_line)) {
+		return;
+	}
+
+	ocf_hb_cline_naked_unlock_wr(
+			&iter->cache->metadata.lock,
+			core_id, core_line);
+}
+
+static bool inline _lru_iter_evition_lock(struct ocf_lru_iter *iter,
+		ocf_cache_line_t cache_line,
+		ocf_core_id_t *core_id, uint64_t *core_line)
+{
+	if (!_lru_trylock_cacheline(iter, cache_line))
+		return false;
+
+	ocf_metadata_get_core_info(iter->cache, cache_line,
+		core_id, core_line);
+
+	if (!_lru_trylock_hash(iter, *core_id, *core_line)) {
+		_lru_unlock_cacheline(iter, cache_line);
+		return false;
+	}
+
+	if (!ocf_cache_line_is_locked_exclusively(iter->cache,
+			cache_line)) {
+		_lru_unlock_hash(iter, *core_id, *core_line);
+		_lru_unlock_cacheline(iter, cache_line);
+		return false;
+	}
+
+	return true;
+}
+
+/* Get next clean cacheline from tail of lru lists. Caller must not hold any
+ * eviction list lock. Returned cacheline is read or write locked, depending on
+ * iter->write_lock. Returned cacheline has corresponding metadata hash bucket
+ * locked. Cacheline is moved to the head of lru list before being returned */
+static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
+		ocf_core_id_t *core_id, uint64_t *core_line)
+{
+	uint32_t curr_evp;
+	ocf_cache_line_t cline;
+	ocf_cache_t cache = iter->cache;
+	struct ocf_user_part *part = iter->part;
+	struct ocf_lru_list *list;
+
+	do {
+		curr_evp = _lru_next_evp(iter);
+
+		ocf_metadata_eviction_wr_lock(&cache->metadata.lock, curr_evp);
+
+		list = evp_lru_get_list(part, curr_evp, iter->clean);
+
+		cline = list->tail;
+		while (cline != end_marker && !_lru_iter_evition_lock(iter,
+				cline, core_id, core_line)) {
+			cline = ocf_metadata_get_eviction_policy(
+					iter->cache, cline)->lru.prev;
+		}
+
+		if (cline != end_marker) {
+			remove_lru_list(cache, list, cline);
+			add_lru_head(cache, list, cline);
+			balance_lru_list(cache, list);
+		}
+
+		ocf_metadata_eviction_wr_unlock(&cache->metadata.lock, curr_evp);
+
+		if (cline == end_marker && !_lru_evp_is_empty(iter)) {
+			/* mark list as empty */
+			_lru_evp_set_empty(iter);
+		}
+	} while (cline == end_marker && !_lru_evp_all_empty(iter));
+
+	return cline;
+}
+
+/* Get next dirty cacheline from tail of lru lists. Caller must hold all
+ * eviction list locks during entire iteration proces. Returned cacheline
+ * is read or write locked, depending on iter->write_lock */
+static inline ocf_cache_line_t lru_iter_cleaning_next(struct ocf_lru_iter *iter)
+{
+	uint32_t curr_evp;
+	ocf_cache_line_t cline;
+
+	do {
+		curr_evp = _lru_next_evp(iter);
+		cline = iter->curr_cline[curr_evp];
+
+		while (cline != end_marker && !_lru_trylock_cacheline(iter,
+				cline)) {
+			cline = ocf_metadata_get_eviction_policy(
+					iter->cache, cline)->lru.prev;
+		}
+		if (cline != end_marker) {
+			iter->curr_cline[curr_evp] =
+				ocf_metadata_get_eviction_policy(
+					iter->cache , cline)->lru.prev;
+		}
+
+		if (cline == end_marker && !_lru_evp_is_empty(iter)) {
+			/* mark list as empty */
+			_lru_evp_set_empty(iter);
+		}
+	} while (cline == end_marker && !_lru_evp_all_empty(iter));
+
+	return cline;
+}
 
 static void evp_lru_clean_end(void *private_data, int error)
 {
-	struct ocf_lru_iter *iter = private_data;
+	struct ocf_part_cleaning_ctx *ctx = private_data;
+	unsigned i;
 
-	ocf_refcnt_dec(&iter->part->cleaning);
+	for (i = 0; i < OCF_EVICTION_CLEAN_SIZE; i++) {
+		if (ctx->cline[i] != end_marker)
+			ocf_cache_line_unlock_rd(ctx->cache->device->concurrency
+					.cache_line, ctx->cline[i]);
+	}
+
+	ocf_refcnt_dec(&ctx->counter);
 }
 
-static int evp_lru_clean_getter(ocf_cache_t cache, void *getter_context,
-		uint32_t item, ocf_cache_line_t *line)
+static int evp_lru_clean_get(ocf_cache_t cache, void *getter_context,
+		uint32_t idx, ocf_cache_line_t *line)
 {
-	struct ocf_lru_iter *iter = getter_context;
-	ocf_cache_line_t cline;
-
-	while (true) {
-		cline = lru_iter_next(iter);
-
-		if (cline == end_marker)
-			break;
-
-		/* Prevent evicting already locked items */
-		if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
-				cline)) {
-			continue;
-		}
-
-		ENV_BUG_ON(!metadata_test_dirty(cache, cline));
-
-		*line = cline;
-		return 0;
-	}
-
-	return -1;
+	struct ocf_part_cleaning_ctx *ctx = getter_context;
+
+	if (ctx->cline[idx] == end_marker)
+		return -1;
+
+	ENV_BUG_ON(!metadata_test_dirty(ctx->cache, ctx->cline[idx]));
+	*line = ctx->cline[idx];
+
+	return 0;
 }
 
-static void evp_lru_clean(ocf_cache_t cache, ocf_queue_t io_queue,
-		struct ocf_user_part *part, uint32_t count)
+void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
+		ocf_queue_t io_queue, uint32_t count)
 {
-	struct ocf_refcnt *counter = &part->cleaning;
+	struct ocf_part_cleaning_ctx *ctx = &part->cleaning;
 	struct ocf_cleaner_attribs attribs = {
-		.lock_cacheline = true,
+		.lock_cacheline = false,
+		.lock_metadata = true,
 		.do_sort = true,
 
-		.cmpl_context = &part->eviction_clean_iter,
+		.cmpl_context = &part->cleaning,
 		.cmpl_fn = evp_lru_clean_end,
 
-		.getter = evp_lru_clean_getter,
-		.getter_context = &part->eviction_clean_iter,
+		.getter = evp_lru_clean_get,
+		.getter_context = &part->cleaning,
 
-		.count = count > 32 ? 32 : count,
+		.count = min(count, OCF_EVICTION_CLEAN_SIZE),
 
 		.io_queue = io_queue
 	};
+	ocf_cache_line_t *cline = part->cleaning.cline;
+	struct ocf_lru_iter iter;
+	unsigned evp;
 	int cnt;
+	unsigned i;
+	unsigned lock_idx;
 
 	if (ocf_mngt_cache_is_locked(cache))
 		return;
 
-	cnt = ocf_refcnt_inc(counter);
+	cnt = ocf_refcnt_inc(&ctx->counter);
 	if (!cnt) {
 		/* cleaner disabled by management operation */
 		return;
 	}
 
 	if (cnt > 1) {
 		/* cleaning already running for this partition */
-		ocf_refcnt_dec(counter);
+		ocf_refcnt_dec(&ctx->counter);
 		return;
 	}
 
-	lru_iter_init(&part->eviction_clean_iter, cache, part,
-			part->eviction_clean_iter.evp, false);
+	part->cleaning.cache = cache;
+	evp = io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;
+
+	lock_idx = ocf_metadata_concurrency_next_idx(io_queue);
+	ocf_metadata_start_shared_access(&cache->metadata.lock, lock_idx);
+
+	OCF_METADATA_EVICTION_WR_LOCK_ALL();
+
+	lru_iter_cleaning_init(&iter, cache, part, evp);
+	i = 0;
+	while (i < OCF_EVICTION_CLEAN_SIZE) {
+		cline[i] = lru_iter_cleaning_next(&iter);
+		if (cline[i] == end_marker)
+			break;
+		i++;
+	}
+	while (i < OCF_EVICTION_CLEAN_SIZE)
+		cline[i++] = end_marker;
+
+	OCF_METADATA_EVICTION_WR_UNLOCK_ALL();
+
+	ocf_metadata_end_shared_access(&cache->metadata.lock, lock_idx);
 
 	ocf_cleaner_fire(cache, &attribs);
 }
 
 static void evp_lru_zero_line_complete(struct ocf_request *ocf_req, int error)
 {
 	env_atomic_dec(&ocf_req->cache->pending_eviction_clines);
 }
 
 static void evp_lru_zero_line(ocf_cache_t cache, ocf_queue_t io_queue,
 		ocf_cache_line_t line)
 {
 	struct ocf_request *req;
 	ocf_core_id_t id;
 	uint64_t addr, core_line;
 
 	ocf_metadata_get_core_info(cache, line, &id, &core_line);
 	addr = core_line * ocf_line_size(cache);
 
 	req = ocf_req_new(io_queue, &cache->core[id], addr,
 			ocf_line_size(cache), OCF_WRITE);
 	if (!req)
 		return;
 
 	if (req->d2c) {
 		/* cache device is being detached */
 		ocf_req_put(req);
 		return;
 	}
 
 	req->info.internal = true;
 	req->complete = evp_lru_zero_line_complete;
 
 	env_atomic_inc(&cache->pending_eviction_clines);
 
 	ocf_engine_zero_line(req);
 }
 
 bool evp_lru_can_evict(ocf_cache_t cache)
 {
 	if (env_atomic_read(&cache->pending_eviction_clines) >=
@@ -455,73 +605,86 @@ bool evp_lru_can_evict(ocf_cache_t cache)
 	return true;
 }
 
 static bool dirty_pages_present(ocf_cache_t cache, struct ocf_user_part *part)
 {
 	uint32_t i;
 
 	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
 		if (evp_lru_get_list(part, i, false)->tail != end_marker)
 			return true;
 	}
 
 	return false;
 }
 
 /* the caller must hold the metadata lock */
-uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
+uint32_t evp_lru_req_clines(struct ocf_request *req,
 		struct ocf_user_part *part, uint32_t cline_no)
 {
 	struct ocf_lru_iter iter;
 	uint32_t i;
 	ocf_cache_line_t cline;
+	uint64_t core_line;
+	ocf_core_id_t core_id;
+	ocf_cache_t cache = req->cache;
+	bool cl_write_lock =
+		(req->engine_cbs->get_lock_type(req) == ocf_engine_lock_write);
+	unsigned evp;
+	unsigned req_idx = 0;
 
 	if (cline_no == 0)
 		return 0;
 
-	lru_iter_init(&iter, cache, part, part->next_eviction_list, true);
+	if (unlikely(ocf_engine_unmapped_count(req) < cline_no)) {
+		ocf_cache_log(req->cache, log_err, "Not enough space in"
+				"request: unmapped %u, requested %u",
+				ocf_engine_unmapped_count(req),
+				cline_no);
+		ENV_BUG();
+	}
+
+	evp = req->io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;
+
+	lru_iter_eviction_init(&iter, cache, part, evp, cl_write_lock, req);
 
 	i = 0;
 	while (i < cline_no) {
-		cline = lru_iter_next(&iter);
+		if (!evp_lru_can_evict(cache))
+			break;
+
+		cline = lru_iter_eviction_next(&iter, &core_id, &core_line);
 
 		if (cline == end_marker)
 			break;
 
-		if (!evp_lru_can_evict(cache))
-			break;
-
-		/* Prevent evicting already locked items */
-		if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
-				cline)) {
-			continue;
-		}
-
 		ENV_BUG_ON(metadata_test_dirty(cache, cline));
 
-		if (ocf_volume_is_atomic(&cache->device->volume)) {
-			/* atomic cache, we have to trim cache lines before
-			 * eviction
-			 */
-			evp_lru_zero_line(cache, io_queue, cline);
-			continue;
-		}
+		/* TODO: if atomic mode is restored, need to zero metadata
+		 * before proceeding with cleaning (see version <= 20.12) */
+
+		/* find next unmapped cacheline in request */
+		while (req_idx + 1 < req->core_line_count &&
+				req->map[req_idx].status != LOOKUP_MISS) {
+			req_idx++;
+		}
+
+		ENV_BUG_ON(req->map[req_idx].status != LOOKUP_MISS);
 
 		ocf_metadata_start_collision_shared_access(
 				cache, cline);
-		set_cache_line_invalid_no_flush(cache, 0,
-				ocf_line_end_sector(cache),
-				cline);
+		metadata_clear_valid_sec(cache, cline, 0, ocf_line_end_sector(cache));
+		ocf_metadata_remove_from_collision(cache, cline, part->id);
 		ocf_metadata_end_collision_shared_access(
 				cache, cline);
+
+		_lru_unlock_hash(&iter, core_id, core_line);
+
+		env_atomic_dec(&req->core->runtime_meta->cached_clines);
+		env_atomic_dec(&req->core->runtime_meta->
+				part_counters[part->id].cached_clines);
+
+		ocf_map_cache_line(req, req_idx, cline);
+
+		req->map[req_idx].status = LOOKUP_REMAPPED;
+		ocf_engine_patch_req_info(cache, req, req_idx);
+
+		if (cl_write_lock)
+			req->map[req_idx].wr_locked = true;
+		else
+			req->map[req_idx].rd_locked = true;
+
+		++req_idx;
 		++i;
 	}
 
-	part->next_eviction_list = iter.evp;
-
 	if (i < cline_no && dirty_pages_present(cache, part))
-		evp_lru_clean(cache, io_queue, part, cline_no - i);
+		evp_lru_clean(cache, part, req->io_queue, cline_no - i);
 
 	/* Return number of clines that were really evicted */
 	return i;
 }
--- a/src/eviction/lru.h
+++ b/src/eviction/lru.h
@@ -9,11 +9,12 @@
 #include "lru_structs.h"
 
 struct ocf_user_part;
+struct ocf_request;
 
 void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
 void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
 bool evp_lru_can_evict(struct ocf_cache *cache);
-uint32_t evp_lru_req_clines(struct ocf_cache *cache, ocf_queue_t io_queue,
+uint32_t evp_lru_req_clines(struct ocf_request *req,
 		struct ocf_user_part *part, uint32_t cline_no);
 void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
 void evp_lru_init_evp(struct ocf_cache *cache, struct ocf_user_part *part);
@@ -21,5 +22,6 @@ void evp_lru_dirty_cline(struct ocf_cache *cache, struct ocf_user_part *part,
 		uint32_t cline);
 void evp_lru_clean_cline(struct ocf_cache *cache, struct ocf_user_part *part,
 		uint32_t cline);
+void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
+		ocf_queue_t io_queue, uint32_t count);
 
 #endif
--- a/src/eviction/ops.h
+++ b/src/eviction/ops.h
@@ -52,8 +52,8 @@ static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
 	return true;
 }
 
-static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
-		ocf_queue_t io_queue, struct ocf_user_part *part,
+static inline uint32_t ocf_eviction_need_space(ocf_cache_t cache,
+		struct ocf_request *req, struct ocf_user_part *part,
 		uint32_t clines)
 {
 	uint8_t type;
@@ -64,11 +64,7 @@ static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
 	ENV_BUG_ON(type >= ocf_eviction_max);
 
 	if (likely(evict_policy_ops[type].req_clines)) {
-		/*
-		 * This is called under METADATA WR lock. No need to get
-		 * eviction lock.
-		 */
-		result = evict_policy_ops[type].req_clines(cache, io_queue,
+		result = evict_policy_ops[type].req_clines(req,
 				part, clines);
 	}
 
@@ -101,4 +97,18 @@ static inline void ocf_eviction_initialize(struct ocf_cache *cache,
 	}
 }
 
+static inline void ocf_eviction_flush_dirty(ocf_cache_t cache,
+		struct ocf_user_part *part, ocf_queue_t io_queue,
+		uint32_t count)
+{
+	uint8_t type = cache->conf_meta->eviction_policy_type;
+
+	ENV_BUG_ON(type >= ocf_eviction_max);
+
+	if (likely(evict_policy_ops[type].flush_dirty)) {
+		evict_policy_ops[type].flush_dirty(cache, part, io_queue,
+				count);
+	}
+}
+
 #endif /* LAYER_EVICTION_POLICY_OPS_H_ */