Multiple LRU lists

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Adam Rutkowski 2020-05-21 12:36:16 +02:00
parent ac83c4ecd6
commit 41a767de97
9 changed files with 276 additions and 111 deletions

View File

@ -9,19 +9,23 @@
int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
{
int err = 0;
unsigned i;
unsigned evp_iter;
unsigned part_iter;
err = env_spinlock_init(&metadata_lock->eviction);
if (err)
return err;
for (evp_iter = 0; evp_iter < OCF_NUM_EVICTION_LISTS; evp_iter++) {
err = env_spinlock_init(&metadata_lock->eviction[evp_iter]);
if (err)
goto eviction_err;
}
env_rwlock_init(&metadata_lock->status);
err = env_rwsem_init(&metadata_lock->global);
if (err)
goto rwsem_err;
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
err = env_spinlock_init(&metadata_lock->partition[i]);
for (part_iter = 0; part_iter < OCF_IO_CLASS_MAX; part_iter++) {
err = env_spinlock_init(&metadata_lock->partition[part_iter]);
if (err)
goto spinlocks_err;
}
@ -29,11 +33,15 @@ int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
return err;
spinlocks_err:
while (i--)
env_spinlock_destroy(&metadata_lock->partition[i]);
while (part_iter--)
env_spinlock_destroy(&metadata_lock->partition[part_iter]);
rwsem_err:
env_rwlock_destroy(&metadata_lock->status);
env_spinlock_destroy(&metadata_lock->eviction);
eviction_err:
while (evp_iter--)
env_spinlock_destroy(&metadata_lock->eviction[evp_iter]);
return err;
}
@ -41,11 +49,12 @@ void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock)
{
unsigned i;
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_IO_CLASS_MAX; i++)
env_spinlock_destroy(&metadata_lock->partition[i]);
}
env_spinlock_destroy(&metadata_lock->eviction);
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
env_spinlock_destroy(&metadata_lock->eviction[i]);
env_rwlock_destroy(&metadata_lock->status);
env_rwsem_destroy(&metadata_lock->global);
}
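
Note on the lock setup above: it follows the usual init-or-unwind pattern, so if any env_spinlock_init() call fails, only the locks that were already initialized are destroyed before the error is returned. A minimal standalone sketch of that pattern, assuming the same env_spinlock wrappers; the helper name eviction_locks_init() is hypothetical:

/* Sketch only: initialize every per-list eviction spinlock, unwinding the
 * ones already initialized if any env_spinlock_init() call fails. */
static int eviction_locks_init(struct ocf_metadata_lock *metadata_lock)
{
	unsigned evp_iter;
	int err = 0;

	for (evp_iter = 0; evp_iter < OCF_NUM_EVICTION_LISTS; evp_iter++) {
		err = env_spinlock_init(&metadata_lock->eviction[evp_iter]);
		if (err)
			break;
	}

	if (err) {
		/* evp_iter indexes the lock that failed; destroy [0, evp_iter) */
		while (evp_iter--)
			env_spinlock_destroy(&metadata_lock->eviction[evp_iter]);
	}

	return err;
}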

View File

@ -3,6 +3,7 @@
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "../ocf_cache_priv.h"
#include "../eviction/eviction.h"
#ifndef __OCF_METADATA_CONCURRENCY_H__
#define __OCF_METADATA_CONCURRENCY_H__
@ -22,17 +23,49 @@ void ocf_metadata_concurrency_attached_deinit(
struct ocf_metadata_lock *metadata_lock);
static inline void ocf_metadata_eviction_lock(
struct ocf_metadata_lock *metadata_lock)
struct ocf_metadata_lock *metadata_lock, unsigned ev_list)
{
env_spinlock_lock(&metadata_lock->eviction);
env_spinlock_lock(&metadata_lock->eviction[ev_list]);
}
static inline void ocf_metadata_eviction_unlock(
struct ocf_metadata_lock *metadata_lock, unsigned ev_list)
{
env_spinlock_unlock(&metadata_lock->eviction[ev_list]);
}
static inline void ocf_metadata_eviction_lock_all(
struct ocf_metadata_lock *metadata_lock)
{
env_spinlock_unlock(&metadata_lock->eviction);
uint32_t i;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
ocf_metadata_eviction_lock(metadata_lock, i);
}
static inline void ocf_metadata_eviction_unlock_all(
struct ocf_metadata_lock *metadata_lock)
{
uint32_t i;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
ocf_metadata_eviction_unlock(metadata_lock, i);
}
#define OCF_METADATA_EVICTION_LOCK(cline) \
ocf_metadata_eviction_lock(&cache->metadata.lock, \
cline % OCF_NUM_EVICTION_LISTS)
#define OCF_METADATA_EVICTION_UNLOCK(cline) \
ocf_metadata_eviction_unlock(&cache->metadata.lock, \
cline % OCF_NUM_EVICTION_LISTS)
#define OCF_METADATA_EVICTION_LOCK_ALL() \
ocf_metadata_eviction_lock_all(&cache->metadata.lock)
#define OCF_METADATA_EVICTION_UNLOCK_ALL() \
ocf_metadata_eviction_unlock_all(&cache->metadata.lock)
static inline void ocf_metadata_partition_lock(
struct ocf_metadata_lock *metadata_lock,
ocf_part_id_t part_id)
@ -47,12 +80,6 @@ static inline void ocf_metadata_partition_unlock(
env_spinlock_unlock(&metadata_lock->partition[part_id]);
}
#define OCF_METADATA_EVICTION_LOCK() \
ocf_metadata_eviction_lock(&cache->metadata.lock)
#define OCF_METADATA_EVICTION_UNLOCK() \
ocf_metadata_eviction_unlock(&cache->metadata.lock)
void ocf_metadata_start_exclusive_access(
struct ocf_metadata_lock *metadata_lock);
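
The key design point in this header is lock granularity: a cache line picks its eviction-list lock as cline % OCF_NUM_EVICTION_LISTS, so per-line operations serialize only against lines that hash to the same list, while operations spanning the whole partition take all list locks. A minimal usage sketch (the function names are hypothetical; as in the patch, the macros expect a local `cache` variable in scope):

/* Sketch only: a per-line operation locks the single list owning the line. */
static void example_touch_line(ocf_cache_t cache, ocf_cache_line_t cline)
{
	/* locks metadata.lock.eviction[cline % OCF_NUM_EVICTION_LISTS] */
	OCF_METADATA_EVICTION_LOCK(cline);
	/* ... move cline within the LRU list that owns it ... */
	OCF_METADATA_EVICTION_UNLOCK(cline);
}

/* Sketch only: an operation touching every list takes all the locks. */
static void example_reinit_lists(ocf_cache_t cache)
{
	OCF_METADATA_EVICTION_LOCK_ALL();
	/* ... walk or reinitialize all OCF_NUM_EVICTION_LISTS lists ... */
	OCF_METADATA_EVICTION_UNLOCK_ALL();
}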

View File

@ -14,6 +14,8 @@
#define OCF_TO_EVICTION_MIN 128UL
#define OCF_PENDING_EVICTION_LIMIT 512UL
#define OCF_NUM_EVICTION_LISTS 32
struct eviction_policy {
union {
struct lru_eviction_policy lru;
@ -41,8 +43,7 @@ struct eviction_policy_ops {
uint32_t cline_no);
void (*hot_cline)(ocf_cache_t cache,
ocf_cache_line_t cline);
void (*init_evp)(ocf_cache_t cache,
ocf_part_id_t part_id);
void (*init_evp)(ocf_cache_t cache, ocf_part_id_t part_id);
void (*dirty_cline)(ocf_cache_t cache,
ocf_part_id_t part_id,
uint32_t cline_no);

View File

@ -163,48 +163,139 @@ void evp_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
node->next = end_marker;
}
static struct ocf_lru_list *evp_lru_get_list(struct ocf_user_part *part,
uint32_t evp, bool clean)
{
return clean ? &part->runtime->eviction[evp].policy.lru.clean :
&part->runtime->eviction[evp].policy.lru.dirty;
}
static inline struct ocf_lru_list *evp_get_cline_list(ocf_cache_t cache,
ocf_cache_line_t cline)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
struct ocf_user_part *part = &cache->user_parts[part_id];
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
return evp_lru_get_list(part, ev_list,
!metadata_test_dirty(cache, cline));
}
/* the caller must hold the metadata lock */
void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
struct ocf_user_part *part = &cache->user_parts[part_id];
struct ocf_lru_list *list;
list = metadata_test_dirty(cache, cline) ?
&part->runtime->eviction.policy.lru.dirty :
&part->runtime->eviction.policy.lru.clean;
list = evp_get_cline_list(cache, cline);
remove_lru_list(cache, list, cline);
}
static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
ocf_part_id_t part_id, uint32_t start_evp, bool clean)
{
struct ocf_user_part *part = &cache->user_parts[part_id];
uint32_t i;
/* entire iterator implementation depends on gcc builtins for
bit operations, which work on 64-bit integers at most */
ENV_BUILD_BUG_ON(OCF_NUM_EVICTION_LISTS > sizeof(iter->evp) * 8);
iter->cache = cache;
iter->part_id = part_id;
iter->part = part;
/* set iterator value to start_evp - 1 modulo OCF_NUM_EVICTION_LISTS */
iter->evp = (start_evp + OCF_NUM_EVICTION_LISTS - 1) % OCF_NUM_EVICTION_LISTS;
iter->num_avail_evps = OCF_NUM_EVICTION_LISTS;
iter->next_avail_evp = ((1ULL << OCF_NUM_EVICTION_LISTS) - 1);
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
iter->curr_cline[i] = evp_lru_get_list(part, i, clean)->tail;
}
static inline uint32_t _lru_next_evp(struct ocf_lru_iter *iter)
{
unsigned increment;
increment = __builtin_ffsll(iter->next_avail_evp);
iter->next_avail_evp = ocf_rotate_right(iter->next_avail_evp,
increment, OCF_NUM_EVICTION_LISTS);
iter->evp = (iter->evp + increment) % OCF_NUM_EVICTION_LISTS;
return iter->evp;
}
static inline bool _lru_evp_is_empty(struct ocf_lru_iter *iter)
{
return !(iter->next_avail_evp & (1ULL << (OCF_NUM_EVICTION_LISTS - 1)));
}
static inline void _lru_evp_set_empty(struct ocf_lru_iter *iter)
{
iter->next_avail_evp &= ~(1ULL << (OCF_NUM_EVICTION_LISTS - 1));
iter->num_avail_evps--;
}
static inline bool _lru_evp_all_empty(struct ocf_lru_iter *iter)
{
return iter->num_avail_evps == 0;
}
/* get next non-empty lru list if available */
static inline ocf_cache_line_t lru_iter_next(struct ocf_lru_iter *iter)
{
struct lru_eviction_policy_meta *node;
uint32_t curr_evp;
ocf_cache_line_t ret;
curr_evp = _lru_next_evp(iter);
while (iter->curr_cline[curr_evp] == end_marker) {
if (!_lru_evp_is_empty(iter)) {
/* mark list as empty */
_lru_evp_set_empty(iter);
}
if (_lru_evp_all_empty(iter)) {
/* all lists empty */
return end_marker;
}
curr_evp = _lru_next_evp(iter);
}
node = &ocf_metadata_get_eviction_policy(iter->cache,
iter->curr_cline[curr_evp])->lru;
ret = iter->curr_cline[curr_evp];
iter->curr_cline[curr_evp] = node->prev;
return ret;
}
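
How the round-robin selection above works: next_avail_evp holds one bit per still-non-empty list, stored relative to the current position, so __builtin_ffsll() returns the distance to the next available list and ocf_rotate_right() re-centers the bitmap so the list just visited lands on the most significant bit (the bit that _lru_evp_set_empty() clears). A small self-contained demonstration, deliberately shrunk to 8 lists instead of OCF_NUM_EVICTION_LISTS:

/* Round-robin list selection, shrunk to 8 lists for illustration.
 * Expected trace: lists 0, 1, 2, 3 are visited in order; after list 1
 * is marked empty its bit stays cleared as the bitmap rotates. */
#include <stdio.h>

#define NUM_LISTS 8

static unsigned long long rotate_right(unsigned long long bits,
		unsigned shift, unsigned width)
{
	/* same formula as ocf_rotate_right() in the patch */
	return ((bits >> shift) | (bits << (width - shift))) &
			((1ULL << width) - 1);
}

int main(void)
{
	unsigned long long avail = (1ULL << NUM_LISTS) - 1; /* all lists non-empty */
	unsigned evp = NUM_LISTS - 1;	/* start_evp == 0, as in lru_iter_init() */
	int step;

	for (step = 0; step < 4; step++) {
		unsigned inc = __builtin_ffsll(avail);	/* distance to next set bit */

		avail = rotate_right(avail, inc, NUM_LISTS);
		evp = (evp + inc) % NUM_LISTS;
		printf("visit list %u, bitmap 0x%02llx\n", evp, avail);

		if (step == 1) {
			/* pretend the list just visited turned out to be empty:
			 * its bit is the MSB after the rotate */
			avail &= ~(1ULL << (NUM_LISTS - 1));
		}
	}
	return 0;
}
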
static void evp_lru_clean_end(void *private_data, int error)
{
struct ocf_refcnt *counter = private_data;
struct ocf_lru_iter *iter = private_data;
ocf_refcnt_dec(counter);
ocf_refcnt_dec(&iter->cache->refcnt.cleaning[iter->part_id]);
}
static int evp_lru_clean_getter(ocf_cache_t cache,
void *getter_context, uint32_t item, ocf_cache_line_t *line)
static int evp_lru_clean_getter(ocf_cache_t cache, void *getter_context,
uint32_t item, ocf_cache_line_t *line)
{
struct ocf_cleaner_attribs *attribs = getter_context;
ocf_cache_line_t prev_cline, curr_cline = attribs->getter_item;
struct ocf_lru_iter *iter = getter_context;
ocf_cache_line_t cline;
while (curr_cline != end_marker) {
prev_cline = ocf_metadata_get_eviction_policy(cache,
curr_cline)->lru.prev;
while (true) {
cline = lru_iter_next(iter);
if (cline == end_marker)
break;
/* Prevent evicting already locked items */
if (ocf_cache_line_is_used(cache, curr_cline)) {
curr_cline = prev_cline;
if (ocf_cache_line_is_used(cache, cline)) {
continue;
}
ENV_BUG_ON(!metadata_test_dirty(cache, curr_cline));
ENV_BUG_ON(!metadata_test_dirty(cache, cline));
*line = curr_cline;
attribs->getter_item = prev_cline;
*line = cline;
return 0;
}
@ -220,12 +311,11 @@ static void evp_lru_clean(ocf_cache_t cache, ocf_queue_t io_queue,
.cache_line_lock = true,
.do_sort = true,
.cmpl_context = counter,
.cmpl_context = &part->eviction_clean_iter,
.cmpl_fn = evp_lru_clean_end,
.getter = evp_lru_clean_getter,
.getter_context = &attribs,
.getter_item = part->runtime->eviction.policy.lru.dirty.tail,
.getter_context = &part->eviction_clean_iter,
.count = count > 32 ? 32 : count,
@ -247,6 +337,9 @@ static void evp_lru_clean(ocf_cache_t cache, ocf_queue_t io_queue,
return;
}
lru_iter_init(&part->eviction_clean_iter, cache, part_id,
part->eviction_clean_iter.evp, false);
ocf_cleaner_fire(cache, &attribs);
}
@ -294,64 +387,72 @@ bool evp_lru_can_evict(ocf_cache_t cache)
return true;
}
static bool dirty_pages_present(ocf_cache_t cache, ocf_part_id_t part_id)
{
uint32_t i;
struct ocf_user_part *part = &cache->user_parts[part_id];
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
if (evp_lru_get_list(part, i, false)->tail != end_marker)
return true;
}
return false;
}
/* the caller must hold the metadata lock */
uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
ocf_part_id_t part_id, uint32_t cline_no)
{
struct ocf_lru_iter iter;
uint32_t i;
ocf_cache_line_t curr_cline, prev_cline;
ocf_cache_line_t cline;
struct ocf_user_part *part = &cache->user_parts[part_id];
if (cline_no == 0)
return 0;
i = 0;
curr_cline = part->runtime->eviction.policy.lru.clean.tail;
/* Find cachelines to be evicted. */
lru_iter_init(&iter, cache, part_id, part->next_eviction_list, true);
i = 0;
while (i < cline_no) {
cline = lru_iter_next(&iter);
if (cline == end_marker)
break;
if (!evp_lru_can_evict(cache))
break;
if (curr_cline == end_marker)
break;
prev_cline = ocf_metadata_get_eviction_policy(cache,
curr_cline)->lru.prev;
/* Prevent evicting already locked items */
if (ocf_cache_line_is_used(cache, curr_cline)) {
curr_cline = prev_cline;
if (ocf_cache_line_is_used(cache, cline))
continue;
}
ENV_BUG_ON(metadata_test_dirty(cache, curr_cline));
ENV_BUG_ON(metadata_test_dirty(cache, cline));
if (ocf_volume_is_atomic(&cache->device->volume)) {
/* atomic cache, we have to trim cache lines before
* eviction
*/
evp_lru_zero_line(cache, io_queue, curr_cline);
} else {
ocf_metadata_start_collision_shared_access(cache,
curr_cline);
set_cache_line_invalid_no_flush(cache, 0,
ocf_line_end_sector(cache),
curr_cline);
ocf_metadata_end_collision_shared_access(cache,
curr_cline);
/* Goto next item. */
i++;
evp_lru_zero_line(cache, io_queue, cline);
continue;
}
curr_cline = prev_cline;
ocf_metadata_start_collision_shared_access(
cache, cline);
set_cache_line_invalid_no_flush(cache, 0,
ocf_line_end_sector(cache),
cline);
ocf_metadata_end_collision_shared_access(
cache, cline);
++i;
}
if (i < cline_no && part->runtime->eviction.policy.lru.dirty.tail !=
end_marker) {
part->next_eviction_list = iter.evp;
if (i < cline_no && dirty_pages_present(cache, part_id))
evp_lru_clean(cache, io_queue, part_id, cline_no - i);
}
/* Return number of clines that were really evicted */
return i;
@ -360,18 +461,11 @@ uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
/* the caller must hold the metadata lock */
void evp_lru_hot_cline(ocf_cache_t cache, ocf_cache_line_t cline)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
struct ocf_user_part *part = &cache->user_parts[part_id];
struct lru_eviction_policy_meta *node;
int cline_dirty;
struct ocf_lru_list *list;
node = &ocf_metadata_get_eviction_policy(cache, cline)->lru;
cline_dirty = metadata_test_dirty(cache, cline);
list = cline_dirty ?
&part->runtime->eviction.policy.lru.dirty :
&part->runtime->eviction.policy.lru.clean;
list = evp_get_cline_list(cache, cline);
if (node->next != end_marker ||
node->prev != end_marker ||
@ -395,43 +489,48 @@ void evp_lru_init_evp(ocf_cache_t cache, ocf_part_id_t part_id)
struct ocf_user_part *part = &cache->user_parts[part_id];
struct ocf_lru_list *clean_list;
struct ocf_lru_list *dirty_list;
uint32_t i;
clean_list = &part->runtime->eviction.policy.lru.clean;
dirty_list = &part->runtime->eviction.policy.lru.dirty;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
clean_list = evp_lru_get_list(part, i, true);
dirty_list = evp_lru_get_list(part, i, false);
_lru_init(clean_list);
_lru_init(dirty_list);
_lru_init(clean_list);
_lru_init(dirty_list);
}
}
void evp_lru_clean_cline(ocf_cache_t cache, ocf_part_id_t part_id,
uint32_t cline)
{
struct ocf_user_part *part = &cache->user_parts[part_id];
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
struct ocf_lru_list *clean_list;
struct ocf_lru_list *dirty_list;
clean_list = &part->runtime->eviction.policy.lru.clean;
dirty_list = &part->runtime->eviction.policy.lru.dirty;
clean_list = evp_lru_get_list(part, ev_list, true);
dirty_list = evp_lru_get_list(part, ev_list, false);
OCF_METADATA_EVICTION_LOCK();
OCF_METADATA_EVICTION_LOCK(cline);
remove_lru_list(cache, dirty_list, cline);
add_lru_head(cache, clean_list, cline);
OCF_METADATA_EVICTION_UNLOCK();
OCF_METADATA_EVICTION_UNLOCK(cline);
}
void evp_lru_dirty_cline(ocf_cache_t cache, ocf_part_id_t part_id,
uint32_t cline)
{
struct ocf_user_part *part = &cache->user_parts[part_id];
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
struct ocf_lru_list *clean_list;
struct ocf_lru_list *dirty_list;
clean_list = &part->runtime->eviction.policy.lru.clean;
dirty_list = &part->runtime->eviction.policy.lru.dirty;
clean_list = evp_lru_get_list(part, ev_list, true);
dirty_list = evp_lru_get_list(part, ev_list, false);
OCF_METADATA_EVICTION_LOCK();
OCF_METADATA_EVICTION_LOCK(cline);
remove_lru_list(cache, clean_list, cline);
add_lru_head(cache, dirty_list, cline);
OCF_METADATA_EVICTION_UNLOCK();
OCF_METADATA_EVICTION_UNLOCK(cline);
}

View File

@ -36,13 +36,12 @@ static inline void ocf_eviction_purge_cache_line(
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].rm_cline)) {
OCF_METADATA_EVICTION_LOCK();
OCF_METADATA_EVICTION_LOCK(line);
evict_policy_ops[type].rm_cline(cache, line);
OCF_METADATA_EVICTION_UNLOCK();
OCF_METADATA_EVICTION_UNLOCK(line);
}
}
static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
@ -83,9 +82,9 @@ static inline void ocf_eviction_set_hot_cache_line(
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].hot_cline)) {
OCF_METADATA_EVICTION_LOCK();
OCF_METADATA_EVICTION_LOCK(line);
evict_policy_ops[type].hot_cline(cache, line);
OCF_METADATA_EVICTION_UNLOCK();
OCF_METADATA_EVICTION_UNLOCK(line);
}
}
@ -97,9 +96,9 @@ static inline void ocf_eviction_initialize(struct ocf_cache *cache,
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].init_evp)) {
OCF_METADATA_EVICTION_LOCK();
OCF_METADATA_EVICTION_LOCK_ALL();
evict_policy_ops[type].init_evp(cache, part_id);
OCF_METADATA_EVICTION_UNLOCK();
OCF_METADATA_EVICTION_UNLOCK_ALL();
}
}

View File

@ -29,15 +29,37 @@ struct ocf_user_part_config {
struct ocf_user_part_runtime {
uint32_t curr_size;
uint32_t head;
struct eviction_policy eviction;
struct eviction_policy eviction[OCF_NUM_EVICTION_LISTS];
struct cleaning_policy cleaning;
};
struct ocf_user_part {
struct ocf_user_part_config *config;
struct ocf_user_part_runtime *runtime;
/* Iterator state, visiting all eviction lists within a partition
in round robin order */
struct ocf_lru_iter {
/* cache object */
ocf_cache_t cache;
/* target partition id */
ocf_part_id_t part_id;
/* target partition */
struct ocf_user_part *part;
/* per-partition cacheline iterator */
ocf_cache_line_t curr_cline[OCF_NUM_EVICTION_LISTS];
/* available (non-empty) eviction list bitmap rotated so that current
@evp is on the most significant bit */
unsigned long long next_avail_evp;
/* number of available eviction lists */
uint32_t num_avail_evps;
/* current eviction list index */
uint32_t evp;
};
struct ocf_lst_entry lst_valid;
struct ocf_user_part {
struct ocf_user_part_config *config;
struct ocf_user_part_runtime *runtime;
struct ocf_lru_iter eviction_clean_iter;
uint32_t next_eviction_list;
struct ocf_lst_entry lst_valid;
};
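
Tying the new per-partition fields together: next_eviction_list lets eviction resume round-robin from where the previous request stopped, and eviction_clean_iter is the persistent iterator that the cleaner receives as both getter and completion context. A condensed sketch of the flow implemented in the LRU code above (the wrapper function is hypothetical; lru_iter_init(), lru_iter_next() and end_marker are the patch's own symbols):

/* Sketch only: how the persistent per-partition iterator state is reused. */
static void example_evict_then_clean(ocf_cache_t cache, ocf_part_id_t part_id,
		uint32_t count)
{
	struct ocf_user_part *part = &cache->user_parts[part_id];
	struct ocf_lru_iter iter;
	uint32_t evicted = 0;
	ocf_cache_line_t cline;

	/* eviction resumes from the list remembered by the previous request */
	lru_iter_init(&iter, cache, part_id, part->next_eviction_list, true);
	while (evicted < count) {
		cline = lru_iter_next(&iter);
		if (cline == end_marker)
			break;
		/* ... invalidate cline as evp_lru_req_clines() does ... */
		evicted++;
	}
	part->next_eviction_list = iter.evp;

	/* not enough clean lines: restart the persistent dirty-list iterator
	 * and hand it to the cleaner, as evp_lru_clean() does in the patch */
	if (evicted < count) {
		lru_iter_init(&part->eviction_clean_iter, cache, part_id,
				part->eviction_clean_iter.evp, false);
		/* ... ocf_cleaner_fire() with evp_lru_clean_getter ... */
	}
}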

View File

@ -412,7 +412,7 @@ struct ocf_metadata_lock
{
env_rwsem global; /*!< global metadata lock (GML) */
env_rwlock status; /*!< Fast lock for status bits */
env_spinlock eviction; /*!< Fast lock for eviction policy */
env_spinlock eviction[OCF_NUM_EVICTION_LISTS]; /*!< Fast lock for eviction policy */
env_rwsem *hash; /*!< Hash bucket locks */
env_rwsem *collision_pages; /*!< Collision table page locks */
env_spinlock partition[OCF_IO_CLASS_MAX]; /* partition lock */

View File

@ -49,4 +49,11 @@
/* call conditional reschedule with default interval */
#define OCF_COND_RESCHED_DEFAULT(cnt) OCF_COND_RESCHED(cnt, 1000000)
static inline unsigned long long
ocf_rotate_right(unsigned long long bits, unsigned shift, unsigned width)
{
return ((bits >> shift) | (bits << (width - shift))) &
((1ULL << width) - 1);
}
#endif
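
A quick worked example of the width-limited rotation (the concrete values are mine, not from the patch): rotating 10110001b right by 3 within an 8-bit window wraps the low three bits around to the top, giving 00110110b.

#include <assert.h>

/* copy of the helper above so the example compiles standalone */
static inline unsigned long long
ocf_rotate_right(unsigned long long bits, unsigned shift, unsigned width)
{
	return ((bits >> shift) | (bits << (width - shift))) &
			((1ULL << width) - 1);
}

int main(void)
{
	/* 0xB1 == 10110001b, rotated right by 3 in width 8 -> 00110110b == 0x36 */
	assert(ocf_rotate_right(0xB1, 3, 8) == 0x36);
	return 0;
}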

View File

@ -19,7 +19,8 @@
* @retval Non-zero Means skip this cache line and do not clean it
*/
typedef int (*ocf_cleaner_get_item)(struct ocf_cache *cache,
void *getter_context, uint32_t item, ocf_cache_line_t *line);
void *getter_context, uint32_t item,
ocf_cache_line_t *line);
/**
* @brief Cleaning attributes for clean request