Initial commit

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>

commit a8e1ce8cc5
Robert Baldyga, 2018-11-29 15:14:21 +01:00
178 changed files with 35378 additions and 0 deletions

src/eviction/eviction.c (new file, 19 lines)

@@ -0,0 +1,19 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "eviction.h"
struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
[ocf_eviction_lru] = {
.init_cline = evp_lru_init_cline,
.rm_cline = evp_lru_rm_cline,
.req_clines = evp_lru_req_clines,
.hot_cline = evp_lru_hot_cline,
.init_evp = evp_lru_init_evp,
.dirty_cline = evp_lru_dirty_cline,
.clean_cline = evp_lru_clean_cline,
.name = "lru",
},
};
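
The table above is the single registration point for eviction policies: each slot is selected by its eviction type enum value, and the dispatch helpers in ops.h skip any callback a slot leaves unset (NULL). A hypothetical sketch of how a second policy would be registered; ocf_eviction_fifo and the evp_fifo_* callbacks are assumptions for illustration and do not exist in this commit:

/* Hypothetical sketch only: ocf_eviction_fifo and evp_fifo_* are invented
 * here to illustrate the registration pattern; they are not OCF code. */
struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
	[ocf_eviction_lru] = {
		/* ... LRU callbacks as in the table above ... */
		.name = "lru",
	},
	[ocf_eviction_fifo] = {
		.init_cline = evp_fifo_init_cline,
		.rm_cline = evp_fifo_rm_cline,
		.req_clines = evp_fifo_req_clines,
		.hot_cline = evp_fifo_hot_cline,
		.init_evp = evp_fifo_init_evp,
		.name = "fifo",
	},
};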

src/eviction/eviction.h (new file, 56 lines)

@@ -0,0 +1,56 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_EVICTION_POLICY_H__
#define __LAYER_EVICTION_POLICY_H__
#define OCF_PENDING_EVICTION_LIMIT 512UL
#include "ocf/ocf.h"
#include "lru.h"
#include "lru_structs.h"
struct eviction_policy {
union {
struct lru_eviction_policy lru;
} policy;
};
/* Eviction policy metadata per cache line */
union eviction_policy_meta {
struct lru_eviction_policy_meta lru;
} __attribute__((packed));
/* the caller must hold the metadata lock for all operations
*
* For range operations the caller can:
* set core_id to -1 to purge the whole cache device
* set core_id to -2 to purge the whole cache partition
*/
struct eviction_policy_ops {
void (*init_cline)(struct ocf_cache *cache,
ocf_cache_line_t cline);
void (*rm_cline)(struct ocf_cache *cache,
ocf_cache_line_t cline);
bool (*can_evict)(struct ocf_cache *cache);
uint32_t (*req_clines)(struct ocf_cache *cache,
uint32_t io_queue, ocf_part_id_t part_id,
uint32_t cline_no, ocf_core_id_t core_id);
void (*hot_cline)(struct ocf_cache *cache,
ocf_cache_line_t cline);
void (*init_evp)(struct ocf_cache *cache,
ocf_part_id_t part_id);
void (*dirty_cline)(struct ocf_cache *cache,
ocf_part_id_t part_id,
uint32_t cline_no);
void (*clean_cline)(struct ocf_cache *cache,
ocf_part_id_t part_id,
uint32_t cline_no);
const char *name;
};
extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];
#endif

src/eviction/lru.c (new file, 503 lines)

@@ -0,0 +1,503 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "eviction.h"
#include "lru.h"
#include "ops.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"
#include "../mngt/ocf_mngt_common.h"
#include "../engine/engine_zero.h"
#include "../utils/utils_rq.h"
#define OCF_EVICTION_MAX_SCAN 1024
/* -- Start of LRU functions --*/
/* Returns 1 if the given collision_index is the _head_ of
* the LRU list, 0 otherwise.
*/
/* static inline int is_lru_head(unsigned collision_index) {
* return collision_index == lru_list.lru_head;
* }
*/
#define is_lru_head(x) (x == collision_table_entries)
#define is_lru_tail(x) (x == collision_table_entries)
/* Sets the given collision_index as the new _head_ of the LRU list. */
static inline void update_lru_head(struct ocf_cache *cache,
int partition_id, unsigned int collision_index,
int cline_dirty)
{
struct ocf_user_part *part = &cache->user_parts[partition_id];
if (cline_dirty)
part->runtime->eviction.policy.lru.dirty_head = collision_index;
else
part->runtime->eviction.policy.lru.clean_head = collision_index;
}
/* Sets the given collision_index as the new _tail_ of the LRU list. */
static inline void update_lru_tail(struct ocf_cache *cache,
int partition_id, unsigned int collision_index,
int cline_dirty)
{
struct ocf_user_part *part = &cache->user_parts[partition_id];
if (cline_dirty)
part->runtime->eviction.policy.lru.dirty_tail = collision_index;
else
part->runtime->eviction.policy.lru.clean_tail = collision_index;
}
/* Sets the given collision_index as the new _head_ and _tail_ of
* the LRU list.
*/
static inline void update_lru_head_tail(struct ocf_cache *cache,
int partition_id, unsigned int collision_index, int cline_dirty)
{
update_lru_head(cache, partition_id, collision_index, cline_dirty);
update_lru_tail(cache, partition_id, collision_index, cline_dirty);
}
/* Adds the given collision_index to the _head_ of the LRU list */
static void add_lru_head(struct ocf_cache *cache, int partition_id,
unsigned int collision_index, int cline_dirty)
{
unsigned int curr_head_index;
unsigned int collision_table_entries =
cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
union eviction_policy_meta eviction;
ENV_BUG_ON(!(collision_index < collision_table_entries));
ocf_metadata_get_evicition_policy(cache, collision_index, &eviction);
/* First node to be added. */
if ((cline_dirty && !part->runtime->eviction.policy.lru.has_dirty_nodes) ||
(!cline_dirty && !part->runtime->eviction.policy.lru.has_clean_nodes)) {
update_lru_head_tail(cache, partition_id, collision_index, cline_dirty);
eviction.lru.next = collision_table_entries;
eviction.lru.prev = collision_table_entries;
if (cline_dirty)
part->runtime->eviction.policy.lru.has_dirty_nodes = 1;
else
part->runtime->eviction.policy.lru.has_clean_nodes = 1;
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
} else {
union eviction_policy_meta eviction_curr;
/* Not the first node to be added. */
curr_head_index = cline_dirty ?
part->runtime->eviction.policy.lru.dirty_head :
part->runtime->eviction.policy.lru.clean_head;
ENV_BUG_ON(!(curr_head_index < collision_table_entries));
ocf_metadata_get_evicition_policy(cache, curr_head_index,
&eviction_curr);
eviction.lru.next = curr_head_index;
eviction.lru.prev = collision_table_entries;
eviction_curr.lru.prev = collision_index;
update_lru_head(cache, partition_id, collision_index, cline_dirty);
ocf_metadata_set_evicition_policy(cache, curr_head_index,
&eviction_curr);
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
}
}
/* Deletes the node with the given collision_index from the lru list */
static void remove_lru_list(struct ocf_cache *cache, int partition_id,
unsigned int collision_index, int cline_dirty)
{
int is_clean_head = 0, is_clean_tail = 0, is_dirty_head = 0, is_dirty_tail = 0;
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
union eviction_policy_meta eviction;
ENV_BUG_ON(!(collision_index < collision_table_entries));
ocf_metadata_get_evicition_policy(cache, collision_index, &eviction);
/* Find out if this node is LRU _head_ or LRU _tail_ */
if (part->runtime->eviction.policy.lru.clean_head == collision_index)
is_clean_head = 1;
if (part->runtime->eviction.policy.lru.dirty_head == collision_index)
is_dirty_head = 1;
if (part->runtime->eviction.policy.lru.clean_tail == collision_index)
is_clean_tail = 1;
if (part->runtime->eviction.policy.lru.dirty_tail == collision_index)
is_dirty_tail = 1;
ENV_BUG_ON((is_clean_tail || is_clean_head) && (is_dirty_tail || is_dirty_head));
/* Set prev and next (even if not existent) */
next_lru_node = eviction.lru.next;
prev_lru_node = eviction.lru.prev;
/* Case 1: If we are head AND tail, there is only one node.
* So unlink node and set that there is no node left in the list.
*/
if ((is_clean_head && is_clean_tail) || (is_dirty_head && is_dirty_tail)) {
eviction.lru.next = collision_table_entries;
eviction.lru.prev = collision_table_entries;
update_lru_head_tail(cache, partition_id, collision_table_entries, cline_dirty);
if (cline_dirty)
part->runtime->eviction.policy.lru.has_dirty_nodes = 0;
else
part->runtime->eviction.policy.lru.has_clean_nodes = 0;
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
update_lru_head_tail(cache, partition_id,
collision_table_entries, cline_dirty);
}
/* Case 2: else if this collision_index is LRU head, but not tail,
* update head and return
*/
else if ((!is_clean_tail && is_clean_head) || (!is_dirty_tail && is_dirty_head)) {
union eviction_policy_meta eviction_next;
ENV_BUG_ON(!(next_lru_node < collision_table_entries));
ocf_metadata_get_evicition_policy(cache, next_lru_node,
&eviction_next);
update_lru_head(cache, partition_id, next_lru_node, cline_dirty);
eviction.lru.next = collision_table_entries;
eviction_next.lru.prev = collision_table_entries;
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
ocf_metadata_set_evicition_policy(cache, next_lru_node,
&eviction_next);
}
/* Case 3: else if this collision_index is LRU tail, but not head,
* update tail and return
*/
else if ((is_clean_tail && !is_clean_head) || (is_dirty_tail && !is_dirty_head)) {
union eviction_policy_meta eviction_prev;
ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
update_lru_tail(cache, partition_id, prev_lru_node, cline_dirty);
ocf_metadata_get_evicition_policy(cache, prev_lru_node,
&eviction_prev);
eviction.lru.prev = collision_table_entries;
eviction_prev.lru.next = collision_table_entries;
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
ocf_metadata_set_evicition_policy(cache, prev_lru_node,
&eviction_prev);
}
/* Case 4: else this collision_index is a middle node. There is no
* change to the head and the tail pointers.
*/
else {
union eviction_policy_meta eviction_prev;
union eviction_policy_meta eviction_next;
ENV_BUG_ON(!(next_lru_node < collision_table_entries));
ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
ocf_metadata_get_evicition_policy(cache, next_lru_node,
&eviction_next);
ocf_metadata_get_evicition_policy(cache, prev_lru_node,
&eviction_prev);
/* Update prev and next nodes */
eviction_prev.lru.next = eviction.lru.next;
eviction_next.lru.prev = eviction.lru.prev;
/* Update the given node */
eviction.lru.next = collision_table_entries;
eviction.lru.prev = collision_table_entries;
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
ocf_metadata_set_evicition_policy(cache, next_lru_node,
&eviction_next);
ocf_metadata_set_evicition_policy(cache, prev_lru_node,
&eviction_prev);
}
}
/*-- End of LRU functions*/
void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline)
{
union eviction_policy_meta eviction;
ocf_metadata_get_evicition_policy(cache, cline, &eviction);
eviction.lru.prev = cache->device->collision_table_entries;
eviction.lru.next = cache->device->collision_table_entries;
ocf_metadata_set_evicition_policy(cache, cline, &eviction);
}
/* the caller must hold the metadata lock */
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
remove_lru_list(cache, part_id, cline, metadata_test_dirty(cache, cline));
}
static void evp_lru_clean_end(void *private_data, int error)
{
env_atomic *cleaning_in_progress = private_data;
env_atomic_set(cleaning_in_progress, 0);
}
static int evp_lru_clean_getter(struct ocf_cache *cache,
void *getter_context, uint32_t item, ocf_cache_line_t *line)
{
union eviction_policy_meta eviction;
struct ocf_cleaner_attribs *attribs = getter_context;
ocf_cache_line_t prev_cline, curr_cline = attribs->getter_item;
while (curr_cline < cache->device->collision_table_entries) {
ocf_metadata_get_evicition_policy(cache, curr_cline,
&eviction);
prev_cline = eviction.lru.prev;
/* Prevent evicting already locked items */
if (ocf_cache_line_is_used(cache, curr_cline)) {
curr_cline = prev_cline;
continue;
}
ENV_BUG_ON(!metadata_test_dirty(cache, curr_cline));
*line = curr_cline;
attribs->getter_item = prev_cline;
return 0;
}
return -1;
}
static void evp_lru_clean(struct ocf_cache *cache, uint32_t io_queue,
ocf_part_id_t part_id, uint32_t count)
{
env_atomic *progress = &cache->cleaning[part_id];
struct ocf_user_part *part = &cache->user_parts[part_id];
if (ocf_mngt_is_cache_locked(cache))
return;
if (env_atomic_cmpxchg(progress, 0, 1) == 0) {
/* Initialize attributes for cleaner */
struct ocf_cleaner_attribs attribs = {
.cache_line_lock = true,
.do_sort = true,
.cmpl_context = progress,
.cmpl_fn = evp_lru_clean_end,
.getter = evp_lru_clean_getter,
.getter_context = &attribs,
.getter_item = part->runtime->eviction.policy.lru.dirty_tail,
.count = count > 32 ? 32 : count,
.io_queue = io_queue
};
ocf_cleaner_fire(cache, &attribs);
}
}
static void evp_lru_zero_line_complete(struct ocf_request *ocf_req, int error)
{
env_atomic_dec(&ocf_req->cache->pending_eviction_clines);
}
static void evp_lru_zero_line(struct ocf_cache *cache, uint32_t io_queue,
ocf_cache_line_t line)
{
struct ocf_request *rq;
ocf_core_id_t id;
uint64_t addr, core_line;
ocf_metadata_get_core_info(cache, line, &id, &core_line);
addr = core_line * ocf_line_size(cache);
rq = ocf_rq_new(cache, id, addr, ocf_line_size(cache), OCF_WRITE);
if (rq) {
rq->info.internal = true;
rq->complete = evp_lru_zero_line_complete;
rq->io_queue = io_queue;
env_atomic_inc(&cache->pending_eviction_clines);
ocf_engine_zero_line(rq);
}
}
bool evp_lru_can_evict(struct ocf_cache *cache)
{
if (env_atomic_read(&cache->pending_eviction_clines) >=
OCF_PENDING_EVICTION_LIMIT) {
return false;
}
return true;
}
/* the caller must hold the metadata lock */
uint32_t evp_lru_req_clines(struct ocf_cache *cache, uint32_t io_queue,
ocf_part_id_t part_id, uint32_t cline_no, ocf_core_id_t core_id)
{
uint32_t i;
ocf_cache_line_t curr_cline, prev_cline;
struct ocf_user_part *part = &cache->user_parts[part_id];
union eviction_policy_meta eviction;
if (cline_no == 0)
return 0;
i = 0;
curr_cline = part->runtime->eviction.policy.lru.clean_tail;
/* Find cachelines to be evicted. */
while (i < cline_no) {
ENV_BUG_ON(curr_cline > cache->device->collision_table_entries);
if (!evp_lru_can_evict(cache))
break;
if (curr_cline == cache->device->collision_table_entries)
break;
ocf_metadata_get_evicition_policy(cache, curr_cline,
&eviction);
prev_cline = eviction.lru.prev;
/* Prevent evicting already locked items */
if (ocf_cache_line_is_used(cache, curr_cline)) {
curr_cline = prev_cline;
continue;
}
ENV_BUG_ON(metadata_test_dirty(cache, curr_cline));
if (ocf_data_obj_is_atomic(&cache->device->obj)) {
/* atomic cache, we have to trim cache lines before
* eviction
*/
evp_lru_zero_line(cache, io_queue, curr_cline);
} else {
set_cache_line_invalid_no_flush(cache, 0,
ocf_line_end_sector(cache),
curr_cline);
/* Goto next item. */
i++;
}
curr_cline = prev_cline;
}
if (i < cline_no && part->runtime->eviction.policy.lru.dirty_tail !=
cache->device->collision_table_entries) {
evp_lru_clean(cache, io_queue, part_id, cline_no - i);
}
/* Return number of clines that were really evicted */
return i;
}
/* the caller must hold the metadata lock */
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
struct ocf_user_part *part = &cache->user_parts[part_id];
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
union eviction_policy_meta eviction;
int cline_dirty;
ocf_metadata_get_evicition_policy(cache, cline, &eviction);
next_lru_node = eviction.lru.next;
prev_lru_node = eviction.lru.prev;
cline_dirty = metadata_test_dirty(cache, cline);
if ((next_lru_node != collision_table_entries) ||
(prev_lru_node != collision_table_entries) ||
((part->runtime->eviction.policy.lru.clean_head == cline) &&
(part->runtime->eviction.policy.lru.clean_tail == cline)) ||
((part->runtime->eviction.policy.lru.dirty_head == cline) &&
(part->runtime->eviction.policy.lru.dirty_tail == cline))) {
remove_lru_list(cache, part_id, cline, cline_dirty);
}
/* Update LRU */
add_lru_head(cache, part_id, cline, cline_dirty);
}
void evp_lru_init_evp(struct ocf_cache *cache, ocf_part_id_t part_id)
{
unsigned int collision_table_entries =
cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[part_id];
part->runtime->eviction.policy.lru.has_clean_nodes = 0;
part->runtime->eviction.policy.lru.has_dirty_nodes = 0;
part->runtime->eviction.policy.lru.clean_head = collision_table_entries;
part->runtime->eviction.policy.lru.clean_tail = collision_table_entries;
part->runtime->eviction.policy.lru.dirty_head = collision_table_entries;
part->runtime->eviction.policy.lru.dirty_tail = collision_table_entries;
}
void evp_lru_clean_cline(struct ocf_cache *cache, ocf_part_id_t part_id,
uint32_t cline)
{
OCF_METADATA_EVICTION_LOCK();
remove_lru_list(cache, part_id, cline, 1);
add_lru_head(cache, part_id, cline, 0);
OCF_METADATA_EVICTION_UNLOCK();
}
void evp_lru_dirty_cline(struct ocf_cache *cache, ocf_part_id_t part_id,
uint32_t cline)
{
OCF_METADATA_EVICTION_LOCK();
remove_lru_list(cache, part_id, cline, 0);
add_lru_head(cache, part_id, cline, 1);
OCF_METADATA_EVICTION_UNLOCK();
}

src/eviction/lru.h (new file, 23 lines)

@@ -0,0 +1,23 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __EVICTION_LRU_H__
#define __EVICTION_LRU_H__
#include "eviction.h"
#include "lru_structs.h"
void evp_lru_init_cline(struct ocf_cache *cache,
ocf_cache_line_t cline);
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
bool evp_lru_can_evict(struct ocf_cache *cache);
uint32_t evp_lru_req_clines(struct ocf_cache *cache, uint32_t io_queue,
ocf_part_id_t part_id, uint32_t cline_no,
ocf_core_id_t core_id);
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_init_evp(struct ocf_cache *cache, ocf_part_id_t part_id);
void evp_lru_dirty_cline(struct ocf_cache *cache, ocf_part_id_t part_id, uint32_t cline);
void evp_lru_clean_cline(struct ocf_cache *cache, ocf_part_id_t part_id, uint32_t cline);
#endif

src/eviction/lru_structs.h (new file, 24 lines)

@@ -0,0 +1,24 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __EVICTION_LRU_STRUCTS_H__
#define __EVICTION_LRU_STRUCTS_H__
struct lru_eviction_policy_meta {
/* LRU pointers 2*4=8 bytes */
uint32_t prev;
uint32_t next;
} __attribute__((packed));
struct lru_eviction_policy {
int has_clean_nodes;
int has_dirty_nodes;
uint32_t dirty_head;
uint32_t dirty_tail;
uint32_t clean_head;
uint32_t clean_tail;
};
#endif
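
The prev and next fields above hold collision-table indices rather than pointers; throughout lru.c an index equal to collision_table_entries plays the role of NULL, marking the end of a list or a detached node. A minimal standalone sketch of that convention (not OCF code; the fixed TABLE_ENTRIES size and the add_head/remove_node helpers are assumptions for illustration):

/* Minimal standalone sketch (not OCF code): models the index-based list
 * convention used above, where an out-of-range index equal to the table
 * size stands in for NULL, just as collision_table_entries does in lru.c. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_ENTRIES 8u        /* stands in for collision_table_entries */
#define LIST_END TABLE_ENTRIES  /* sentinel: "no neighbour" */

struct node {
	uint32_t prev;
	uint32_t next;
};

static struct node table[TABLE_ENTRIES];
static uint32_t head = LIST_END, tail = LIST_END;

static void add_head(uint32_t idx)
{
	table[idx].prev = LIST_END;
	table[idx].next = head;
	if (head != LIST_END)
		table[head].prev = idx;
	else
		tail = idx;     /* first node is also the tail */
	head = idx;
}

static void remove_node(uint32_t idx)
{
	uint32_t p = table[idx].prev;
	uint32_t n = table[idx].next;

	if (p != LIST_END)
		table[p].next = n;
	else
		head = n;       /* removed the head */
	if (n != LIST_END)
		table[n].prev = p;
	else
		tail = p;       /* removed the tail */
	table[idx].prev = table[idx].next = LIST_END;
}

int main(void)
{
	add_head(3);
	add_head(5);            /* list is now 5 -> 3 */
	remove_node(3);         /* list is now just 5 */
	assert(head == 5 && tail == 5);
	printf("head=%u tail=%u\n", (unsigned)head, (unsigned)tail);
	return 0;
}

add_lru_head() and remove_lru_list() in lru.c do the same index bookkeeping, but they pick the clean or the dirty list based on the line's dirty bit and persist the links through ocf_metadata_set_evicition_policy().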

src/eviction/ops.h (new file, 108 lines)

@@ -0,0 +1,108 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef LAYER_EVICTION_POLICY_OPS_H_
#define LAYER_EVICTION_POLICY_OPS_H_
#include "eviction.h"
#include "../metadata/metadata.h"
/**
* @brief Initialize cache line before adding it into eviction
*
* @note This operation is called under WR metadata lock
*/
static inline void ocf_eviction_init_cache_line(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id)
{
uint8_t type;
type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].init_cline))
evict_policy_ops[type].init_cline(cache, line);
}
static inline void ocf_eviction_purge_cache_line(
struct ocf_cache *cache, ocf_cache_line_t line)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].rm_cline)) {
OCF_METADATA_EVICTION_LOCK();
evict_policy_ops[type].rm_cline(cache, line);
OCF_METADATA_EVICTION_UNLOCK();
}
}
static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
if (likely(evict_policy_ops[type].can_evict))
return evict_policy_ops[type].can_evict(cache);
return true;
}
static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
uint32_t io_queue, ocf_part_id_t part_id, uint32_t clines,
ocf_core_id_t core_id)
{
uint8_t type;
uint32_t result = 0;
ENV_BUG_ON(core_id >= OCF_CORE_MAX);
type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].req_clines)) {
/*
* This is called under METADATA WR lock. No need to get
* eviction lock.
*/
result = evict_policy_ops[type].req_clines(cache, io_queue,
part_id, clines, core_id);
}
return result;
}
static inline void ocf_eviction_set_hot_cache_line(
struct ocf_cache *cache, ocf_cache_line_t line)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].hot_cline)) {
OCF_METADATA_EVICTION_LOCK();
evict_policy_ops[type].hot_cline(cache, line);
OCF_METADATA_EVICTION_UNLOCK();
}
}
static inline void ocf_eviction_initialize(struct ocf_cache *cache,
ocf_part_id_t part_id)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].init_evp)) {
OCF_METADATA_EVICTION_LOCK();
evict_policy_ops[type].init_evp(cache, part_id);
OCF_METADATA_EVICTION_UNLOCK();
}
}
#endif /* LAYER_EVICTION_POLICY_OPS_H_ */
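
The inline helpers above are the only path the rest of OCF is expected to take into a policy: they look up the configured policy type and take the eviction lock where required. A hypothetical caller sketch follows; example_make_room and its parameters are assumptions for illustration, only the ocf_eviction_* calls and their signatures come from this commit, and the function is assumed to run under the metadata WR lock as ops.h requires:

#include "ops.h"

/* Hypothetical helper, not part of this commit: shows the intended use of
 * the dispatch layer when an engine needs free cache lines. */
static int example_make_room(struct ocf_cache *cache, uint32_t io_queue,
		ocf_part_id_t part_id, ocf_core_id_t core_id,
		uint32_t needed_clines)
{
	uint32_t evicted;

	/* Honour the policy's back-pressure limit before asking for space. */
	if (!ocf_eviction_can_evict(cache))
		return -1;

	/* Ask the configured policy (LRU in this commit) to free cache lines. */
	evicted = ocf_eviction_need_space(cache, io_queue, part_id,
			needed_clines, core_id);

	/* The policy may free fewer lines than requested, e.g. when it could
	 * only schedule cleaning of dirty lines; the caller has to cope. */
	return evicted >= needed_clines ? 0 : -1;
}

On a cache hit the same caller would promote the line with ocf_eviction_set_hot_cache_line(cache, line), which for LRU unlinks the line and re-inserts it at the head of its clean or dirty list.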