Merge pull request #507 from arutk/remove_part_list

remove partition list
This commit is contained in:
Robert Baldyga 2021-06-21 15:49:35 +02:00 committed by GitHub
commit c8268245ba
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
88 changed files with 2019 additions and 3858 deletions

View File

@ -72,9 +72,6 @@ struct ocf_cache_info {
set as a result of reaching IO error threshold */
} fallback_pt;
ocf_eviction_t eviction_policy;
/*!< Eviction policy selected */
ocf_cleaning_t cleaning_policy;
/*!< Cleaning policy selected */

View File

@ -199,20 +199,6 @@ typedef enum {
/*!< Default sequential cutoff policy*/
} ocf_seq_cutoff_policy;
/**
* OCF supported eviction policy types
*/
typedef enum {
ocf_eviction_lru = 0,
/*!< Last recently used eviction policy */
ocf_eviction_max,
/*!< Stopper of enumerator */
ocf_eviction_default = ocf_eviction_lru,
/*!< Default eviction policy */
} ocf_eviction_t;
/**
* OCF supported promotion policy types
*/
@ -308,7 +294,7 @@ typedef enum {
/**
* Maximum numbers of IO classes per cache instance
*/
#define OCF_IO_CLASS_MAX OCF_CONFIG_MAX_IO_CLASSES
#define OCF_USER_IO_CLASS_MAX OCF_CONFIG_MAX_IO_CLASSES
/**
* Minimum value of a valid IO class ID
*/
@ -316,11 +302,11 @@ typedef enum {
/**
* Maximum value of a valid IO class ID
*/
#define OCF_IO_CLASS_ID_MAX (OCF_IO_CLASS_MAX - 1)
#define OCF_IO_CLASS_ID_MAX (OCF_USER_IO_CLASS_MAX - 1)
/**
* Invalid value of IO class id
*/
#define OCF_IO_CLASS_INVALID OCF_IO_CLASS_MAX
#define OCF_IO_CLASS_INVALID OCF_USER_IO_CLASS_MAX
/** Maximum size of the IO class name */
#define OCF_IO_CLASS_NAME_MAX 1024

View File

@ -44,9 +44,6 @@ struct ocf_io_class_info {
* of ioclass's cachelines are evicted.
*/
uint8_t eviction_policy_type;
/*!< The type of eviction policy for given IO class */
ocf_cleaning_t cleaning_policy_type;
/*!< The type of cleaning policy for given IO class */
};

View File

@ -246,11 +246,6 @@ struct ocf_mngt_cache_config {
*/
ocf_cache_mode_t cache_mode;
/**
* @brief Eviction policy type
*/
ocf_eviction_t eviction_policy;
/**
* @brief Promotion policy type
*/
@ -307,7 +302,6 @@ static inline void ocf_mngt_cache_config_set_default(
struct ocf_mngt_cache_config *cfg)
{
cfg->cache_mode = ocf_cache_mode_default;
cfg->eviction_policy = ocf_eviction_default;
cfg->promotion_policy = ocf_promotion_default;
cfg->cache_line_size = ocf_cache_line_size_4;
cfg->metadata_layout = ocf_metadata_layout_default;
@ -874,7 +868,7 @@ struct ocf_mngt_io_class_config {
};
struct ocf_mngt_io_classes_config {
struct ocf_mngt_io_class_config config[OCF_IO_CLASS_MAX];
struct ocf_mngt_io_class_config config[OCF_USER_IO_CLASS_MAX];
};
/**

View File

@ -654,7 +654,7 @@ void cleaning_policy_acp_purge_block(struct ocf_cache *cache,
int cleaning_policy_acp_purge_range(struct ocf_cache *cache,
int core_id, uint64_t start_byte, uint64_t end_byte)
{
return ocf_metadata_actor(cache, PARTITION_INVALID,
return ocf_metadata_actor(cache, PARTITION_UNSPECIFIED,
core_id, start_byte, end_byte,
cleaning_policy_acp_purge_block);
}

View File

@ -9,7 +9,7 @@
#include "alru.h"
#include "../metadata/metadata.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_realloc.h"
#include "../concurrency/ocf_cache_line_concurrency.h"
#include "../ocf_def_priv.h"
@ -55,62 +55,33 @@ struct alru_flush_ctx {
struct alru_context {
struct alru_flush_ctx flush_ctx;
env_spinlock list_lock[OCF_IO_CLASS_MAX];
env_spinlock list_lock[OCF_USER_IO_CLASS_MAX];
};
/* -- Start of ALRU functions -- */
/* Sets the given collision_index as the new _head_ of the ALRU list. */
static inline void update_alru_head(struct ocf_cache *cache,
int partition_id, unsigned int collision_index)
{
struct ocf_user_part *part = &cache->user_parts[partition_id];
part->runtime->cleaning.policy.alru.lru_head = collision_index;
}
/* Sets the given collision_index as the new _tail_ of the ALRU list. */
static inline void update_alru_tail(struct ocf_cache *cache,
int partition_id, unsigned int collision_index)
{
struct ocf_user_part *part = &cache->user_parts[partition_id];
part->runtime->cleaning.policy.alru.lru_tail = collision_index;
}
/* Sets the given collision_index as the new _head_ and _tail_
* of the ALRU list.
*/
static inline void update_alru_head_tail(struct ocf_cache *cache,
int partition_id, unsigned int collision_index)
{
update_alru_head(cache, partition_id, collision_index);
update_alru_tail(cache, partition_id, collision_index);
}
/* Adds the given collision_index to the _head_ of the ALRU list */
static void add_alru_head(struct ocf_cache *cache, int partition_id,
unsigned int collision_index)
{
unsigned int curr_head_index;
unsigned int collision_table_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
struct alru_cleaning_policy *part_alru = &cache->user_parts[partition_id]
.clean_pol->policy.alru;
struct alru_cleaning_policy_meta *alru;
ENV_BUG_ON(!(collision_index < collision_table_entries));
ENV_BUG_ON(env_atomic_read(
&part->runtime->cleaning.policy.alru.size) < 0);
ENV_BUG_ON(env_atomic_read(&part_alru->size) < 0);
ENV_WARN_ON(!metadata_test_dirty(cache, collision_index));
ENV_WARN_ON(!metadata_test_valid_any(cache, collision_index));
/* First node to be added/ */
if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
update_alru_head_tail(cache, partition_id, collision_index);
if (env_atomic_read(&part_alru->size) == 0) {
part_alru->lru_head = collision_index;
part_alru->lru_tail = collision_index;
alru = &ocf_metadata_get_cleaning_policy(cache,
collision_index)->meta.alru;
@ -121,7 +92,7 @@ static void add_alru_head(struct ocf_cache *cache, int partition_id,
} else {
/* Not the first node to be added. */
curr_head_index = part->runtime->cleaning.policy.alru.lru_head;
curr_head_index = part_alru->lru_head;
ENV_BUG_ON(!(curr_head_index < collision_table_entries));
@ -136,10 +107,10 @@ static void add_alru_head(struct ocf_cache *cache, int partition_id,
curr_head_index)->meta.alru;
alru->lru_prev = collision_index;
update_alru_head(cache, partition_id, collision_index);
part_alru->lru_head = collision_index;
}
env_atomic_inc(&part->runtime->cleaning.policy.alru.size);
env_atomic_inc(&part_alru->size);
}
/* Deletes the node with the given collision_index from the ALRU list */
@ -148,14 +119,13 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
{
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
struct alru_cleaning_policy *cleaning_policy =
&part->runtime->cleaning.policy.alru;
struct alru_cleaning_policy *part_alru = &cache->user_parts[partition_id]
.clean_pol->policy.alru;
struct alru_cleaning_policy_meta *alru;
ENV_BUG_ON(!(collision_index < collision_table_entries));
if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
if (env_atomic_read(&part_alru->size) == 0) {
ocf_cache_log(cache, log_err, "ERROR: Attempt to remove item "
"from empty ALRU Cleaning Policy queue!\n");
ENV_BUG();
@ -170,29 +140,29 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
/* Check if entry is not part of the ALRU list */
if ((next_lru_node == collision_table_entries) &&
(prev_lru_node == collision_table_entries) &&
(cleaning_policy->lru_head != collision_index) &&
(cleaning_policy->lru_tail != collision_index)) {
(part_alru->lru_head != collision_index) &&
(part_alru->lru_tail != collision_index)) {
return;
}
/* Case 0: If we are head AND tail, there is only one node. So unlink
* node and set that there is no node left in the list.
*/
if (cleaning_policy->lru_head == collision_index &&
cleaning_policy->lru_tail == collision_index) {
if (part_alru->lru_head == collision_index &&
part_alru->lru_tail == collision_index) {
alru->lru_next = collision_table_entries;
alru->lru_prev = collision_table_entries;
update_alru_head_tail(cache, partition_id,
collision_table_entries);
part_alru->lru_head = collision_table_entries;
part_alru->lru_tail = collision_table_entries;
}
/* Case 1: else if this collision_index is ALRU head, but not tail,
* update head and return
*/
else if ((cleaning_policy->lru_tail != collision_index) &&
(cleaning_policy->lru_head == collision_index)) {
else if ((part_alru->lru_tail != collision_index) &&
(part_alru->lru_head == collision_index)) {
struct alru_cleaning_policy_meta *next_alru;
ENV_BUG_ON(!(next_lru_node < collision_table_entries));
@ -200,7 +170,7 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
next_alru = &ocf_metadata_get_cleaning_policy(cache,
next_lru_node)->meta.alru;
update_alru_head(cache, partition_id, next_lru_node);
part_alru->lru_head = next_lru_node;
alru->lru_next = collision_table_entries;
next_alru->lru_prev = collision_table_entries;
@ -210,8 +180,8 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
/* Case 2: else if this collision_index is ALRU tail, but not head,
* update tail and return
*/
else if ((cleaning_policy->lru_head != collision_index) &&
(cleaning_policy->lru_tail == collision_index)) {
else if ((part_alru->lru_head != collision_index) &&
(part_alru->lru_tail == collision_index)) {
struct alru_cleaning_policy_meta *prev_alru;
ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
@ -219,7 +189,7 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
prev_alru = &ocf_metadata_get_cleaning_policy(cache,
prev_lru_node)->meta.alru;
update_alru_tail(cache, partition_id, prev_lru_node);
part_alru->lru_tail = prev_lru_node;
alru->lru_prev = collision_table_entries;
prev_alru->lru_next = collision_table_entries;
@ -249,7 +219,7 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
}
env_atomic_dec(&part->runtime->cleaning.policy.alru.size);
env_atomic_dec(&part_alru->size);
}
static bool is_on_alru_list(struct ocf_cache *cache, int partition_id,
@ -257,9 +227,8 @@ static bool is_on_alru_list(struct ocf_cache *cache, int partition_id,
{
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
struct alru_cleaning_policy *cleaning_policy =
&part->runtime->cleaning.policy.alru;
struct alru_cleaning_policy *part_alru = &cache->user_parts[partition_id]
.clean_pol->policy.alru;
struct alru_cleaning_policy_meta *alru;
ENV_BUG_ON(!(collision_index < collision_table_entries));
@ -270,8 +239,8 @@ static bool is_on_alru_list(struct ocf_cache *cache, int partition_id,
next_lru_node = alru->lru_next;
prev_lru_node = alru->lru_prev;
return cleaning_policy->lru_tail == collision_index ||
cleaning_policy->lru_head == collision_index ||
return part_alru->lru_tail == collision_index ||
part_alru->lru_head == collision_index ||
next_lru_node != collision_table_entries ||
prev_lru_node != collision_table_entries;
}
@ -321,13 +290,12 @@ static void __cleaning_policy_alru_purge_cache_block_any(
int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte) {
struct ocf_user_part *part;
struct ocf_user_part *user_part;
ocf_part_id_t part_id;
int ret = 0;
for_each_part(cache, part, part_id) {
if (env_atomic_read(&part->runtime->cleaning.
policy.alru.size) == 0)
for_each_user_part(cache, user_part, part_id) {
if (env_atomic_read(&user_part->clean_pol->policy.alru.size) == 0)
continue;
ret |= ocf_metadata_actor(cache, part_id,
@ -344,8 +312,8 @@ void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
struct alru_context *ctx = cache->cleaner.cleaning_policy_context;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
cache_line);
struct ocf_user_part *part = &cache->user_parts[part_id];
struct alru_cleaning_policy *part_alru = &cache->user_parts[part_id]
.clean_pol->policy.alru;
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
struct alru_cleaning_policy_meta *alru;
@ -362,10 +330,8 @@ void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
if ((next_lru_node != collision_table_entries) ||
(prev_lru_node != collision_table_entries) ||
((part->runtime->cleaning.policy.
alru.lru_head == cache_line) &&
(part->runtime->cleaning.policy.
alru.lru_tail == cache_line)))
((part_alru->lru_head == cache_line) &&
(part_alru->lru_tail == cache_line)))
remove_alru_list(cache, part_id, cache_line);
add_alru_head(cache, part_id, cache_line);
@ -375,19 +341,19 @@ void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
static void _alru_rebuild(struct ocf_cache *cache)
{
struct ocf_user_part *part;
struct ocf_user_part *user_part;
struct alru_cleaning_policy *part_alru;
ocf_part_id_t part_id;
ocf_core_id_t core_id;
ocf_cache_line_t cline;
uint32_t step = 0;
for_each_part(cache, part, part_id) {
for_each_user_part(cache, user_part, part_id) {
/* ALRU initialization */
env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
part->runtime->cleaning.policy.alru.lru_head =
cache->device->collision_table_entries;
part->runtime->cleaning.policy.alru.lru_tail =
cache->device->collision_table_entries;
part_alru = &user_part->clean_pol->policy.alru;
env_atomic_set(&part_alru->size, 0);
part_alru->lru_head = cache->device->collision_table_entries;
part_alru->lru_tail = cache->device->collision_table_entries;
cache->device->runtime_meta->cleaning_thread_access = 0;
}
@ -410,15 +376,16 @@ static void _alru_rebuild(struct ocf_cache *cache)
}
static int cleaning_policy_alru_initialize_part(struct ocf_cache *cache,
struct ocf_user_part *part, int init_metadata)
struct ocf_user_part *user_part, int init_metadata)
{
struct alru_cleaning_policy *part_alru =
&user_part->clean_pol->policy.alru;
if (init_metadata) {
/* ALRU initialization */
env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
part->runtime->cleaning.policy.alru.lru_head =
cache->device->collision_table_entries;
part->runtime->cleaning.policy.alru.lru_tail =
cache->device->collision_table_entries;
env_atomic_set(&part_alru->size, 0);
part_alru->lru_head = cache->device->collision_table_entries;
part_alru->lru_tail = cache->device->collision_table_entries;
}
cache->device->runtime_meta->cleaning_thread_access = 0;
@ -440,7 +407,7 @@ void cleaning_policy_alru_setup(struct ocf_cache *cache)
int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata)
{
struct ocf_user_part *part;
struct ocf_user_part *user_part;
ocf_part_id_t part_id;
struct alru_context *ctx;
int error = 0;
@ -452,7 +419,7 @@ int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata)
return -OCF_ERR_NO_MEM;
}
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
error = env_spinlock_init(&ctx->list_lock[i]);
if (error)
break;
@ -468,9 +435,9 @@ int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata)
cache->cleaner.cleaning_policy_context = ctx;
for_each_part(cache, part, part_id) {
for_each_user_part(cache, user_part, part_id) {
cleaning_policy_alru_initialize_part(cache,
part, init_metadata);
user_part, init_metadata);
}
if (init_metadata)
@ -486,7 +453,7 @@ void cleaning_policy_alru_deinitialize(struct ocf_cache *cache)
struct alru_context *alru = cache->cleaner.cleaning_policy_context;
unsigned i;
for (i = 0; i < OCF_IO_CLASS_MAX; i++)
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++)
env_spinlock_destroy(&alru->list_lock[i]);
env_vfree(cache->cleaner.cleaning_policy_context);
@ -697,17 +664,17 @@ static int get_data_to_flush(struct alru_context *ctx)
struct alru_cleaning_policy_config *config;
struct alru_cleaning_policy_meta *alru;
ocf_cache_line_t cache_line;
struct ocf_user_part *part;
struct ocf_user_part *user_part;
uint32_t last_access;
int to_flush = 0;
int part_id = OCF_IO_CLASS_ID_MAX;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
for_each_part(cache, part, part_id) {
for_each_user_part(cache, user_part, part_id) {
env_spinlock_lock(&ctx->list_lock[part_id]);
cache_line = part->runtime->cleaning.policy.alru.lru_tail;
cache_line = user_part->clean_pol->policy.alru.lru_tail;
last_access = compute_timestamp(config);

View File

@ -233,15 +233,6 @@ bool ocf_cache_line_are_waiters(struct ocf_alock *alock,
return !ocf_alock_waitlist_is_empty(alock, line);
}
bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
ocf_cache_line_t line)
{
struct ocf_alock *alock =
ocf_cache_line_concurrency(cache);
return ocf_alock_is_locked_exclusively(alock, line);
}
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_alock *alock)
{
return ocf_alock_waitlist_count(alock);

View File

@ -141,9 +141,6 @@ bool ocf_cache_line_is_used(struct ocf_alock *c,
bool ocf_cache_line_are_waiters(struct ocf_alock *c,
ocf_cache_line_t line);
bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
ocf_cache_line_t line);
/**
* @brief un_lock request map info entry from from write or read access.
*

View File

@ -14,8 +14,8 @@ int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
unsigned part_iter;
unsigned global_iter;
for (evp_iter = 0; evp_iter < OCF_NUM_EVICTION_LISTS; evp_iter++)
env_rwlock_init(&metadata_lock->eviction[evp_iter]);
for (evp_iter = 0; evp_iter < OCF_NUM_LRU_LISTS; evp_iter++)
env_rwlock_init(&metadata_lock->lru[evp_iter]);
for (global_iter = 0; global_iter < OCF_NUM_GLOBAL_META_LOCKS;
global_iter++) {
@ -24,7 +24,7 @@ int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
goto global_err;
}
for (part_iter = 0; part_iter < OCF_IO_CLASS_MAX; part_iter++) {
for (part_iter = 0; part_iter < OCF_USER_IO_CLASS_MAX; part_iter++) {
err = env_spinlock_init(&metadata_lock->partition[part_iter]);
if (err)
goto partition_err;
@ -41,7 +41,7 @@ global_err:
env_rwsem_destroy(&metadata_lock->global[global_iter].sem);
while (evp_iter--)
env_rwlock_destroy(&metadata_lock->eviction[evp_iter]);
env_rwlock_destroy(&metadata_lock->lru[evp_iter]);
return err;
}
@ -50,11 +50,11 @@ void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock)
{
unsigned i;
for (i = 0; i < OCF_IO_CLASS_MAX; i++)
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++)
env_spinlock_destroy(&metadata_lock->partition[i]);
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
env_rwlock_destroy(&metadata_lock->eviction[i]);
for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
env_rwlock_destroy(&metadata_lock->lru[i]);
for (i = 0; i < OCF_NUM_GLOBAL_META_LOCKS; i++)
env_rwsem_destroy(&metadata_lock->global[i].sem);

View File

@ -3,7 +3,7 @@
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "../ocf_cache_priv.h"
#include "../eviction/eviction.h"
#include "../ocf_space.h"
#include "../ocf_queue_priv.h"
#ifndef __OCF_METADATA_CONCURRENCY_H__
@ -28,69 +28,69 @@ int ocf_metadata_concurrency_attached_init(
void ocf_metadata_concurrency_attached_deinit(
struct ocf_metadata_lock *metadata_lock);
static inline void ocf_metadata_eviction_wr_lock(
static inline void ocf_metadata_lru_wr_lock(
struct ocf_metadata_lock *metadata_lock, unsigned ev_list)
{
env_rwlock_write_lock(&metadata_lock->eviction[ev_list]);
env_rwlock_write_lock(&metadata_lock->lru[ev_list]);
}
static inline void ocf_metadata_eviction_wr_unlock(
static inline void ocf_metadata_lru_wr_unlock(
struct ocf_metadata_lock *metadata_lock, unsigned ev_list)
{
env_rwlock_write_unlock(&metadata_lock->eviction[ev_list]);
env_rwlock_write_unlock(&metadata_lock->lru[ev_list]);
}
static inline void ocf_metadata_eviction_rd_lock(
static inline void ocf_metadata_lru_rd_lock(
struct ocf_metadata_lock *metadata_lock, unsigned ev_list)
{
env_rwlock_read_lock(&metadata_lock->eviction[ev_list]);
env_rwlock_read_lock(&metadata_lock->lru[ev_list]);
}
static inline void ocf_metadata_eviction_rd_unlock(
static inline void ocf_metadata_lru_rd_unlock(
struct ocf_metadata_lock *metadata_lock, unsigned ev_list)
{
env_rwlock_read_unlock(&metadata_lock->eviction[ev_list]);
env_rwlock_read_unlock(&metadata_lock->lru[ev_list]);
}
static inline void ocf_metadata_eviction_wr_lock_all(
static inline void ocf_metadata_lru_wr_lock_all(
struct ocf_metadata_lock *metadata_lock)
{
uint32_t i;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
ocf_metadata_eviction_wr_lock(metadata_lock, i);
for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
ocf_metadata_lru_wr_lock(metadata_lock, i);
}
static inline void ocf_metadata_eviction_wr_unlock_all(
static inline void ocf_metadata_lru_wr_unlock_all(
struct ocf_metadata_lock *metadata_lock)
{
uint32_t i;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
ocf_metadata_eviction_wr_unlock(metadata_lock, i);
for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
ocf_metadata_lru_wr_unlock(metadata_lock, i);
}
#define OCF_METADATA_EVICTION_WR_LOCK(cline) \
ocf_metadata_eviction_wr_lock(&cache->metadata.lock, \
cline % OCF_NUM_EVICTION_LISTS)
#define OCF_METADATA_LRU_WR_LOCK(cline) \
ocf_metadata_lru_wr_lock(&cache->metadata.lock, \
cline % OCF_NUM_LRU_LISTS)
#define OCF_METADATA_EVICTION_WR_UNLOCK(cline) \
ocf_metadata_eviction_wr_unlock(&cache->metadata.lock, \
cline % OCF_NUM_EVICTION_LISTS)
#define OCF_METADATA_LRU_WR_UNLOCK(cline) \
ocf_metadata_lru_wr_unlock(&cache->metadata.lock, \
cline % OCF_NUM_LRU_LISTS)
#define OCF_METADATA_EVICTION_RD_LOCK(cline) \
ocf_metadata_eviction_rd_lock(&cache->metadata.lock, \
cline % OCF_NUM_EVICTION_LISTS)
#define OCF_METADATA_LRU_RD_LOCK(cline) \
ocf_metadata_lru_rd_lock(&cache->metadata.lock, \
cline % OCF_NUM_LRU_LISTS)
#define OCF_METADATA_EVICTION_RD_UNLOCK(cline) \
ocf_metadata_eviction_rd_unlock(&cache->metadata.lock, \
cline % OCF_NUM_EVICTION_LISTS)
#define OCF_METADATA_LRU_RD_UNLOCK(cline) \
ocf_metadata_lru_rd_unlock(&cache->metadata.lock, \
cline % OCF_NUM_LRU_LISTS)
#define OCF_METADATA_EVICTION_WR_LOCK_ALL() \
ocf_metadata_eviction_wr_lock_all(&cache->metadata.lock)
#define OCF_METADATA_LRU_WR_LOCK_ALL() \
ocf_metadata_lru_wr_lock_all(&cache->metadata.lock)
#define OCF_METADATA_EVICTION_WR_UNLOCK_ALL() \
ocf_metadata_eviction_wr_unlock_all(&cache->metadata.lock)
#define OCF_METADATA_LRU_WR_UNLOCK_ALL() \
ocf_metadata_lru_wr_unlock_all(&cache->metadata.lock)
static inline void ocf_metadata_partition_lock(
struct ocf_metadata_lock *metadata_lock,

View File

@ -21,11 +21,11 @@
#include "engine_discard.h"
#include "engine_d2c.h"
#include "engine_ops.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_refcnt.h"
#include "../ocf_request.h"
#include "../metadata/metadata.h"
#include "../eviction/eviction.h"
#include "../ocf_space.h"
enum ocf_io_if_type {
/* Public OCF IO interfaces to be set by user */
@ -192,8 +192,8 @@ void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
return;
}
req->cache_mode = ocf_part_get_cache_mode(cache,
ocf_part_class2id(cache, req->part_id));
req->cache_mode = ocf_user_part_get_cache_mode(cache,
ocf_user_part_class2id(cache, req->part_id));
if (!ocf_cache_mode_is_valid(req->cache_mode))
req->cache_mode = cache->conf_meta->cache_mode;

View File

@ -11,8 +11,7 @@ struct ocf_request;
#define LOOKUP_HIT 5
#define LOOKUP_MISS 6
#define LOOKUP_INSERTED 8
#define LOOKUP_REMAPPED 9
#define LOOKUP_REMAPPED 8
typedef enum {
/* modes inherited from user API */

View File

@ -7,16 +7,15 @@
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "../ocf_freelist.h"
#include "engine_common.h"
#define OCF_ENGINE_DEBUG_IO_NAME "common"
#include "engine_debug.h"
#include "../utils/utils_cache_line.h"
#include "../ocf_request.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../metadata/metadata.h"
#include "../eviction/eviction.h"
#include "../ocf_space.h"
#include "../promotion/promotion.h"
#include "../concurrency/ocf_concurrency.h"
@ -123,15 +122,6 @@ void ocf_engine_patch_req_info(struct ocf_cache *cache,
req->info.insert_no++;
if (req->part_id != ocf_metadata_get_partition_id(cache,
entry->coll_idx)) {
/*
* Need to move this cache line into other partition
*/
entry->re_part = true;
req->info.re_part_no++;
}
if (idx > 0 && ocf_engine_clines_phys_cont(req, idx - 1))
req->info.seq_no++;
if (idx + 1 < req->core_line_count &&
@ -152,8 +142,7 @@ static void ocf_engine_update_req_info(struct ocf_cache *cache,
ENV_BUG_ON(entry->status != LOOKUP_HIT &&
entry->status != LOOKUP_MISS &&
entry->status != LOOKUP_REMAPPED &&
entry->status != LOOKUP_INSERTED);
entry->status != LOOKUP_REMAPPED);
/* Handle return value */
if (entry->status == LOOKUP_HIT) {
@ -187,10 +176,8 @@ static void ocf_engine_update_req_info(struct ocf_cache *cache,
}
if (entry->status == LOOKUP_INSERTED ||
entry->status == LOOKUP_REMAPPED) {
if (entry->status == LOOKUP_REMAPPED)
req->info.insert_no++;
}
/* Check if cache hit is sequential */
if (idx > 0 && ocf_engine_clines_phys_cont(req, idx - 1))
@ -215,7 +202,7 @@ static void ocf_engine_set_hot(struct ocf_request *req)
if (status == LOOKUP_HIT) {
/* Update eviction (LRU) */
ocf_eviction_set_hot_cache_line(cache, entry->coll_idx);
ocf_lru_hot_cline(cache, entry->coll_idx);
}
}
}
@ -336,26 +323,6 @@ void ocf_map_cache_line(struct ocf_request *req,
}
static void ocf_engine_map_cache_line(struct ocf_request *req,
unsigned int idx)
{
struct ocf_cache *cache = req->cache;
ocf_cache_line_t cache_line;
if (!ocf_freelist_get_cache_line(cache->freelist, &cache_line)) {
ocf_req_set_mapping_error(req);
return;
}
ocf_metadata_add_to_partition(cache, req->part_id, cache_line);
ocf_map_cache_line(req, idx, cache_line);
/* Update LRU:: Move this node to head of lru list. */
ocf_eviction_init_cache_line(cache, cache_line);
ocf_eviction_set_hot_cache_line(cache, cache_line);
}
static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
struct ocf_request *req)
{
@ -370,7 +337,6 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
case LOOKUP_MISS:
break;
case LOOKUP_INSERTED:
case LOOKUP_REMAPPED:
OCF_DEBUG_RQ(req, "Canceling cache line %u",
entry->coll_idx);
@ -395,56 +361,6 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
}
}
static void ocf_engine_map(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
uint32_t i;
struct ocf_map_info *entry;
uint64_t core_line;
ocf_core_id_t core_id = ocf_core_get_id(req->core);
ocf_req_clear_info(req);
OCF_DEBUG_TRACE(req->cache);
for (i = 0, core_line = req->core_line_first;
core_line <= req->core_line_last; core_line++, i++) {
entry = &(req->map[i]);
ocf_engine_lookup_map_entry(cache, entry, core_id, core_line);
/* attempt mapping only if no mapping error previously,
* otherwise continue the loop anyway to have request fully
* traversed after map()
*/
if (entry->status != LOOKUP_HIT &&
!ocf_req_test_mapping_error(req)) {
ocf_engine_map_cache_line(req, i);
if (!ocf_req_test_mapping_error(req))
entry->status = LOOKUP_INSERTED;
}
if (entry->status != LOOKUP_MISS)
ocf_engine_update_req_info(cache, req, i);
OCF_DEBUG_PARAM(req->cache,
"%s, cache line %u, core line = %llu",
entry->status == LOOKUP_HIT ? "Hit" :
entry->status == LOOKUP_MISS : "Miss" :
"Insert",
entry->coll_idx, entry->core_line);
}
if (!ocf_req_test_mapping_error(req)) {
/* request has been inserted into cache - purge it from promotion
* policy */
ocf_promotion_req_purge(cache->promotion_policy, req);
}
OCF_DEBUG_PARAM(req->cache, "Sequential - %s",
ocf_engine_is_sequential(req) ? "Yes" : "No");
}
static void _ocf_engine_clean_end(void *private_data, int error)
{
struct ocf_request *req = private_data;
@ -469,21 +385,21 @@ static void _ocf_engine_clean_end(void *private_data, int error)
}
}
static void ocf_engine_evict(struct ocf_request *req)
static void ocf_engine_remap(struct ocf_request *req)
{
int status;
status = space_managment_evict_do(req);
status = ocf_space_managment_remap_do(req);
if (status == LOOKUP_MISS) {
/* mark error */
ocf_req_set_mapping_error(req);
/* unlock cachelines locked during eviction */
/* unlock cachelines locked during remapping */
ocf_req_unlock(ocf_cache_line_concurrency(req->cache),
req);
/* request cleaning */
ocf_req_set_clean_eviction(req);
ocf_req_set_cleaning_required(req);
/* unmap inserted and replaced cachelines */
ocf_engine_map_hndl_error(req->cache, req);
@ -495,29 +411,25 @@ static void ocf_engine_evict(struct ocf_request *req)
static int lock_clines(struct ocf_request *req)
{
struct ocf_alock *c = ocf_cache_line_concurrency(req->cache);
enum ocf_engine_lock_type lock_type =
req->engine_cbs->get_lock_type(req);
int lock_type = OCF_WRITE;
switch (lock_type) {
case ocf_engine_lock_write:
return ocf_req_async_lock_wr(c, req, req->engine_cbs->resume);
case ocf_engine_lock_read:
return ocf_req_async_lock_rd(c, req, req->engine_cbs->resume);
default:
return OCF_LOCK_ACQUIRED;
}
if (req->rw == OCF_READ && ocf_engine_is_hit(req))
lock_type = OCF_READ;
return lock_type == OCF_WRITE ?
ocf_req_async_lock_wr(c, req, req->engine_cbs->resume) :
ocf_req_async_lock_rd(c, req, req->engine_cbs->resume);
}
/* Attempt to map cachelines marked as LOOKUP_MISS by evicting from cache.
/* Attempt to map cachelines marked as LOOKUP_MISS.
* Caller must assure that request map info is up to date (request
* is traversed).
*/
static inline int ocf_prepare_clines_evict(struct ocf_request *req)
static inline void ocf_prepare_clines_miss(struct ocf_request *req)
{
int lock_status = -OCF_ERR_NO_LOCK;
bool part_has_space;
part_has_space = ocf_part_has_space(req);
part_has_space = ocf_user_part_has_space(req);
if (!part_has_space) {
/* adding more cachelines to target partition would overflow
it - requesting eviction from target partition only */
@ -527,63 +439,24 @@ static inline int ocf_prepare_clines_evict(struct ocf_request *req)
ocf_req_clear_part_evict(req);
}
ocf_engine_evict(req);
ocf_engine_remap(req);
if (!ocf_req_test_mapping_error(req)) {
if (!ocf_req_test_mapping_error(req))
ocf_promotion_req_purge(req->cache->promotion_policy, req);
lock_status = lock_clines(req);
if (lock_status < 0)
ocf_req_set_mapping_error(req);
}
return lock_status;
}
static inline int ocf_prepare_clines_miss(struct ocf_request *req)
{
int lock_status = -OCF_ERR_NO_LOCK;
/* requests to disabled partitions go in pass-through */
if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
ocf_req_set_mapping_error(req);
return lock_status;
}
/* NOTE: ocf_part_has_space() below uses potentially stale request
* statistics (collected before hash bucket lock had been upgraded).
* It is ok since this check is opportunistic, as partition occupancy
* is also subject to change. */
if (!ocf_part_has_space(req)) {
ocf_engine_lookup(req);
return ocf_prepare_clines_evict(req);
}
ocf_engine_map(req);
if (!ocf_req_test_mapping_error(req)) {
lock_status = lock_clines(req);
if (lock_status < 0) {
/* Mapping succeeded, but we failed to acquire cacheline lock.
* Don't try to evict, just return error to caller */
ocf_req_set_mapping_error(req);
}
return lock_status;
}
/* Request mapping failed, but it is fully traversed as a side
* effect of ocf_engine_map(), so no need to repeat the traversation
* before eviction.
* */
req->info.mapping_error = false;
return ocf_prepare_clines_evict(req);
}
int ocf_engine_prepare_clines(struct ocf_request *req)
{
struct ocf_user_part *part = &req->cache->user_parts[req->part_id];
struct ocf_user_part *user_part = &req->cache->user_parts[req->part_id];
bool mapped;
bool promote = true;
int lock = -OCF_ERR_NO_LOCK;
int result;
/* requests to disabled partitions go in pass-through */
if (!ocf_user_part_is_enabled(user_part)) {
ocf_req_set_mapping_error(req);
return -OCF_ERR_NO_LOCK;
}
/* Calculate hashes for hash-bucket locking */
ocf_req_hash(req);
@ -599,6 +472,9 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
mapped = ocf_engine_is_mapped(req);
if (mapped) {
lock = lock_clines(req);
if (lock < 0)
ocf_req_set_mapping_error(req);
else
ocf_engine_set_hot(req);
ocf_hb_req_prot_unlock_rd(req);
return lock;
@ -615,6 +491,10 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
/* Mapping must be performed holding (at least) hash-bucket write lock */
ocf_hb_req_prot_lock_upgrade(req);
/* Repeat lookup after upgrading lock */
ocf_engine_lookup(req);
if (unlikely(ocf_engine_is_mapped(req))) {
lock = lock_clines(req);
ocf_engine_set_hot(req);
@ -622,17 +502,27 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
return lock;
}
result = ocf_prepare_clines_miss(req);
ocf_prepare_clines_miss(req);
if (!ocf_req_test_mapping_error(req)) {
lock = lock_clines(req);
if (lock < 0) {
/* Mapping succeeded, but we failed to acquire cacheline lock.
* Don't try to evict, just return error to caller */
ocf_req_set_mapping_error(req);
}
}
if (!ocf_req_test_mapping_error(req))
ocf_engine_set_hot(req);
ocf_hb_req_prot_unlock_wr(req);
if (ocf_req_test_clean_eviction(req)) {
ocf_eviction_flush_dirty(req->cache, part, req->io_queue,
if (ocf_req_is_cleaning_required(req)) {
ocf_lru_clean(req->cache, user_part, req->io_queue,
128);
}
return result;
return lock;
}
static int _ocf_engine_clean_getter(struct ocf_cache *cache,

View File

@ -210,19 +210,6 @@ void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
struct ocf_map_info *entry, ocf_core_id_t core_id,
uint64_t core_line);
/**
* @brief Request cacheline lock type
*/
enum ocf_engine_lock_type
{
/** No lock */
ocf_engine_lock_none = 0,
/** Write lock */
ocf_engine_lock_write,
/** Read lock */
ocf_engine_lock_read,
};
/**
* @brief Engine-specific callbacks for common request handling rountine
*
@ -230,9 +217,6 @@ enum ocf_engine_lock_type
*/
struct ocf_engine_callbacks
{
/** Specify locking requirements after request is mapped */
enum ocf_engine_lock_type (*get_lock_type)(struct ocf_request *req);
/** Resume handling after acquiring asynchronous lock */
ocf_req_async_lock_cb resume;
};

View File

@ -10,7 +10,7 @@
#include "engine_pt.h"
#include "engine_wb.h"
#include "../ocf_request.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"
#include "../metadata/metadata.h"
@ -77,7 +77,7 @@ static int _ocf_read_fast_do(struct ocf_request *req)
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(req);
ocf_user_part_move(req);
ocf_hb_req_prot_unlock_wr(req);
}
@ -126,7 +126,7 @@ int ocf_read_fast(struct ocf_request *req)
hit = ocf_engine_is_hit(req);
part_has_space = ocf_part_has_space(req);
part_has_space = ocf_user_part_has_space(req);
if (hit && part_has_space) {
ocf_io_start(&req->ioi.io);
@ -198,7 +198,7 @@ int ocf_write_fast(struct ocf_request *req)
mapped = ocf_engine_is_mapped(req);
part_has_space = ocf_part_has_space(req);
part_has_space = ocf_user_part_has_space(req);
if (mapped && part_has_space) {
ocf_io_start(&req->ioi.io);

View File

@ -9,7 +9,7 @@
#include "cache_engine.h"
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
@ -75,7 +75,7 @@ int ocf_read_pt_do(struct ocf_request *req)
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(req);
ocf_user_part_move(req);
ocf_hb_req_prot_unlock_wr(req);
}

View File

@ -15,7 +15,7 @@
#include "../utils/utils_io.h"
#include "../ocf_request.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../metadata/metadata.h"
#include "../ocf_def_priv.h"
@ -182,7 +182,7 @@ static int _ocf_read_generic_do(struct ocf_request *req)
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(req);
ocf_user_part_move(req);
ocf_hb_req_prot_unlock_wr(req);
}
@ -210,17 +210,8 @@ static const struct ocf_io_if _io_if_read_generic_resume = {
.write = _ocf_read_generic_do,
};
static enum ocf_engine_lock_type ocf_rd_get_lock_type(struct ocf_request *req)
{
if (ocf_engine_is_hit(req))
return ocf_engine_lock_read;
else
return ocf_engine_lock_write;
}
static const struct ocf_engine_callbacks _rd_engine_callbacks =
{
.get_lock_type = ocf_rd_get_lock_type,
.resume = ocf_engine_on_resume,
};

View File

@ -14,7 +14,7 @@
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_request.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wb"
@ -135,7 +135,7 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req)
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(req);
ocf_user_part_move(req);
ocf_hb_req_prot_unlock_wr(req);
}
@ -168,14 +168,8 @@ int ocf_write_wb_do(struct ocf_request *req)
return 0;
}
static enum ocf_engine_lock_type ocf_wb_get_lock_type(struct ocf_request *req)
{
return ocf_engine_lock_write;
}
static const struct ocf_engine_callbacks _wb_engine_callbacks =
{
.get_lock_type = ocf_wb_get_lock_type,
.resume = ocf_engine_on_resume,
};

View File

@ -12,7 +12,7 @@
#include "../metadata/metadata.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wo"

View File

@ -11,7 +11,7 @@
#include "../ocf_request.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
@ -123,7 +123,7 @@ static void _ocf_write_wt_update_bits(struct ocf_request *req)
/* Probably some cache lines are assigned into wrong
* partition. Need to move it to new one
*/
ocf_part_move(req);
ocf_user_part_move(req);
}
ocf_hb_req_prot_unlock_wr(req);
@ -155,14 +155,8 @@ static const struct ocf_io_if _io_if_wt_resume = {
.write = _ocf_write_wt_do,
};
static enum ocf_engine_lock_type ocf_wt_get_lock_type(struct ocf_request *req)
{
return ocf_engine_lock_write;
}
static const struct ocf_engine_callbacks _wt_engine_callbacks =
{
.get_lock_type = ocf_wt_get_lock_type,
.resume = ocf_engine_on_resume,
};

View File

@ -1,70 +0,0 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_EVICTION_POLICY_H__
#define __LAYER_EVICTION_POLICY_H__
#include "ocf/ocf.h"
#include "lru.h"
#include "lru_structs.h"
#define OCF_PENDING_EVICTION_LIMIT 512UL
#define OCF_NUM_EVICTION_LISTS 32
struct ocf_user_part;
struct ocf_request;
struct eviction_policy {
union {
struct lru_eviction_policy lru;
} policy;
};
/* Eviction policy metadata per cache line */
union eviction_policy_meta {
struct lru_eviction_policy_meta lru;
} __attribute__((packed));
/* the caller must hold the metadata lock for all operations
 *
 * For range operations the caller can:
 * set core_id to -1 to purge the whole cache device
 * set core_id to -2 to purge the whole cache partition
 */
struct eviction_policy_ops {
	/* Reset per-cacheline eviction metadata before the line joins eviction */
	void (*init_cline)(ocf_cache_t cache, ocf_cache_line_t cline);
	/* Remove the cacheline from the policy's tracking structures */
	void (*rm_cline)(ocf_cache_t cache,
			ocf_cache_line_t cline);
	/* Whether eviction may proceed at the moment (e.g. pending-eviction
	 * limit not reached) */
	bool (*can_evict)(ocf_cache_t cache);
	/* Evict up to cline_no lines from part and remap them into req;
	 * returns the number of lines actually evicted */
	uint32_t (*req_clines)(struct ocf_request *req, struct ocf_user_part *part,
			uint32_t cline_no);
	/* Mark the cacheline as recently accessed ("hot") */
	void (*hot_cline)(ocf_cache_t cache, ocf_cache_line_t cline);
	/* Initialize per-partition eviction policy state */
	void (*init_evp)(ocf_cache_t cache, struct ocf_user_part *part);
	/* Move the cacheline to the policy's dirty tracking */
	void (*dirty_cline)(ocf_cache_t cache,
			struct ocf_user_part *part,
			uint32_t cline_no);
	/* Move the cacheline to the policy's clean tracking */
	void (*clean_cline)(ocf_cache_t cache,
			struct ocf_user_part *part,
			uint32_t cline_no);
	/* Kick off cleaning of up to count dirty lines from part */
	void (*flush_dirty)(ocf_cache_t cache, struct ocf_user_part *part,
			ocf_queue_t io_queue, uint32_t count);
	/* Policy name (used for identification/logging) */
	const char *name;
};
extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];
/*
* Deallocates space according to eviction priorities.
*
* @returns:
* 'LOOKUP_HIT' if evicted enough cachelines to serve @req
* 'LOOKUP_MISS' otherwise
*/
int space_managment_evict_do(struct ocf_request *req);
int space_management_free(ocf_cache_t cache, uint32_t count);
#endif

View File

@ -1,790 +0,0 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "eviction.h"
#include "lru.h"
#include "ops.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"
#include "../mngt/ocf_mngt_common.h"
#include "../engine/engine_zero.h"
#include "../ocf_cache_priv.h"
#include "../ocf_request.h"
#include "../engine/engine_common.h"
#define OCF_EVICTION_MAX_SCAN 1024
static const ocf_cache_line_t end_marker = (ocf_cache_line_t)-1;
/* Adds the given collision_index to the _head_ of the LRU list.
 * Caller must hold the eviction list lock. When the list is non-empty the
 * new head is marked hot and the hot counter/boundary are updated; the sole
 * node of a previously empty list starts out cold. */
static void add_lru_head(ocf_cache_t cache,
		struct ocf_lru_list *list,
		unsigned int collision_index)
{
	struct lru_eviction_policy_meta *node;
	unsigned int curr_head_index;

	ENV_BUG_ON(collision_index == end_marker);

	node = &ocf_metadata_get_eviction_policy(cache, collision_index)->lru;
	node->hot = false;

	/* First node to be added. */
	if (!list->num_nodes) {
		list->head = collision_index;
		list->tail = collision_index;

		node->next = end_marker;
		node->prev = end_marker;

		list->num_nodes = 1;
	} else {
		struct lru_eviction_policy_meta *curr_head;

		/* Not the first node to be added. */
		curr_head_index = list->head;
		ENV_BUG_ON(curr_head_index == end_marker);

		curr_head = &ocf_metadata_get_eviction_policy(cache,
				curr_head_index)->lru;

		/* Link in front of the current head */
		node->next = curr_head_index;
		node->prev = end_marker;
		curr_head->prev = collision_index;
		node->hot = true;
		/* If the old head was cold, the new node is also the last
		 * hot element */
		if (!curr_head->hot)
			list->last_hot = collision_index;
		++list->num_hot;
		list->head = collision_index;

		++list->num_nodes;
	}
}
/* Deletes the node with the given collision_index from the lru list.
 * Caller must hold the eviction list lock. Maintains the hot counter and
 * the last_hot boundary (hot nodes are clustered at the head). */
static void remove_lru_list(ocf_cache_t cache,
		struct ocf_lru_list *list,
		unsigned int collision_index)
{
	int is_head = 0, is_tail = 0;
	uint32_t prev_lru_node, next_lru_node;
	struct lru_eviction_policy_meta *node;

	ENV_BUG_ON(collision_index == end_marker);

	node = &ocf_metadata_get_eviction_policy(cache, collision_index)->lru;

	is_head = (list->head == collision_index);
	is_tail = (list->tail == collision_index);

	/* Removing a hot node shrinks the hot section; last_hot is patched
	 * in the per-case code below */
	if (node->hot)
		--list->num_hot;

	/* Set prev and next (even if not existent) */
	next_lru_node = node->next;
	prev_lru_node = node->prev;

	/* Case 1: If we are head AND tail, there is only one node.
	 * So unlink node and set that there is no node left in the list.
	 */
	if (is_head && is_tail) {
		node->next = end_marker;
		node->prev = end_marker;

		list->head = end_marker;
		list->tail = end_marker;
		list->last_hot = end_marker;
		ENV_BUG_ON(list->num_hot != 0);
	}

	/* Case 2: else if this collision_index is LRU head, but not tail,
	 * update head and return
	 */
	else if (is_head) {
		struct lru_eviction_policy_meta *next_node;

		ENV_BUG_ON(next_lru_node == end_marker);

		next_node = &ocf_metadata_get_eviction_policy(cache,
				next_lru_node)->lru;

		if (list->last_hot == collision_index) {
			/* Head was the only hot node (counter already
			 * decremented above) */
			ENV_BUG_ON(list->num_hot != 0);
			list->last_hot = end_marker;
		}

		list->head = next_lru_node;

		node->next = end_marker;
		next_node->prev = end_marker;
	}

	/* Case 3: else if this collision_index is LRU tail, but not head,
	 * update tail and return
	 */
	else if (is_tail) {
		struct lru_eviction_policy_meta *prev_node;

		ENV_BUG_ON(prev_lru_node == end_marker);

		list->tail = prev_lru_node;

		prev_node = &ocf_metadata_get_eviction_policy(cache,
				prev_lru_node)->lru;

		node->prev = end_marker;
		prev_node->next = end_marker;
	}

	/* Case 4: else this collision_index is a middle node. There is no
	 * change to the head and the tail pointers.
	 */
	else {
		struct lru_eviction_policy_meta *prev_node;
		struct lru_eviction_policy_meta *next_node;

		ENV_BUG_ON(next_lru_node == end_marker);
		ENV_BUG_ON(prev_lru_node == end_marker);

		next_node = &ocf_metadata_get_eviction_policy(cache,
				next_lru_node)->lru;
		prev_node = &ocf_metadata_get_eviction_policy(cache,
				prev_lru_node)->lru;

		if (list->last_hot == collision_index) {
			ENV_BUG_ON(list->num_hot == 0);
			/* Hot boundary moves one node towards the head */
			list->last_hot = prev_lru_node;
		}

		/* Update prev and next nodes */
		prev_node->next = node->next;
		next_node->prev = node->prev;

		/* Update the given node */
		node->next = end_marker;
		node->prev = end_marker;
	}

	node->hot = false;
	--list->num_nodes;
}
/* Increase / decrease the number of hot elements by (at most) one per call,
 * moving towards the target of num_nodes / OCF_LRU_HOT_RATIO.
 * Assumes that the list has hot elements clustered together at the
 * head of the list, with last_hot pointing at the boundary.
 * Caller must hold the eviction list lock. */
static void balance_lru_list(ocf_cache_t cache,
		struct ocf_lru_list *list)
{
	unsigned target_hot_count = list->num_nodes / OCF_LRU_HOT_RATIO;
	struct lru_eviction_policy_meta *node;

	if (target_hot_count == list->num_hot)
		return;

	if (list->num_hot == 0) {
		/* No hot section yet - promote the head node */
		node = &ocf_metadata_get_eviction_policy(cache,
				list->head)->lru;
		list->last_hot = list->head;
		list->num_hot = 1;
		/* fix: assign bool 'true', not int 1, consistently with the
		 * rest of the file */
		node->hot = true;
		return;
	}

	ENV_BUG_ON(list->last_hot == end_marker);
	node = &ocf_metadata_get_eviction_policy(cache,
			list->last_hot)->lru;

	if (target_hot_count > list->num_hot) {
		/* Grow the hot section: the node right after last_hot
		 * becomes hot */
		++list->num_hot;
		list->last_hot = node->next;
		node = &ocf_metadata_get_eviction_policy(cache,
				node->next)->lru;
		node->hot = true;
	} else {
		/* Shrink the hot section: demote the current last_hot */
		if (list->last_hot == list->head) {
			node->hot = false;
			list->num_hot = 0;
			list->last_hot = end_marker;
		} else {
			ENV_BUG_ON(node->prev == end_marker);
			node->hot = false;
			--list->num_hot;
			list->last_hot = node->prev;
		}
	}
}
/*-- End of LRU functions*/
/* Reset the per-cacheline LRU metadata: detached from any list, cold. */
void evp_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
{
	struct lru_eviction_policy_meta *meta =
			&ocf_metadata_get_eviction_policy(cache, cline)->lru;

	meta->prev = end_marker;
	meta->next = end_marker;
	meta->hot = false;
}
static struct ocf_lru_list *evp_lru_get_list(struct ocf_user_part *part,
uint32_t evp, bool clean)
{
return clean ? &part->runtime->eviction[evp].policy.lru.clean :
&part->runtime->eviction[evp].policy.lru.dirty;
}
/* Find the LRU list the given cacheline currently belongs to: the list is
 * determined by the line's partition, its eviction-list index (cline modulo
 * OCF_NUM_EVICTION_LISTS) and its dirty status. */
static inline struct ocf_lru_list *evp_get_cline_list(ocf_cache_t cache,
		ocf_cache_line_t cline)
{
	ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
	struct ocf_user_part *part = &cache->user_parts[part_id];
	bool clean = !metadata_test_dirty(cache, cline);

	return evp_lru_get_list(part, cline % OCF_NUM_EVICTION_LISTS, clean);
}
/* Unlink a cacheline from its LRU list and rebalance the hot section.
 * The caller must hold the metadata lock. */
void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
{
	struct ocf_lru_list *list = evp_get_cline_list(cache, cline);

	remove_lru_list(cache, list, cline);
	balance_lru_list(cache, list);
}
/* Initialize an iterator over all OCF_NUM_EVICTION_LISTS eviction lists of
 * @part, starting at list @start_evp and tracking availability of each list
 * in a bitmap (next_avail_evp). @clean selects the clean vs dirty lists;
 * @cl_lock_write selects the cacheline lock mode; @hash_locked/@req let the
 * iterator skip hash buckets already locked by the request. */
static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
		struct ocf_user_part *part, uint32_t start_evp, bool clean,
		bool cl_lock_write, _lru_hash_locked_pfn hash_locked,
		struct ocf_request *req)
{
	uint32_t i;

	/* entire iterator implementation depends on gcc builtins for
	   bit operations which works on 64 bit integers at most */
	ENV_BUILD_BUG_ON(OCF_NUM_EVICTION_LISTS > sizeof(iter->evp) * 8);

	iter->cache = cache;
	iter->part = part;
	/* set iterator value to start_evp - 1 modulo OCF_NUM_EVICTION_LISTS */
	iter->evp = (start_evp + OCF_NUM_EVICTION_LISTS - 1) % OCF_NUM_EVICTION_LISTS;
	iter->num_avail_evps = OCF_NUM_EVICTION_LISTS;
	/* all lists start out marked as available (bit set) */
	iter->next_avail_evp = ((1ULL << OCF_NUM_EVICTION_LISTS) - 1);
	iter->clean = clean;
	iter->cl_lock_write = cl_lock_write;
	iter->hash_locked = hash_locked;
	iter->req = req;

	/* snapshot each list's tail as the per-list cursor */
	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
		iter->curr_cline[i] = evp_lru_get_list(part, i, clean)->tail;
}
/* Iterator setup for cleaning: walks the dirty lists (clean == false) with
 * shared (read) cacheline locks and no request-side hash locking. */
static inline void lru_iter_cleaning_init(struct ocf_lru_iter *iter,
		ocf_cache_t cache, struct ocf_user_part *part,
		uint32_t start_evp)
{
	/* Lock cachelines for read, non-exclusive access */
	lru_iter_init(iter, cache, part, start_evp, false, false,
			NULL, NULL);
}
/* Iterator setup for eviction: walks the clean lists (clean == true) and
 * locks victim cachelines in the mode the request needs. */
static inline void lru_iter_eviction_init(struct ocf_lru_iter *iter,
		ocf_cache_t cache, struct ocf_user_part *part,
		uint32_t start_evp, bool cl_lock_write,
		struct ocf_request *req)
{
	/* Lock hash buckets for write, cachelines according to user request,
	 * however exclusive cacheline access is needed even in case of read
	 * access. _evp_lru_evict_hash_locked tells whether given hash bucket
	 * is already locked as part of request hash locking (to avoid attempt
	 * to acquire the same hash bucket lock twice) */
	lru_iter_init(iter, cache, part, start_evp, true, cl_lock_write,
			ocf_req_hash_in_range, req);
}
/* Advance the iterator to the next available (bit set) eviction list.
 * next_avail_evp is a bitmap of still-available lists kept rotated so that
 * the bit for the current position is the least significant one; ffsll gives
 * the distance to the next available list. */
static inline uint32_t _lru_next_evp(struct ocf_lru_iter *iter)
{
	unsigned increment;

	increment = __builtin_ffsll(iter->next_avail_evp);
	/* keep the bitmap aligned with the new current position */
	iter->next_avail_evp = ocf_rotate_right(iter->next_avail_evp,
			increment, OCF_NUM_EVICTION_LISTS);
	iter->evp = (iter->evp + increment) % OCF_NUM_EVICTION_LISTS;

	return iter->evp;
}
/* True when the current eviction list has been marked exhausted. The bit for
 * the current list sits at the top of the rotated availability bitmap. */
static inline bool _lru_evp_is_empty(struct ocf_lru_iter *iter)
{
	uint64_t curr_bit = 1ULL << (OCF_NUM_EVICTION_LISTS - 1);

	return (iter->next_avail_evp & curr_bit) == 0;
}
/* Mark the current eviction list as exhausted: clear its availability bit
 * and drop the count of lists still worth visiting. */
static inline void _lru_evp_set_empty(struct ocf_lru_iter *iter)
{
	uint64_t curr_bit = 1ULL << (OCF_NUM_EVICTION_LISTS - 1);

	iter->num_avail_evps--;
	iter->next_avail_evp &= ~curr_bit;
}
/* True when every eviction list has been exhausted by the iterator. */
static inline bool _lru_evp_all_empty(struct ocf_lru_iter *iter)
{
	return !iter->num_avail_evps;
}
/* Try-lock the cacheline in the mode selected at iterator init
 * (write when cl_lock_write, read otherwise). */
static inline bool _lru_trylock_cacheline(struct ocf_lru_iter *iter,
		ocf_cache_line_t cline)
{
	struct ocf_alock *alock = ocf_cache_line_concurrency(iter->cache);

	if (iter->cl_lock_write)
		return ocf_cache_line_try_lock_wr(alock, cline);

	return ocf_cache_line_try_lock_rd(alock, cline);
}
/* Release the cacheline lock taken by _lru_trylock_cacheline (same mode). */
static inline void _lru_unlock_cacheline(struct ocf_lru_iter *iter,
		ocf_cache_line_t cline)
{
	struct ocf_alock *alock = ocf_cache_line_concurrency(iter->cache);

	if (iter->cl_lock_write)
		ocf_cache_line_unlock_wr(alock, cline);
	else
		ocf_cache_line_unlock_rd(alock, cline);
}
/* Try-lock the hash bucket of (core_id, core_line) for write. Succeeds
 * immediately when the bucket is already locked as part of the request's
 * own hash locking. */
static inline bool _lru_trylock_hash(struct ocf_lru_iter *iter,
		ocf_core_id_t core_id, uint64_t core_line)
{
	bool owned_by_req = iter->hash_locked != NULL &&
			iter->hash_locked(iter->req, core_id, core_line);

	if (owned_by_req)
		return true;

	return ocf_hb_cline_naked_trylock_wr(
			&iter->cache->metadata.lock,
			core_id, core_line);
}
/* Undo _lru_trylock_hash: a no-op when the bucket belongs to the request's
 * own hash locking, otherwise release the naked write lock. */
static inline void _lru_unlock_hash(struct ocf_lru_iter *iter,
		ocf_core_id_t core_id, uint64_t core_line)
{
	bool owned_by_req = iter->hash_locked != NULL &&
			iter->hash_locked(iter->req, core_id, core_line);

	if (owned_by_req)
		return;

	ocf_hb_cline_naked_unlock_wr(
			&iter->cache->metadata.lock,
			core_id, core_line);
}
/* Attempt to secure @cache_line as an eviction victim: take the cacheline
 * lock, reject lines targeted by the current request, take the hash bucket
 * lock, and require exclusive cacheline ownership. On success *core_id /
 * *core_line describe the line's current mapping and both locks are held;
 * on failure everything acquired so far is released.
 * NOTE(review): "evition" is a typo for "eviction" in the name - kept as-is
 * to preserve callers. */
static bool inline _lru_iter_evition_lock(struct ocf_lru_iter *iter,
		ocf_cache_line_t cache_line,
		ocf_core_id_t *core_id, uint64_t *core_line)

{
	struct ocf_request *req = iter->req;

	if (!_lru_trylock_cacheline(iter, cache_line))
		return false;

	ocf_metadata_get_core_info(iter->cache, cache_line,
		core_id, core_line);

	/* avoid evicting current request target cachelines */
	if (*core_id == ocf_core_get_id(req->core) &&
			*core_line >= req->core_line_first &&
			*core_line <= req->core_line_last) {
		_lru_unlock_cacheline(iter, cache_line);
		return false;
	}

	if (!_lru_trylock_hash(iter, *core_id, *core_line)) {
		_lru_unlock_cacheline(iter, cache_line);
		return false;
	}

	/* someone else still holds a lock on this line - not a safe victim */
	if (!ocf_cache_line_is_locked_exclusively(iter->cache,
			cache_line)) {
		_lru_unlock_hash(iter, *core_id, *core_line);
		_lru_unlock_cacheline(iter, cache_line);
		return false;
	}

	return true;
}
/* Get next clean cacheline from tail of lru lists. Caller must not hold any
 * eviction list lock. Returned cacheline is read or write locked, depending on
 * iter->write_lock. Returned cacheline has corresponding metadata hash bucket
 * locked. Cacheline is moved to the head of lru list before being returned */
static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
		ocf_core_id_t *core_id, uint64_t *core_line)
{
	uint32_t curr_evp;
	ocf_cache_line_t cline;
	ocf_cache_t cache = iter->cache;
	struct ocf_user_part *part = iter->part;
	struct ocf_lru_list *list;

	do {
		curr_evp = _lru_next_evp(iter);

		/* per-eviction-list lock protects the list links while we
		 * scan from the tail */
		ocf_metadata_eviction_wr_lock(&cache->metadata.lock, curr_evp);

		list = evp_lru_get_list(part, curr_evp, iter->clean);

		/* walk from coldest (tail) towards head until a line can be
		 * fully locked for eviction */
		cline = list->tail;
		while (cline != end_marker && !_lru_iter_evition_lock(iter,
				cline, core_id, core_line)) {
			cline = ocf_metadata_get_eviction_policy(
					iter->cache, cline)->lru.prev;
		}

		if (cline != end_marker) {
			/* move the victim to the head so repeated scans do
			 * not revisit it */
			remove_lru_list(cache, list, cline);
			add_lru_head(cache, list, cline);
			balance_lru_list(cache, list);
		}

		ocf_metadata_eviction_wr_unlock(&cache->metadata.lock, curr_evp);

		if (cline == end_marker && !_lru_evp_is_empty(iter)) {
			/* mark list as empty */
			_lru_evp_set_empty(iter);
		}
	} while (cline == end_marker && !_lru_evp_all_empty(iter));

	return cline;
}
/* Get next dirty cacheline from tail of lru lists. Caller must hold all
 * eviction list locks during entire iteration proces. Returned cacheline
 * is read or write locked, depending on iter->write_lock */
static inline ocf_cache_line_t lru_iter_cleaning_next(struct ocf_lru_iter *iter)
{
	uint32_t curr_evp;
	ocf_cache_line_t  cline;

	do {
		curr_evp = _lru_next_evp(iter);
		/* resume from this list's saved cursor */
		cline = iter->curr_cline[curr_evp];

		/* skip lines whose lock cannot be taken right now */
		while (cline != end_marker && !_lru_trylock_cacheline(iter,
				cline)) {
			cline = ocf_metadata_get_eviction_policy(
					iter->cache, cline)->lru.prev;
		}
		if (cline != end_marker) {
			/* advance the cursor past the returned line */
			iter->curr_cline[curr_evp] =
				ocf_metadata_get_eviction_policy(
						iter->cache , cline)->lru.prev;
		}

		if (cline == end_marker && !_lru_evp_is_empty(iter)) {
			/* mark list as empty */
			_lru_evp_set_empty(iter);
		}
	} while (cline == end_marker && !_lru_evp_all_empty(iter));

	return cline;
}
static void evp_lru_clean_end(void *private_data, int error)
{
struct ocf_part_cleaning_ctx *ctx = private_data;
unsigned i;
for (i = 0; i < OCF_EVICTION_CLEAN_SIZE; i++) {
if (ctx->cline[i] != end_marker)
ocf_cache_line_unlock_rd(ctx->cache->device->concurrency
.cache_line, ctx->cline[i]);
}
ocf_refcnt_dec(&ctx->counter);
}
/* Cleaner getter callback: hand out the idx-th victim collected by
 * evp_lru_clean. Returns 0 on success, -1 past the end of the batch. */
static int evp_lru_clean_get(ocf_cache_t cache, void *getter_context,
		uint32_t idx, ocf_cache_line_t *line)
{
	struct ocf_part_cleaning_ctx *ctx = getter_context;
	ocf_cache_line_t cline = ctx->cline[idx];

	if (cline == end_marker)
		return -1;

	*line = cline;
	return 0;
}
/* Pick up to @count (capped at OCF_EVICTION_CLEAN_SIZE) dirty cachelines
 * from the tails of @part's dirty LRU lists and fire the cleaner on them.
 * Non-blocking and best-effort: bails out when the cache is management-locked
 * or when a cleaning pass for this partition is already in flight. */
void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
		ocf_queue_t io_queue, uint32_t count)
{
	struct ocf_part_cleaning_ctx *ctx = &part->cleaning;
	struct ocf_cleaner_attribs attribs = {
		.lock_cacheline = false,
		.lock_metadata = true,
		.do_sort = true,

		.cmpl_context = &part->cleaning,
		.cmpl_fn = evp_lru_clean_end,

		.getter = evp_lru_clean_get,
		.getter_context = &part->cleaning,

		.count = min(count, OCF_EVICTION_CLEAN_SIZE),

		.io_queue = io_queue
	};
	ocf_cache_line_t *cline = part->cleaning.cline;
	struct ocf_lru_iter iter;
	unsigned evp;
	int cnt;
	unsigned i;
	unsigned lock_idx;

	if (ocf_mngt_cache_is_locked(cache))
		return;
	cnt = ocf_refcnt_inc(&ctx->counter);
	if (!cnt) {
		/* cleaner disabled by management operation */
		return;
	}

	if (cnt > 1) {
		/* cleaning already running for this partition */
		ocf_refcnt_dec(&ctx->counter);
		return;
	}

	part->cleaning.cache = cache;
	/* rotate the starting list per queue to spread the load */
	evp = io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;

	lock_idx = ocf_metadata_concurrency_next_idx(io_queue);
	ocf_metadata_start_shared_access(&cache->metadata.lock, lock_idx);

	/* cleaning iteration requires all eviction list locks held */
	OCF_METADATA_EVICTION_WR_LOCK_ALL();

	lru_iter_cleaning_init(&iter, cache, part, evp);
	i = 0;
	while (i < OCF_EVICTION_CLEAN_SIZE) {
		cline[i] = lru_iter_cleaning_next(&iter);
		if (cline[i] == end_marker)
			break;
		i++;
	}
	/* pad the remainder of the batch with end markers for the getter */
	while (i < OCF_EVICTION_CLEAN_SIZE)
		cline[i++] = end_marker;

	OCF_METADATA_EVICTION_WR_UNLOCK_ALL();
	ocf_metadata_end_shared_access(&cache->metadata.lock, lock_idx);

	ocf_cleaner_fire(cache, &attribs);
}
/* Eviction is allowed only while the number of in-flight evictions stays
 * below OCF_PENDING_EVICTION_LIMIT. */
bool evp_lru_can_evict(ocf_cache_t cache)
{
	return env_atomic_read(&cache->pending_eviction_clines) <
			OCF_PENDING_EVICTION_LIMIT;
}
/* Evict up to @cline_no clean cachelines from @part's LRU lists and remap
 * them into @req's unmapped entries (LOOKUP_MISS -> LOOKUP_REMAPPED), with
 * the victim's cacheline lock transferred to the request. The caller must
 * hold the metadata lock. Returns the number of lines actually evicted,
 * which may be less than requested (lists exhausted or pending-eviction
 * limit reached). */
uint32_t evp_lru_req_clines(struct ocf_request *req,
		struct ocf_user_part *part, uint32_t cline_no)
{
	struct ocf_alock* alock;
	struct ocf_lru_iter iter;
	uint32_t i;
	ocf_cache_line_t cline;
	uint64_t core_line;
	ocf_core_id_t core_id;
	ocf_core_t core;
	ocf_cache_t cache = req->cache;
	bool cl_write_lock =
		(req->engine_cbs->get_lock_type(req) == ocf_engine_lock_write);
	unsigned evp;
	unsigned req_idx = 0;

	if (cline_no == 0)
		return 0;

	if (unlikely(ocf_engine_unmapped_count(req) < cline_no)) {
		/* fix: the original message concatenated "in""request"
		 * without a space and lacked a trailing newline */
		ocf_cache_log(req->cache, log_err, "Not enough space in "
				"request: unmapped %u, requested %u\n",
				ocf_engine_unmapped_count(req),
				cline_no);
		ENV_BUG();
	}

	/* rotate the starting list per queue to spread eviction traffic */
	evp = req->io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;

	lru_iter_eviction_init(&iter, cache, part, evp, cl_write_lock, req);

	i = 0;
	while (i < cline_no) {
		if (!evp_lru_can_evict(cache))
			break;

		cline = lru_iter_eviction_next(&iter, &core_id, &core_line);

		if (cline == end_marker)
			break;

		/* eviction walks the clean lists only */
		ENV_BUG_ON(metadata_test_dirty(cache, cline));

		/* TODO: if atomic mode is restored, need to zero metadata
		 * before proceeding with cleaning (see version <= 20.12) */

		/* find next unmapped cacheline in request */
		while (req_idx + 1 < req->core_line_count &&
				req->map[req_idx].status != LOOKUP_MISS) {
			req_idx++;
		}

		ENV_BUG_ON(req->map[req_idx].status != LOOKUP_MISS);

		/* detach the victim from its previous core mapping */
		ocf_metadata_start_collision_shared_access(
				cache, cline);
		metadata_clear_valid_sec(cache, cline, 0, ocf_line_end_sector(cache));
		ocf_metadata_remove_from_collision(cache, cline, part->id);
		ocf_metadata_end_collision_shared_access(
				cache, cline);

		core = ocf_cache_get_core(cache, core_id);
		env_atomic_dec(&core->runtime_meta->cached_clines);
		env_atomic_dec(&core->runtime_meta->
				part_counters[part->id].cached_clines);

		_lru_unlock_hash(&iter, core_id, core_line);

		/* hand the line over to the request; the cacheline lock taken
		 * by the iterator stays with the request */
		ocf_map_cache_line(req, req_idx, cline);

		req->map[req_idx].status = LOOKUP_REMAPPED;
		ocf_engine_patch_req_info(cache, req, req_idx);

		alock = ocf_cache_line_concurrency(iter.cache);

		ocf_alock_mark_index_locked(alock, req, req_idx, true);
		req->alock_rw = cl_write_lock ? OCF_WRITE : OCF_READ;

		++req_idx;
		++i;
		/* Number of cachelines to evict have to match space in the request */
		ENV_BUG_ON(req_idx == req->core_line_count && i != cline_no );
	}

	return i;
}
/* Promote a cacheline to the head of its LRU list on access ("touch").
 * Already-hot lines are left alone (cheap read-locked check).
 * the caller must hold the metadata lock.
 * NOTE(review): 'hot' is sampled under the RD lock but not re-checked after
 * taking the WR lock - presumably benign (worst case the node is re-moved
 * to the head); confirm against the concurrency model. */
void evp_lru_hot_cline(ocf_cache_t cache, ocf_cache_line_t cline)
{
	struct lru_eviction_policy_meta *node;
	struct ocf_lru_list *list;
	bool hot;

	node = &ocf_metadata_get_eviction_policy(cache, cline)->lru;

	OCF_METADATA_EVICTION_RD_LOCK(cline);
	hot = node->hot;
	OCF_METADATA_EVICTION_RD_UNLOCK(cline);

	if (hot)
		return;

	list = evp_get_cline_list(cache, cline);

	OCF_METADATA_EVICTION_WR_LOCK(cline);

	/* unlink first if the node is currently on a list (linked via
	 * prev/next, or it is the list's sole/head/tail element) */
	if (node->next != end_marker ||
			node->prev != end_marker ||
			list->head == cline || list->tail == cline) {
		remove_lru_list(cache, list, cline);
	}

	/* Update LRU */
	add_lru_head(cache, list, cline);
	balance_lru_list(cache, list);

	OCF_METADATA_EVICTION_WR_UNLOCK(cline);
}
/* Reset an LRU list to the empty state. */
static inline void _lru_init(struct ocf_lru_list *list)
{
	list->head = end_marker;
	list->tail = end_marker;
	list->last_hot = end_marker;
	list->num_nodes = 0;
	list->num_hot = 0;
}
/* Initialize the partition's LRU state: reset the clean and dirty list of
 * every one of its OCF_NUM_EVICTION_LISTS eviction list pairs. */
void evp_lru_init_evp(ocf_cache_t cache, struct ocf_user_part *part)
{
	uint32_t ev_list;

	for (ev_list = 0; ev_list < OCF_NUM_EVICTION_LISTS; ev_list++) {
		_lru_init(evp_lru_get_list(part, ev_list, true));
		_lru_init(evp_lru_get_list(part, ev_list, false));
	}
}
/* Move a cacheline from the dirty LRU list to the head of the clean one
 * (called when the line transitions dirty -> clean). */
void evp_lru_clean_cline(ocf_cache_t cache, struct ocf_user_part *part,
		uint32_t cline)
{
	uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
	struct ocf_lru_list *src_list = evp_lru_get_list(part, ev_list, false);
	struct ocf_lru_list *dst_list = evp_lru_get_list(part, ev_list, true);

	OCF_METADATA_EVICTION_WR_LOCK(cline);
	remove_lru_list(cache, src_list, cline);
	balance_lru_list(cache, src_list);
	add_lru_head(cache, dst_list, cline);
	balance_lru_list(cache, dst_list);
	OCF_METADATA_EVICTION_WR_UNLOCK(cline);
}
/* Move a cacheline from the clean LRU list to the head of the dirty one
 * (called when the line transitions clean -> dirty). */
void evp_lru_dirty_cline(ocf_cache_t cache, struct ocf_user_part *part,
		uint32_t cline)
{
	uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
	struct ocf_lru_list *src_list = evp_lru_get_list(part, ev_list, true);
	struct ocf_lru_list *dst_list = evp_lru_get_list(part, ev_list, false);

	OCF_METADATA_EVICTION_WR_LOCK(cline);
	remove_lru_list(cache, src_list, cline);
	balance_lru_list(cache, src_list);
	add_lru_head(cache, dst_list, cline);
	balance_lru_list(cache, dst_list);
	OCF_METADATA_EVICTION_WR_UNLOCK(cline);
}

View File

@ -1,27 +0,0 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __EVICTION_LRU_H__
#define __EVICTION_LRU_H__
#include "eviction.h"
#include "lru_structs.h"
struct ocf_user_part;
struct ocf_request;
void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
bool evp_lru_can_evict(struct ocf_cache *cache);
uint32_t evp_lru_req_clines(struct ocf_request *req,
struct ocf_user_part *part, uint32_t cline_no);
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_init_evp(struct ocf_cache *cache, struct ocf_user_part *part);
void evp_lru_dirty_cline(struct ocf_cache *cache, struct ocf_user_part *part,
uint32_t cline);
void evp_lru_clean_cline(struct ocf_cache *cache, struct ocf_user_part *part,
uint32_t cline);
void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
ocf_queue_t io_queue, uint32_t count);
#endif

View File

@ -1,114 +0,0 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef LAYER_EVICTION_POLICY_OPS_H_
#define LAYER_EVICTION_POLICY_OPS_H_
#include "eviction.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_metadata_concurrency.h"
/**
 * @brief Initialize cache line before adding it into eviction
 *
 * @note This operation is called under WR metadata lock
 */
static inline void ocf_eviction_init_cache_line(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	uint8_t policy = cache->conf_meta->eviction_policy_type;

	ENV_BUG_ON(policy >= ocf_eviction_max);

	/* dispatch to the configured eviction policy, if it implements
	 * the hook */
	if (likely(evict_policy_ops[policy].init_cline))
		evict_policy_ops[policy].init_cline(cache, line);
}
/* Remove the cacheline from the configured eviction policy's structures,
 * under the per-line eviction write lock. */
static inline void ocf_eviction_purge_cache_line(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	uint8_t policy = cache->conf_meta->eviction_policy_type;

	ENV_BUG_ON(policy >= ocf_eviction_max);

	if (unlikely(!evict_policy_ops[policy].rm_cline))
		return;

	OCF_METADATA_EVICTION_WR_LOCK(line);
	evict_policy_ops[policy].rm_cline(cache, line);
	OCF_METADATA_EVICTION_WR_UNLOCK(line);
}
static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
if (likely(evict_policy_ops[type].can_evict))
return evict_policy_ops[type].can_evict(cache);
return true;
}
/* Ask the configured eviction policy to free @clines cachelines from @part
 * for @req. Returns the number of lines actually evicted (0 when the policy
 * does not implement the hook). */
static inline uint32_t ocf_eviction_need_space(ocf_cache_t cache,
		struct ocf_request *req, struct ocf_user_part *part,
		uint32_t clines)
{
	uint8_t policy = cache->conf_meta->eviction_policy_type;

	ENV_BUG_ON(policy >= ocf_eviction_max);

	if (unlikely(!evict_policy_ops[policy].req_clines))
		return 0;

	return evict_policy_ops[policy].req_clines(req, part, clines);
}
/* Notify the configured eviction policy that the cacheline was accessed. */
static inline void ocf_eviction_set_hot_cache_line(
		struct ocf_cache *cache, ocf_cache_line_t line)
{
	uint8_t policy = cache->conf_meta->eviction_policy_type;

	ENV_BUG_ON(policy >= ocf_eviction_max);

	if (unlikely(!evict_policy_ops[policy].hot_cline))
		return;

	evict_policy_ops[policy].hot_cline(cache, line);
}
/* Initialize the configured eviction policy's per-partition state, holding
 * all eviction list write locks for the duration. */
static inline void ocf_eviction_initialize(struct ocf_cache *cache,
		struct ocf_user_part *part)
{
	uint8_t policy = cache->conf_meta->eviction_policy_type;

	ENV_BUG_ON(policy >= ocf_eviction_max);

	if (unlikely(!evict_policy_ops[policy].init_evp))
		return;

	OCF_METADATA_EVICTION_WR_LOCK_ALL();
	evict_policy_ops[policy].init_evp(cache, part);
	OCF_METADATA_EVICTION_WR_UNLOCK_ALL();
}
/* Ask the configured eviction policy to start cleaning up to @count dirty
 * lines from @part on @io_queue. */
static inline void ocf_eviction_flush_dirty(ocf_cache_t cache,
		struct ocf_user_part *part, ocf_queue_t io_queue,
		uint32_t count)
{
	uint8_t policy = cache->conf_meta->eviction_policy_type;

	ENV_BUG_ON(policy >= ocf_eviction_max);

	if (unlikely(!evict_policy_ops[policy].flush_dirty))
		return;

	evict_policy_ops[policy].flush_dirty(cache, part, io_queue, count);
}
#endif /* LAYER_EVICTION_POLICY_OPS_H_ */

View File

@ -14,7 +14,6 @@
#include "metadata_segment.h"
#include "../concurrency/ocf_concurrency.h"
#include "../ocf_def_priv.h"
#include "../ocf_freelist.h"
#include "../ocf_priv.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
@ -36,6 +35,11 @@
#define OCF_METADATA_HASH_DIFF_MAX 1000
struct ocf_part_runtime_meta {
struct ocf_part_runtime runtime;
struct cleaning_policy clean_pol;
};
enum {
ocf_metadata_status_type_valid = 0,
ocf_metadata_status_type_dirty,
@ -67,7 +71,7 @@ static ocf_cache_line_t ocf_metadata_get_entries(
switch (type) {
case metadata_segment_collision:
case metadata_segment_cleaning:
case metadata_segment_eviction:
case metadata_segment_lru:
case metadata_segment_list_info:
return cache_lines;
@ -86,10 +90,10 @@ static ocf_cache_line_t ocf_metadata_get_entries(
return 32;
case metadata_segment_part_config:
return OCF_IO_CLASS_MAX + 1;
return OCF_USER_IO_CLASS_MAX + 1;
case metadata_segment_part_runtime:
return OCF_IO_CLASS_MAX + 1;
return OCF_NUM_PARTITIONS;
case metadata_segment_core_config:
return OCF_CORE_MAX;
@ -120,8 +124,8 @@ static int64_t ocf_metadata_get_element_size(
ENV_BUG_ON(type >= metadata_segment_variable_size_start && !settings);
switch (type) {
case metadata_segment_eviction:
size = sizeof(union eviction_policy_meta);
case metadata_segment_lru:
size = sizeof(struct ocf_lru_meta);
break;
case metadata_segment_cleaning:
@ -154,7 +158,7 @@ static int64_t ocf_metadata_get_element_size(
break;
case metadata_segment_part_runtime:
size = sizeof(struct ocf_user_part_runtime);
size = sizeof(struct ocf_part_runtime_meta);
break;
case metadata_segment_hash:
@ -324,7 +328,7 @@ const char * const ocf_metadata_segment_names[] = {
[metadata_segment_part_config] = "Part config",
[metadata_segment_part_runtime] = "Part runtime",
[metadata_segment_cleaning] = "Cleaning",
[metadata_segment_eviction] = "Eviction",
[metadata_segment_lru] = "LRU list",
[metadata_segment_collision] = "Collision",
[metadata_segment_list_info] = "List info",
[metadata_segment_hash] = "Hash",
@ -515,7 +519,7 @@ static int ocf_metadata_init_fixed_size(struct ocf_cache *cache,
struct ocf_core_meta_config *core_meta_config;
struct ocf_core_meta_runtime *core_meta_runtime;
struct ocf_user_part_config *part_config;
struct ocf_user_part_runtime *part_runtime;
struct ocf_part_runtime_meta *part_runtime_meta;
struct ocf_metadata_segment *superblock;
ocf_core_t core;
ocf_core_id_t core_id;
@ -565,13 +569,16 @@ static int ocf_metadata_init_fixed_size(struct ocf_cache *cache,
/* Set partition metadata */
part_config = METADATA_MEM_POOL(ctrl, metadata_segment_part_config);
part_runtime = METADATA_MEM_POOL(ctrl, metadata_segment_part_runtime);
part_runtime_meta = METADATA_MEM_POOL(ctrl,
metadata_segment_part_runtime);
for (i = 0; i < OCF_IO_CLASS_MAX + 1; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX + 1; i++) {
cache->user_parts[i].config = &part_config[i];
cache->user_parts[i].runtime = &part_runtime[i];
cache->user_parts[i].id = i;
cache->user_parts[i].clean_pol = &part_runtime_meta[i].clean_pol;
cache->user_parts[i].part.runtime =
&part_runtime_meta[i].runtime;
}
cache->free.runtime= &part_runtime_meta[PARTITION_FREELIST].runtime;
/* Set core metadata */
core_meta_config = METADATA_MEM_POOL(ctrl,
@ -948,7 +955,7 @@ struct ocf_pipeline_arg ocf_metadata_flush_all_args[] = {
OCF_PL_ARG_INT(metadata_segment_part_runtime),
OCF_PL_ARG_INT(metadata_segment_core_runtime),
OCF_PL_ARG_INT(metadata_segment_cleaning),
OCF_PL_ARG_INT(metadata_segment_eviction),
OCF_PL_ARG_INT(metadata_segment_lru),
OCF_PL_ARG_INT(metadata_segment_collision),
OCF_PL_ARG_INT(metadata_segment_list_info),
OCF_PL_ARG_INT(metadata_segment_hash),
@ -1092,7 +1099,7 @@ out:
struct ocf_pipeline_arg ocf_metadata_load_all_args[] = {
OCF_PL_ARG_INT(metadata_segment_core_runtime),
OCF_PL_ARG_INT(metadata_segment_cleaning),
OCF_PL_ARG_INT(metadata_segment_eviction),
OCF_PL_ARG_INT(metadata_segment_lru),
OCF_PL_ARG_INT(metadata_segment_collision),
OCF_PL_ARG_INT(metadata_segment_list_info),
OCF_PL_ARG_INT(metadata_segment_hash),
@ -1146,18 +1153,21 @@ static void _recovery_rebuild_cline_metadata(ocf_cache_t cache,
ocf_core_t core = ocf_cache_get_core(cache, core_id);
ocf_part_id_t part_id;
ocf_cache_line_t hash_index;
struct ocf_part_runtime *part;
part_id = PARTITION_DEFAULT;
part = cache->user_parts[part_id].part.runtime;
ocf_metadata_add_to_partition(cache, part_id, cache_line);
ocf_metadata_set_partition_id(cache, part_id, cache_line);
env_atomic_inc(&part->curr_size);
hash_index = ocf_metadata_hash_func(cache, core_line, core_id);
ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
cache_line);
ocf_eviction_init_cache_line(cache, cache_line);
ocf_lru_init_cline(cache, cache_line);
ocf_eviction_set_hot_cache_line(cache, cache_line);
ocf_lru_hot_cline(cache, cache_line);
env_atomic_inc(&core->runtime_meta->cached_clines);
env_atomic_inc(&core->runtime_meta->

View File

@ -15,10 +15,6 @@ struct ocf_metadata_list_info {
/*!< Previous cache line in collision list */
ocf_cache_line_t next_col;
/*!< Next cache line in collision list*/
ocf_cache_line_t partition_prev;
/*!< Previous cache line in the same partition*/
ocf_cache_line_t partition_next;
/*!< Next cache line in the same partition*/
ocf_part_id_t partition_id : 8;
/*!< ID of partition where is assigned this cache line*/
} __attribute__((packed));

View File

@ -11,15 +11,14 @@
/*
* Eviction policy - Get
*/
union eviction_policy_meta *
ocf_metadata_get_eviction_policy(struct ocf_cache *cache,
struct ocf_lru_meta * ocf_metadata_get_lru(struct ocf_cache *cache,
ocf_cache_line_t line)
{
struct ocf_metadata_ctrl *ctrl
= (struct ocf_metadata_ctrl *) cache->metadata.priv;
return ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_eviction]), line);
&(ctrl->raw_desc[metadata_segment_lru]), line);
}

View File

@ -6,8 +6,8 @@
#ifndef __METADATA_EVICTION_H__
#define __METADATA_EVICTION_H__
union eviction_policy_meta *
ocf_metadata_get_eviction_policy(
struct ocf_lru_meta *
ocf_metadata_get_lru(
struct ocf_cache *cache, ocf_cache_line_t line);
#endif /* METADATA_EVICTION_H_ */

View File

@ -5,91 +5,8 @@
#include "ocf/ocf.h"
#include "metadata.h"
#include "../ocf_freelist.h"
#include "../utils/utils_cache_line.h"
static bool _is_cache_line_acting(struct ocf_cache *cache,
		uint32_t cache_line, ocf_core_id_t core_id,
		uint64_t start_line, uint64_t end_line)
{
	/* Check whether @cache_line is mapped to core @core_id within the
	 * core line range [@start_line, @end_line]. Passing
	 * OCF_CORE_ID_INVALID as @core_id matches any mapped cache line
	 * regardless of core or range.
	 */
	ocf_core_id_t mapped_core;
	uint64_t mapped_line;

	ocf_metadata_get_core_info(cache, cache_line,
			&mapped_core, &mapped_line);

	if (core_id == OCF_CORE_ID_INVALID) {
		/* Wildcard: any line mapped to a valid core matches */
		return mapped_core != OCF_CORE_ID_INVALID;
	}

	return mapped_core == core_id &&
			mapped_line >= start_line &&
			mapped_line <= end_line;
}
/*
* Iterates over cache lines that belong to the core device with
* core ID = core_id whose core byte addresses are in the range
* [start_byte, end_byte] and applies actor(cache, cache_line) to all
* matching cache lines
*
* set partition_id to PARTITION_INVALID to not care about partition_id
*
* METADATA lock must be held before calling this function
*/
int ocf_metadata_actor(struct ocf_cache *cache,
		ocf_part_id_t part_id, ocf_core_id_t core_id,
		uint64_t start_byte, uint64_t end_byte,
		ocf_metadata_actor_t actor)
{
	uint32_t step = 0;
	ocf_cache_line_t i, next_i;
	uint64_t start_line, end_line;
	int ret = 0;
	struct ocf_alock *c =
		ocf_cache_line_concurrency(cache);

	/* Translate the core byte range into a core line range */
	start_line = ocf_bytes_2_lines(cache, start_byte);
	end_line = ocf_bytes_2_lines(cache, end_byte);

	if (part_id != PARTITION_INVALID) {
		/* Walk only the given partition's cache line list. The next
		 * pointer is fetched before invoking the actor, since the
		 * actor may unlink @i from the partition list (e.g. via
		 * ocf_metadata_remove_cache_line). */
		for (i = cache->user_parts[part_id].runtime->head;
				i != cache->device->collision_table_entries;
				i = next_i) {
			next_i = ocf_metadata_get_partition_next(cache, i);

			if (_is_cache_line_acting(cache, i, core_id,
					start_line, end_line)) {
				/* Lines locked by in-flight I/O are skipped;
				 * record -OCF_ERR_AGAIN so the caller can
				 * retry the operation later */
				if (ocf_cache_line_is_used(c, i))
					ret = -OCF_ERR_AGAIN;
				else
					actor(cache, i);
			}

			/* Yield the CPU periodically on long scans */
			OCF_COND_RESCHED_DEFAULT(step);
		}
	} else {
		/* No partition given - scan the entire collision table */
		for (i = 0; i < cache->device->collision_table_entries; ++i) {
			if (_is_cache_line_acting(cache, i, core_id,
					start_line, end_line)) {
				if (ocf_cache_line_is_used(c, i))
					ret = -OCF_ERR_AGAIN;
				else
					actor(cache, i);
			}

			OCF_COND_RESCHED_DEFAULT(step);
		}
	}

	return ret;
}
/* the caller must hold the relevant cache block concurrency reader lock
* and the metadata lock
*/
@ -100,10 +17,6 @@ void ocf_metadata_remove_cache_line(struct ocf_cache *cache,
ocf_metadata_get_partition_id(cache, cache_line);
ocf_metadata_remove_from_collision(cache, cache_line, partition_id);
ocf_metadata_remove_from_partition(cache, partition_id, cache_line);
ocf_freelist_put_cache_line(cache->freelist, cache_line);
}
void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
@ -128,6 +41,6 @@ void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte)
{
return ocf_metadata_actor(cache, PARTITION_INVALID, core_id,
return ocf_metadata_actor(cache, PARTITION_UNSPECIFIED, core_id,
start_byte, end_byte, ocf_metadata_sparse_cache_line);
}

View File

@ -30,12 +30,4 @@ void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte);
typedef void (*ocf_metadata_actor_t)(struct ocf_cache *cache,
ocf_cache_line_t cache_line);
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor);
#endif /* __METADATA_MISC_H__ */

View File

@ -6,11 +6,10 @@
#include "ocf/ocf.h"
#include "metadata.h"
#include "metadata_internal.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
void ocf_metadata_get_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t *part_id,
ocf_cache_line_t *next_line, ocf_cache_line_t *prev_line)
ocf_part_id_t ocf_metadata_get_partition_id(struct ocf_cache *cache,
ocf_cache_line_t line)
{
const struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
@ -19,26 +18,13 @@ void ocf_metadata_get_partition_info(struct ocf_cache *cache,
info = ocf_metadata_raw_rd_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info) {
if (part_id)
*part_id = info->partition_id;
if (next_line)
*next_line = info->partition_next;
if (prev_line)
*prev_line = info->partition_prev;
} else {
ocf_metadata_error(cache);
if (part_id)
*part_id = PARTITION_DEFAULT;
if (next_line)
*next_line = cache->device->collision_table_entries;
if (prev_line)
*prev_line = cache->device->collision_table_entries;
}
ENV_BUG_ON(!info);
return info->partition_id;
}
void ocf_metadata_set_partition_next(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next_line)
void ocf_metadata_set_partition_id(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
@ -48,176 +34,7 @@ void ocf_metadata_set_partition_next(struct ocf_cache *cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info)
info->partition_next = next_line;
else
ocf_metadata_error(cache);
}
void ocf_metadata_set_partition_prev(struct ocf_cache *cache,
		ocf_cache_line_t line, ocf_cache_line_t prev_line)
{
	/* Update the "previous" link of @line in its partition list. */
	struct ocf_metadata_ctrl *ctrl;
	struct ocf_metadata_list_info *info;

	ctrl = (struct ocf_metadata_ctrl *)cache->metadata.priv;
	info = ocf_metadata_raw_wr_access(cache,
			&(ctrl->raw_desc[metadata_segment_list_info]), line);

	if (!info) {
		/* Failed metadata access - flag metadata as corrupted */
		ocf_metadata_error(cache);
		return;
	}

	info->partition_prev = prev_line;
}
void ocf_metadata_set_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id,
ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
{
struct ocf_metadata_list_info *info;
struct ocf_metadata_ctrl *ctrl =
(struct ocf_metadata_ctrl *) cache->metadata.priv;
info = ocf_metadata_raw_wr_access(cache,
&(ctrl->raw_desc[metadata_segment_list_info]), line);
if (info) {
info->partition_id = part_id;
info->partition_next = next_line;
info->partition_prev = prev_line;
} else {
else
ocf_metadata_error(cache);
}
}
/* Sets the given collision_index as the new _head_ of the Partition list. */
static void update_partition_head(struct ocf_cache *cache,
		ocf_part_id_t part_id, ocf_cache_line_t line)
{
	/* The list head lives in the partition's runtime metadata */
	struct ocf_user_part *target = &cache->user_parts[part_id];

	target->runtime->head = line;
}
/* Adds the given collision_index to the _head_ of the Partition list */
void ocf_metadata_add_to_partition(struct ocf_cache *cache,
		ocf_part_id_t part_id, ocf_cache_line_t line)
{
	ocf_cache_line_t line_head;
	/* Sentinel value terminating partition lists */
	ocf_cache_line_t line_entries = cache->device->collision_table_entries;
	struct ocf_user_part *part = &cache->user_parts[part_id];

	ENV_BUG_ON(!(line < line_entries));

	ocf_metadata_partition_lock(&cache->metadata.lock, part_id);

	/* First node to be added - list is currently empty */
	if (!part->runtime->curr_size) {

		update_partition_head(cache, part_id, line);
		ocf_metadata_set_partition_info(cache, line, part_id,
				line_entries, line_entries);

		if (!ocf_part_is_valid(part)) {
			/* Partition becomes non-empty while not valid -
			 * update the sorted list of partitions
			 */
			ocf_part_sort(cache);
		}

	} else {
		/* Not the first node to be added - link @line in front of
		 * the current head. */
		line_head = part->runtime->head;

		ENV_BUG_ON(!(line_head < line_entries));

		ocf_metadata_set_partition_info(cache, line, part_id,
				line_head, line_entries);

		ocf_metadata_set_partition_prev(cache, line_head, line);

		update_partition_head(cache, part_id, line);
	}

	part->runtime->curr_size++;

	ocf_metadata_partition_unlock(&cache->metadata.lock, part_id);
}
/* Deletes the node with the given collision_index from the Partition list */
void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
		ocf_part_id_t part_id, ocf_cache_line_t line)
{
	int is_head, is_tail;
	ocf_cache_line_t prev_line, next_line;
	/* Sentinel value terminating partition lists */
	uint32_t line_entries = cache->device->collision_table_entries;
	struct ocf_user_part *part = &cache->user_parts[part_id];

	ENV_BUG_ON(!(line < line_entries));

	ocf_metadata_partition_lock(&cache->metadata.lock, part_id);

	/* Get Partition info */
	ocf_metadata_get_partition_info(cache, line, NULL,
			&next_line, &prev_line);

	/* Find out if this node is Partition _head_ */
	is_head = (prev_line == line_entries);
	is_tail = (next_line == line_entries);

	/* Case 1: If we are head and there is only one node. So unlink node
	 * and set that there is no node left in the list.
	 */
	if (is_head && (part->runtime->curr_size == 1)) {
		ocf_metadata_set_partition_info(cache, line,
				part_id, line_entries, line_entries);

		update_partition_head(cache, part_id, line_entries);

		if (!ocf_part_is_valid(part)) {
			/* Partition becomes empty while not valid -
			 * update the sorted list of partitions
			 */
			ocf_part_sort(cache);
		}

	} else if (is_head) {
		/* Case 2: else if this collision_index is partition list head,
		 * but many nodes, update head and return
		 */
		ENV_BUG_ON(!(next_line < line_entries));
		update_partition_head(cache, part_id, next_line);

		ocf_metadata_set_partition_next(cache, line, line_entries);

		ocf_metadata_set_partition_prev(cache, next_line,
				line_entries);
	} else if (is_tail) {
		/* Case 3: else if this collision_index is partition list tail
		 */
		ENV_BUG_ON(!(prev_line < line_entries));

		ocf_metadata_set_partition_prev(cache, line, line_entries);

		ocf_metadata_set_partition_next(cache, prev_line,
				line_entries);
	} else {
		/* Case 4: else this collision_index is a middle node.
		 * There is no change to the head and the tail pointers.
		 */
		ENV_BUG_ON(!(next_line < line_entries));
		ENV_BUG_ON(!(prev_line < line_entries));

		/* Update prev and next nodes */
		ocf_metadata_set_partition_next(cache, prev_line, next_line);
		ocf_metadata_set_partition_prev(cache, next_line, prev_line);

		/* Update the given node */
		ocf_metadata_set_partition_info(cache, line, part_id,
				line_entries, line_entries);
	}

	part->runtime->curr_size--;

	ocf_metadata_partition_unlock(&cache->metadata.lock, part_id);
}

View File

@ -10,62 +10,16 @@
#include "../ocf_cache_priv.h"
#define PARTITION_DEFAULT 0
#define PARTITION_INVALID ((ocf_part_id_t)-1)
#define PARTITION_UNSPECIFIED ((ocf_part_id_t)-1)
#define PARTITION_FREELIST OCF_USER_IO_CLASS_MAX + 1
#define PARTITION_SIZE_MIN 0
#define PARTITION_SIZE_MAX 100
void ocf_metadata_get_partition_info(
ocf_part_id_t ocf_metadata_get_partition_id(struct ocf_cache *cache,
ocf_cache_line_t line);
void ocf_metadata_set_partition_id(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_part_id_t *part_id, ocf_cache_line_t *next_line,
ocf_cache_line_t *prev_line);
static inline ocf_part_id_t ocf_metadata_get_partition_id(
struct ocf_cache *cache, ocf_cache_line_t line)
{
ocf_part_id_t part_id;
ocf_metadata_get_partition_info(cache, line, &part_id, NULL, NULL);
return part_id;
}
static inline ocf_cache_line_t ocf_metadata_get_partition_next(
struct ocf_cache *cache, ocf_cache_line_t line)
{
ocf_cache_line_t next;
ocf_metadata_get_partition_info(cache, line, NULL, &next, NULL);
return next;
}
static inline ocf_cache_line_t ocf_metadata_get_partition_prev(
struct ocf_cache *cache, ocf_cache_line_t line)
{
ocf_cache_line_t prev;
ocf_metadata_get_partition_info(cache, line, NULL, NULL, &prev);
return prev;
}
void ocf_metadata_set_partition_next(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t next_line);
void ocf_metadata_set_partition_prev(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t prev_line);
void ocf_metadata_set_partition_info(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_part_id_t part_id, ocf_cache_line_t next_line,
ocf_cache_line_t prev_line);
void ocf_metadata_add_to_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line);
void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line);
ocf_part_id_t part_id);
#endif /* __METADATA_PARTITION_H__ */

View File

@ -8,7 +8,9 @@
#include "../utils/utils_list.h"
#include "../cleaning/cleaning.h"
#include "../eviction/eviction.h"
#include "../ocf_space.h"
#define OCF_NUM_PARTITIONS OCF_USER_IO_CLASS_MAX + 2
struct ocf_user_part_config {
char name[OCF_IO_CLASS_NAME_MAX];
@ -26,33 +28,33 @@ struct ocf_user_part_config {
ocf_cache_mode_t cache_mode;
};
struct ocf_user_part_runtime {
uint32_t curr_size;
uint32_t head;
struct eviction_policy eviction[OCF_NUM_EVICTION_LISTS];
struct cleaning_policy cleaning;
struct ocf_part_runtime {
env_atomic curr_size;
struct ocf_lru_part_meta lru[OCF_NUM_LRU_LISTS];
};
typedef bool ( *_lru_hash_locked_pfn)(struct ocf_request *req,
ocf_core_id_t core_id, uint64_t core_line);
/* Iterator state, visiting all eviction lists within a partition
/* Iterator state, visiting all lru lists within a partition
in round robin order */
struct ocf_lru_iter
{
/* per-partition cacheline iterator */
ocf_cache_line_t curr_cline[OCF_NUM_EVICTION_LISTS];
ocf_cache_line_t curr_cline[OCF_NUM_LRU_LISTS];
/* cache object */
ocf_cache_t cache;
/* cacheline concurrency */
struct ocf_alock *c;
/* target partition */
struct ocf_user_part *part;
/* available (non-empty) eviction list bitmap rotated so that current
@evp is on the most significant bit */
unsigned long long next_avail_evp;
/* number of available eviction lists */
uint32_t num_avail_evps;
/* current eviction list index */
uint32_t evp;
struct ocf_part *part;
/* available (non-empty) lru list bitmap rotated so that current
@lru_idx is on the most significant bit */
unsigned long long next_avail_lru;
/* number of available lru lists */
uint32_t num_avail_lrus;
/* current lru list index */
uint32_t lru_idx;
/* callback to determine whether given hash bucket is already
* locked by the caller */
_lru_hash_locked_pfn hash_locked;
@ -60,8 +62,6 @@ struct ocf_lru_iter
struct ocf_request *req;
/* 1 if iterating over clean lists, 0 if over dirty */
bool clean : 1;
/* 1 if cacheline is to be locked for write, 0 if for read*/
bool cl_lock_write : 1;
};
#define OCF_EVICTION_CLEAN_SIZE 32U
@ -72,10 +72,18 @@ struct ocf_part_cleaning_ctx {
ocf_cache_line_t cline[OCF_EVICTION_CLEAN_SIZE];
};
/* common partition data for both user-deined partitions as
* well as freelist
*/
struct ocf_part {
struct ocf_part_runtime *runtime;
ocf_part_id_t id;
};
struct ocf_user_part {
struct ocf_user_part_config *config;
struct ocf_user_part_runtime *runtime;
ocf_part_id_t id;
struct cleaning_policy *clean_pol;
struct ocf_part part;
struct ocf_part_cleaning_ctx cleaning;
struct ocf_lst_entry lst_valid;
};

View File

@ -32,7 +32,7 @@ enum ocf_metadata_segment_id {
/* sections with size dependent on cache device size go here: */
metadata_segment_cleaning = /*!< Cleaning policy */
metadata_segment_variable_size_start,
metadata_segment_eviction, /*!< Eviction policy */
metadata_segment_lru, /*!< Eviction policy */
metadata_segment_collision, /*!< Collision */
metadata_segment_list_info, /*!< Collision */
metadata_segment_hash, /*!< Hash */

View File

@ -7,7 +7,7 @@
#define __METADATA_STRUCTS_H__
#include "metadata_common.h"
#include "../eviction/eviction.h"
#include "../ocf_space.h"
#include "../cleaning/cleaning.h"
#include "../ocf_request.h"
@ -55,8 +55,8 @@ struct ocf_metadata_lock
{
struct ocf_metadata_global_lock global[OCF_NUM_GLOBAL_META_LOCKS];
/*!< global metadata lock (GML) */
env_rwlock eviction[OCF_NUM_EVICTION_LISTS]; /*!< Fast lock for eviction policy */
env_spinlock partition[OCF_IO_CLASS_MAX]; /* partition lock */
env_rwlock lru[OCF_NUM_LRU_LISTS]; /*!< Fast locks for lru list */
env_spinlock partition[OCF_USER_IO_CLASS_MAX]; /* partition lock */
env_rwsem *hash; /*!< Hash bucket locks */
env_rwsem *collision_pages; /*!< Collision table page locks */
ocf_cache_t cache; /*!< Parent cache object */

View File

@ -161,7 +161,7 @@ static void ocf_metadata_load_superblock_post(ocf_pipeline_t pipeline,
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
}
if (sb_config->valid_parts_no > OCF_IO_CLASS_MAX) {
if (sb_config->valid_parts_no > OCF_USER_IO_CLASS_MAX) {
ocf_cache_log(cache, log_err,
"Loading cache state ERROR, invalid partition count\n");
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);

View File

@ -49,8 +49,6 @@ struct ocf_superblock_config {
ocf_promotion_t promotion_policy_type;
struct promotion_policy_config promotion[PROMOTION_POLICY_TYPE_MAX];
ocf_eviction_t eviction_policy_type;
/*
* Checksum for each metadata region.
* This field has to be the last one!

View File

@ -11,8 +11,9 @@
#include "../ocf_queue_priv.h"
#include "../metadata/metadata.h"
#include "../metadata/metadata_io.h"
#include "../metadata/metadata_partition_structs.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
@ -20,9 +21,8 @@
#include "../utils/utils_refcnt.h"
#include "../utils/utils_async_lock.h"
#include "../concurrency/ocf_concurrency.h"
#include "../eviction/ops.h"
#include "../ocf_lru.h"
#include "../ocf_ctx_priv.h"
#include "../ocf_freelist.h"
#include "../cleaning/cleaning.h"
#include "../promotion/ops.h"
@ -123,8 +123,6 @@ struct ocf_cache_attach_context {
* load or recovery
*/
bool freelist_inited : 1;
bool concurrency_inited : 1;
} flags;
@ -169,7 +167,7 @@ static void __init_partitions(ocf_cache_t cache)
OCF_IO_CLASS_PRIO_LOWEST, true));
/* Add other partition to the cache and make it as dummy */
for (i_part = 0; i_part < OCF_IO_CLASS_MAX; i_part++) {
for (i_part = 0; i_part < OCF_USER_IO_CLASS_MAX; i_part++) {
ocf_refcnt_freeze(&cache->user_parts[i_part].cleaning.counter);
if (i_part == PARTITION_DEFAULT)
@ -182,26 +180,22 @@ static void __init_partitions(ocf_cache_t cache)
}
}
static void __init_partitions_attached(ocf_cache_t cache)
static void __init_parts_attached(ocf_cache_t cache)
{
struct ocf_user_part *part;
ocf_part_id_t part_id;
for (part_id = 0; part_id < OCF_IO_CLASS_MAX; part_id++) {
part = &cache->user_parts[part_id];
for (part_id = 0; part_id < OCF_USER_IO_CLASS_MAX; part_id++)
ocf_lru_init(cache, &cache->user_parts[part_id].part);
part->runtime->head = cache->device->collision_table_entries;
part->runtime->curr_size = 0;
ocf_eviction_initialize(cache, part);
}
ocf_lru_init(cache, &cache->free);
}
static void __init_freelist(ocf_cache_t cache)
static void __populate_free(ocf_cache_t cache)
{
uint64_t free_clines = ocf_metadata_collision_table_entries(cache) -
ocf_get_cache_occupancy(cache);
ocf_freelist_populate(cache->freelist, free_clines);
ocf_lru_populate(cache, free_clines);
}
static ocf_error_t __init_cleaning_policy(ocf_cache_t cache)
@ -233,14 +227,6 @@ static void __deinit_cleaning_policy(ocf_cache_t cache)
cleaning_policy_ops[cleaning_policy].deinitialize(cache);
}
static void __init_eviction_policy(ocf_cache_t cache,
ocf_eviction_t eviction)
{
ENV_BUG_ON(eviction < 0 || eviction >= ocf_eviction_max);
cache->conf_meta->eviction_policy_type = eviction;
}
static void __setup_promotion_policy(ocf_cache_t cache)
{
int i;
@ -259,6 +245,11 @@ static void __deinit_promotion_policy(ocf_cache_t cache)
cache->promotion_policy = NULL;
}
static void __init_free(ocf_cache_t cache)
{
cache->free.id = PARTITION_FREELIST;
}
static void __init_cores(ocf_cache_t cache)
{
/* No core devices yet */
@ -283,7 +274,7 @@ static void __reset_stats(ocf_cache_t cache)
env_atomic_set(&core->runtime_meta->dirty_clines, 0);
env_atomic64_set(&core->runtime_meta->dirty_since, 0);
for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
for (i = 0; i != OCF_USER_IO_CLASS_MAX; i++) {
env_atomic_set(&core->runtime_meta->
part_counters[i].cached_clines, 0);
env_atomic_set(&core->runtime_meta->
@ -292,8 +283,7 @@ static void __reset_stats(ocf_cache_t cache)
}
}
static ocf_error_t init_attached_data_structures(ocf_cache_t cache,
ocf_eviction_t eviction_policy)
static ocf_error_t init_attached_data_structures(ocf_cache_t cache)
{
ocf_error_t result;
@ -301,8 +291,8 @@ static ocf_error_t init_attached_data_structures(ocf_cache_t cache,
ocf_metadata_init_hash_table(cache);
ocf_metadata_init_collision(cache);
__init_partitions_attached(cache);
__init_freelist(cache);
__init_parts_attached(cache);
__populate_free(cache);
result = __init_cleaning_policy(cache);
if (result) {
@ -311,7 +301,6 @@ static ocf_error_t init_attached_data_structures(ocf_cache_t cache,
return result;
}
__init_eviction_policy(cache, eviction_policy);
__setup_promotion_policy(cache);
return 0;
@ -321,7 +310,7 @@ static void init_attached_data_structures_recovery(ocf_cache_t cache)
{
ocf_metadata_init_hash_table(cache);
ocf_metadata_init_collision(cache);
__init_partitions_attached(cache);
__init_parts_attached(cache);
__reset_stats(cache);
__init_metadata_version(cache);
}
@ -477,7 +466,8 @@ void _ocf_mngt_load_init_instance_complete(void *priv, int error)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
}
__init_freelist(cache);
if (context->metadata.shutdown_status != ocf_metadata_clean_shutdown)
__populate_free(cache);
cleaning_policy = cache->conf_meta->cleaning_policy_type;
if (!cleaning_policy_ops[cleaning_policy].initialize)
@ -685,7 +675,6 @@ static int _ocf_mngt_init_prepare_cache(struct ocf_cache_mngt_init_params *param
cache->pt_unaligned_io = cfg->pt_unaligned_io;
cache->use_submit_io_fast = cfg->use_submit_io_fast;
cache->eviction_policy_init = cfg->eviction_policy;
cache->metadata.is_volatile = cfg->metadata_volatile;
out:
@ -996,12 +985,6 @@ static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline,
context->flags.attached_metadata_inited = true;
ret = ocf_freelist_init(&cache->freelist, cache);
if (ret)
OCF_PL_FINISH_RET(pipeline, ret);
context->flags.freelist_inited = true;
ret = ocf_concurrency_init(cache);
if (ret)
OCF_PL_FINISH_RET(pipeline, ret);
@ -1021,7 +1004,7 @@ static void _ocf_mngt_attach_init_instance(ocf_pipeline_t pipeline,
ocf_cache_t cache = context->cache;
ocf_error_t result;
result = init_attached_data_structures(cache, cache->eviction_policy_init);
result = init_attached_data_structures(cache);
if (result)
OCF_PL_FINISH_RET(pipeline, result);
@ -1147,9 +1130,6 @@ static void _ocf_mngt_attach_handle_error(
if (context->flags.concurrency_inited)
ocf_concurrency_deinit(cache);
if (context->flags.freelist_inited)
ocf_freelist_deinit(cache->freelist);
if (context->flags.volume_inited)
ocf_volume_deinit(&cache->device->volume);
@ -1172,7 +1152,8 @@ static void _ocf_mngt_cache_init(ocf_cache_t cache,
INIT_LIST_HEAD(&cache->io_queues);
/* Init Partitions */
ocf_part_init(cache);
ocf_user_part_init(cache);
__init_free(cache);
__init_cores(cache);
__init_metadata_version(cache);
@ -1886,11 +1867,6 @@ static int _ocf_mngt_cache_validate_cfg(struct ocf_mngt_cache_config *cfg)
if (!ocf_cache_mode_is_valid(cfg->cache_mode))
return -OCF_ERR_INVALID_CACHE_MODE;
if (cfg->eviction_policy >= ocf_eviction_max ||
cfg->eviction_policy < 0) {
return -OCF_ERR_INVAL;
}
if (cfg->promotion_policy >= ocf_promotion_max ||
cfg->promotion_policy < 0 ) {
return -OCF_ERR_INVAL;
@ -2025,7 +2001,6 @@ static void _ocf_mngt_cache_unplug_complete(void *priv, int error)
ocf_metadata_deinit_variable_size(cache);
ocf_concurrency_deinit(cache);
ocf_freelist_deinit(cache->freelist);
ocf_volume_deinit(&cache->device->volume);
@ -2092,15 +2067,12 @@ static int _ocf_mngt_cache_load_core_log(ocf_core_t core, void *cntx)
static void _ocf_mngt_cache_load_log(ocf_cache_t cache)
{
ocf_cache_mode_t cache_mode = ocf_cache_get_mode(cache);
ocf_eviction_t eviction_type = cache->conf_meta->eviction_policy_type;
ocf_cleaning_t cleaning_type = cache->conf_meta->cleaning_policy_type;
ocf_promotion_t promotion_type = cache->conf_meta->promotion_policy_type;
ocf_cache_log(cache, log_info, "Successfully loaded\n");
ocf_cache_log(cache, log_info, "Cache mode : %s\n",
_ocf_cache_mode_get_name(cache_mode));
ocf_cache_log(cache, log_info, "Eviction policy : %s\n",
evict_policy_ops[eviction_type].name);
ocf_cache_log(cache, log_info, "Cleaning policy : %s\n",
cleaning_policy_ops[cleaning_type].name);
ocf_cache_log(cache, log_info, "Promotion policy : %s\n",

View File

@ -11,7 +11,7 @@
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../ocf_request.h"
#include "../eviction/ops.h"
#include "../ocf_lru.h"
#include "../ocf_logger_priv.h"
#include "../ocf_queue_priv.h"
#include "../engine/engine_common.h"

View File

@ -457,7 +457,7 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline,
env_atomic_set(&core->runtime_meta->dirty_clines, 0);
env_atomic64_set(&core->runtime_meta->dirty_since, 0);
for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
for (i = 0; i != OCF_USER_IO_CLASS_MAX; i++) {
env_atomic_set(&core->runtime_meta->
part_counters[i].cached_clines, 0);
env_atomic_set(&core->runtime_meta->

View File

@ -12,7 +12,7 @@
#include "../engine/engine_common.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_pipeline.h"
#include "../utils/utils_refcnt.h"
#include "../ocf_request.h"

View File

@ -8,19 +8,19 @@
#include "../ocf_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../eviction/ops.h"
#include "../utils/utils_user_part.h"
#include "../ocf_lru.h"
#include "ocf_env.h"
static uint64_t _ocf_mngt_count_parts_min_size(struct ocf_cache *cache)
static uint64_t _ocf_mngt_count_user_parts_min_size(struct ocf_cache *cache)
{
struct ocf_user_part *part;
struct ocf_user_part *user_part;
ocf_part_id_t part_id;
uint64_t count = 0;
for_each_part(cache, part, part_id) {
if (ocf_part_is_valid(part))
count += part->config->min_size;
for_each_user_part(cache, user_part, part_id) {
if (ocf_user_part_is_valid(user_part))
count += user_part->config->min_size;
}
return count;
@ -37,7 +37,7 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
if (!name)
return -OCF_ERR_INVAL;
if (part_id >= OCF_IO_CLASS_MAX)
if (part_id >= OCF_USER_IO_CLASS_MAX)
return -OCF_ERR_INVAL;
if (cache->user_parts[part_id].config->flags.valid)
@ -56,7 +56,7 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
return -OCF_ERR_INVAL;
}
for_each_lst(&cache->lst_part, iter, iter_id) {
for_each_lst(&cache->user_part_list, iter, iter_id) {
if (iter_id == part_id) {
ocf_cache_log(cache, log_err,
"Part with id %hu already exists\n", part_id);
@ -73,9 +73,9 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
cache->user_parts[part_id].config->priority = priority;
cache->user_parts[part_id].config->cache_mode = ocf_cache_mode_max;
ocf_part_set_valid(cache, part_id, valid);
ocf_lst_add(&cache->lst_part, part_id);
ocf_part_sort(cache);
ocf_user_part_set_valid(cache, part_id, valid);
ocf_lst_add(&cache->user_part_list, part_id);
ocf_user_part_sort(cache);
cache->user_parts[part_id].config->flags.added = 1;
@ -85,12 +85,13 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
static int _ocf_mngt_set_partition_size(struct ocf_cache *cache,
ocf_part_id_t part_id, uint32_t min, uint32_t max)
{
struct ocf_user_part *part = &cache->user_parts[part_id];
struct ocf_user_part *user_part = &cache->user_parts[part_id];
if (min > max)
return -OCF_ERR_INVAL;
if (_ocf_mngt_count_parts_min_size(cache) + min > PARTITION_SIZE_MAX) {
if (_ocf_mngt_count_user_parts_min_size(cache) + min >
PARTITION_SIZE_MAX) {
/* Illegal configuration in which sum of all min_sizes exceeds
* cache size.
*/
@ -100,8 +101,8 @@ static int _ocf_mngt_set_partition_size(struct ocf_cache *cache,
if (max > PARTITION_SIZE_MAX)
max = PARTITION_SIZE_MAX;
part->config->min_size = min;
part->config->max_size = max;
user_part->config->min_size = min;
user_part->config->max_size = max;
return 0;
}
@ -123,7 +124,7 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
dest_part = &cache->user_parts[part_id];
if (!ocf_part_is_added(dest_part)) {
if (!ocf_user_part_is_added(dest_part)) {
ocf_cache_log(cache, log_info, "Setting IO class, id: %u, "
"name: '%s' [ ERROR ]\n", part_id, dest_part->config->name);
return -OCF_ERR_INVAL;
@ -150,7 +151,7 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
" [ ERROR ]\n", part_id, dest_part->config->name, max);
return -OCF_ERR_INVAL;
}
ocf_part_set_prio(cache, dest_part, prio);
ocf_user_part_set_prio(cache, dest_part, prio);
dest_part->config->cache_mode = cache_mode;
ocf_cache_log(cache, log_info,
@ -175,21 +176,21 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
return -OCF_ERR_INVAL;
}
if (ocf_part_is_valid(dest_part)) {
if (ocf_user_part_is_valid(dest_part)) {
/* Updating existing */
ocf_cache_log(cache, log_info, "Updating existing IO "
"class, id: %u, name: '%s', max size %u%% [ OK ]\n",
part_id, dest_part->config->name, max);
} else {
/* Adding new */
ocf_part_set_valid(cache, part_id, true);
ocf_user_part_set_valid(cache, part_id, true);
ocf_cache_log(cache, log_info, "Adding new IO class, "
"id: %u, name: '%s', max size %u%% [ OK ]\n", part_id,
dest_part->config->name, max);
}
ocf_part_set_prio(cache, dest_part, prio);
ocf_user_part_set_prio(cache, dest_part, prio);
dest_part->config->cache_mode = cache_mode;
return result;
@ -212,13 +213,13 @@ static void _ocf_mngt_io_class_remove(ocf_cache_t cache,
return;
}
if (!ocf_part_is_valid(dest_part)) {
if (!ocf_user_part_is_valid(dest_part)) {
/* Does not exist */
return;
}
ocf_part_set_valid(cache, part_id, false);
ocf_user_part_set_valid(cache, part_id, false);
ocf_cache_log(cache, log_info,
"Removing IO class, id: %u [ OK ]\n", part_id);
@ -240,7 +241,7 @@ static int _ocf_mngt_io_class_edit(ocf_cache_t cache,
static int _ocf_mngt_io_class_validate_cfg(ocf_cache_t cache,
const struct ocf_mngt_io_class_config *cfg)
{
if (cfg->class_id >= OCF_IO_CLASS_MAX)
if (cfg->class_id >= OCF_USER_IO_CLASS_MAX)
return -OCF_ERR_INVAL;
/* Name set to null means particular io_class should be removed */
@ -252,13 +253,13 @@ static int _ocf_mngt_io_class_validate_cfg(ocf_cache_t cache,
return -OCF_ERR_INVAL;
}
if (!ocf_part_is_name_valid(cfg->name)) {
if (!ocf_user_part_is_name_valid(cfg->name)) {
ocf_cache_log(cache, log_info,
"The name of the partition is not valid\n");
return -OCF_ERR_INVAL;
}
if (!ocf_part_is_prio_valid(cfg->prio)) {
if (!ocf_user_part_is_prio_valid(cfg->prio)) {
ocf_cache_log(cache, log_info,
"Invalid value of the partition priority\n");
return -OCF_ERR_INVAL;
@ -284,7 +285,7 @@ int ocf_mngt_cache_io_classes_configure(ocf_cache_t cache,
OCF_CHECK_NULL(cache);
OCF_CHECK_NULL(cfg);
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
result = _ocf_mngt_io_class_validate_cfg(cache, &cfg->config[i]);
if (result)
return result;
@ -301,7 +302,7 @@ int ocf_mngt_cache_io_classes_configure(ocf_cache_t cache,
if (result)
goto out_cpy;
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
result = _ocf_mngt_io_class_edit(cache, &cfg->config[i]);
if (result) {
ocf_cache_log(cache, log_err,
@ -310,7 +311,7 @@ int ocf_mngt_cache_io_classes_configure(ocf_cache_t cache,
}
}
ocf_part_sort(cache);
ocf_user_part_sort(cache);
out_edit:
if (result) {

View File

@ -8,7 +8,7 @@
#include "engine/cache_engine.h"
#include "utils/utils_cache_line.h"
#include "ocf_request.h"
#include "utils/utils_part.h"
#include "utils/utils_user_part.h"
#include "ocf_priv.h"
#include "ocf_cache_priv.h"
#include "ocf_queue_priv.h"
@ -170,7 +170,6 @@ int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info)
info->fallback_pt.error_counter =
env_atomic_read(&cache->fallback_pt_error_counter);
info->eviction_policy = cache->conf_meta->eviction_policy_type;
info->cleaning_policy = cache->conf_meta->cleaning_policy_type;
info->promotion_policy = cache->conf_meta->promotion_policy_type;
info->metadata_footprint = ocf_cache_is_device_attached(cache) ?

View File

@ -21,7 +21,6 @@
#include "ocf_logger_priv.h"
#include "ocf/ocf_trace.h"
#include "promotion/promotion.h"
#include "ocf_freelist.h"
#define DIRTY_FLUSHED 1
#define DIRTY_NOT_FLUSHED 0
@ -77,12 +76,10 @@ struct ocf_cache {
struct ocf_cache_device *device;
struct ocf_lst lst_part;
struct ocf_user_part user_parts[OCF_IO_CLASS_MAX + 1];
struct ocf_lst user_part_list;
struct ocf_user_part user_parts[OCF_USER_IO_CLASS_MAX + 1];
ocf_freelist_t freelist;
ocf_eviction_t eviction_policy_init;
struct ocf_part free;
uint32_t fallback_pt_error_threshold;
ocf_queue_t mngt_queue;

View File

@ -9,7 +9,7 @@
#include "ocf_io_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_part.h"
#include "utils/utils_user_part.h"
#include "ocf_request.h"
#include "ocf_trace_priv.h"
@ -186,7 +186,7 @@ static inline int ocf_core_validate_io(struct ocf_io *io)
if (io->addr + io->bytes > ocf_volume_get_length(volume))
return -OCF_ERR_INVAL;
if (io->io_class >= OCF_IO_CLASS_MAX)
if (io->io_class >= OCF_USER_IO_CLASS_MAX)
return -OCF_ERR_INVAL;
if (io->dir != OCF_READ && io->dir != OCF_WRITE)
@ -248,7 +248,7 @@ void ocf_core_volume_submit_io(struct ocf_io *io)
return;
}
req->part_id = ocf_part_class2id(cache, io->io_class);
req->part_id = ocf_user_part_class2id(cache, io->io_class);
req->core = core;
req->complete = ocf_req_complete;
@ -310,7 +310,7 @@ int ocf_core_submit_io_fast(struct ocf_io *io)
req->core = core;
req->complete = ocf_req_complete;
req->part_id = ocf_part_class2id(cache, io->io_class);
req->part_id = ocf_user_part_class2id(cache, io->io_class);
ocf_resolve_effective_cache_mode(cache, core, req);

View File

@ -70,7 +70,7 @@ struct ocf_core_meta_runtime {
* cache device
*/
env_atomic dirty_clines;
} part_counters[OCF_IO_CLASS_MAX];
} part_counters[OCF_USER_IO_CLASS_MAX];
};
struct ocf_core {

View File

@ -1,432 +0,0 @@
/*
* Copyright(c) 2019-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata/metadata.h"
struct ocf_part {
ocf_cache_line_t head;
ocf_cache_line_t tail;
env_atomic64 curr_size;
};
struct ocf_freelist {
/* parent cache */
struct ocf_cache *cache;
/* partition list array */
struct ocf_part *part;
/* freelist lock array */
env_spinlock *lock;
/* number of free lists */
uint32_t count;
/* next slowpath victim idx */
env_atomic slowpath_victim_idx;
/* total number of free lines */
env_atomic64 total_free;
};
static void ocf_freelist_lock(ocf_freelist_t freelist, uint32_t ctx)
{
env_spinlock_lock(&freelist->lock[ctx]);
}
static int ocf_freelist_trylock(ocf_freelist_t freelist, uint32_t ctx)
{
return env_spinlock_trylock(&freelist->lock[ctx]);
}
static void ocf_freelist_unlock(ocf_freelist_t freelist, uint32_t ctx)
{
env_spinlock_unlock(&freelist->lock[ctx]);
}
/* Sets the given collision_index as the new _head_ of the Partition list. */
static void _ocf_freelist_remove_cache_line(ocf_freelist_t freelist,
uint32_t ctx, ocf_cache_line_t cline)
{
struct ocf_cache *cache = freelist->cache;
struct ocf_part *freelist_part = &freelist->part[ctx];
int is_head, is_tail;
ocf_part_id_t invalid_part_id = PARTITION_INVALID;
ocf_cache_line_t prev, next;
ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
freelist->cache);
uint32_t free;
ENV_BUG_ON(cline >= line_entries);
/* Get Partition info */
ocf_metadata_get_partition_info(cache, cline, NULL, &next, &prev);
/* Find out if this node is Partition _head_ */
is_head = (prev == line_entries);
is_tail = (next == line_entries);
free = env_atomic64_read(&freelist_part->curr_size);
/* Case 1: If we are head and there is only one node. So unlink node
* and set that there is no node left in the list.
*/
if (is_head && free == 1) {
ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
line_entries, line_entries);
freelist_part->head = line_entries;
freelist_part->tail = line_entries;
} else if (is_head) {
/* Case 2: else if this collision_index is partition list head,
* but many nodes, update head and return
*/
ENV_BUG_ON(next >= line_entries);
freelist_part->head = next;
ocf_metadata_set_partition_prev(cache, next, line_entries);
ocf_metadata_set_partition_next(cache, cline, line_entries);
} else if (is_tail) {
/* Case 3: else if this cline is partition list tail */
ENV_BUG_ON(prev >= line_entries);
freelist_part->tail = prev;
ocf_metadata_set_partition_prev(cache, cline, line_entries);
ocf_metadata_set_partition_next(cache, prev, line_entries);
} else {
/* Case 4: else this collision_index is a middle node.
* There is no change to the head and the tail pointers.
*/
ENV_BUG_ON(next >= line_entries || prev >= line_entries);
/* Update prev and next nodes */
ocf_metadata_set_partition_prev(cache, next, prev);
ocf_metadata_set_partition_next(cache, prev, next);
/* Update the given node */
ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
line_entries, line_entries);
}
env_atomic64_dec(&freelist_part->curr_size);
env_atomic64_dec(&freelist->total_free);
}
static ocf_cache_line_t next_phys_invalid(ocf_cache_t cache,
ocf_cache_line_t phys)
{
ocf_cache_line_t lg;
ocf_cache_line_t collision_table_entries =
ocf_metadata_collision_table_entries(cache);
if (phys == collision_table_entries)
return collision_table_entries;
lg = ocf_metadata_map_phy2lg(cache, phys);
while (metadata_test_valid_any(cache, lg)) {
++phys;
if (phys == collision_table_entries)
break;
lg = ocf_metadata_map_phy2lg(cache, phys);
}
return phys;
}
/* Assign unused cachelines to freelist */
void ocf_freelist_populate(ocf_freelist_t freelist,
ocf_cache_line_t num_free_clines)
{
unsigned step = 0;
ocf_cache_t cache = freelist->cache;
unsigned num_freelists = freelist->count;
ocf_cache_line_t prev, next, idx;
ocf_cache_line_t phys;
ocf_cache_line_t collision_table_entries =
ocf_metadata_collision_table_entries(cache);
unsigned freelist_idx;
uint64_t freelist_size;
phys = 0;
for (freelist_idx = 0; freelist_idx < num_freelists; freelist_idx++)
{
/* calculate current freelist size */
freelist_size = num_free_clines / num_freelists;
if (freelist_idx < (num_free_clines % num_freelists))
++freelist_size;
env_atomic64_set(&freelist->part[freelist_idx].curr_size,
freelist_size);
if (!freelist_size) {
/* init empty freelist and move to next one */
freelist->part[freelist_idx].head =
collision_table_entries;
freelist->part[freelist_idx].tail =
collision_table_entries;
continue;
}
/* find first invalid cacheline */
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys == collision_table_entries);
idx = ocf_metadata_map_phy2lg(cache, phys);
++phys;
/* store freelist head */
freelist->part[freelist_idx].head = idx;
/* link freelist elements using partition list */
prev = collision_table_entries;
while (--freelist_size) {
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys == collision_table_entries);
next = ocf_metadata_map_phy2lg(cache, phys);
++phys;
ocf_metadata_set_partition_info(cache, idx,
PARTITION_INVALID, next, prev);
prev = idx;
idx = next;
OCF_COND_RESCHED_DEFAULT(step);
}
/* terminate partition list */
ocf_metadata_set_partition_info(cache, idx, PARTITION_INVALID,
collision_table_entries, prev);
/* store freelist tail */
freelist->part[freelist_idx].tail = idx;
}
/* we should have reached the last invalid cache line */
phys = next_phys_invalid(cache, phys);
ENV_BUG_ON(phys != collision_table_entries);
env_atomic64_set(&freelist->total_free, num_free_clines);
}
static void ocf_freelist_add_cache_line(ocf_freelist_t freelist,
uint32_t ctx, ocf_cache_line_t line)
{
struct ocf_cache *cache = freelist->cache;
struct ocf_part *freelist_part = &freelist->part[ctx];
ocf_cache_line_t tail;
ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
freelist->cache);
ocf_part_id_t invalid_part_id = PARTITION_INVALID;
ENV_BUG_ON(line >= line_entries);
if (env_atomic64_read(&freelist_part->curr_size) == 0) {
freelist_part->head = line;
freelist_part->tail = line;
ocf_metadata_set_partition_info(cache, line, invalid_part_id,
line_entries, line_entries);
} else {
tail = freelist_part->tail;
ENV_BUG_ON(tail >= line_entries);
ocf_metadata_set_partition_info(cache, line, invalid_part_id,
line_entries, tail);
ocf_metadata_set_partition_next(cache, tail, line);
freelist_part->tail = line;
}
env_atomic64_inc(&freelist_part->curr_size);
env_atomic64_inc(&freelist->total_free);
}
typedef enum {
OCF_FREELIST_ERR_NOLOCK = 1,
OCF_FREELIST_ERR_LIST_EMPTY,
} ocf_freelist_get_err_t;
static ocf_freelist_get_err_t ocf_freelist_get_cache_line_ctx(
ocf_freelist_t freelist, uint32_t ctx, bool can_wait,
ocf_cache_line_t *cline)
{
if (env_atomic64_read(&freelist->part[ctx].curr_size) == 0)
return -OCF_FREELIST_ERR_LIST_EMPTY;
if (!can_wait && ocf_freelist_trylock(freelist, ctx))
return -OCF_FREELIST_ERR_NOLOCK;
if (can_wait)
ocf_freelist_lock(freelist, ctx);
if (env_atomic64_read(&freelist->part[ctx].curr_size) == 0) {
ocf_freelist_unlock(freelist, ctx);
return -OCF_FREELIST_ERR_LIST_EMPTY;
}
*cline = freelist->part[ctx].head;
_ocf_freelist_remove_cache_line(freelist, ctx, *cline);
ocf_freelist_unlock(freelist, ctx);
return 0;
}
static int get_next_victim_freelist(ocf_freelist_t freelist)
{
int ctx, next;
do {
ctx = env_atomic_read(&freelist->slowpath_victim_idx);
next = (ctx + 1) % freelist->count;
} while (ctx != env_atomic_cmpxchg(&freelist->slowpath_victim_idx, ctx,
next));
return ctx;
}
static bool ocf_freelist_get_cache_line_slow(ocf_freelist_t freelist,
ocf_cache_line_t *cline)
{
int i, ctx;
int err;
bool lock_err;
/* try slowpath without waiting on lock */
lock_err = false;
for (i = 0; i < freelist->count; i++) {
ctx = get_next_victim_freelist(freelist);
err = ocf_freelist_get_cache_line_ctx(freelist, ctx, false,
cline);
if (!err)
return true;
if (err == -OCF_FREELIST_ERR_NOLOCK)
lock_err = true;
}
if (!lock_err) {
/* Slowpath failed due to empty freelists - no point in
* iterating through contexts to attempt slowpath with full
* lock */
return false;
}
/* slow path with waiting on lock */
for (i = 0; i < freelist->count; i++) {
ctx = get_next_victim_freelist(freelist);
if (!ocf_freelist_get_cache_line_ctx(freelist, ctx, true,
cline)) {
return true;
}
}
return false;
}
static bool ocf_freelist_get_cache_line_fast(ocf_freelist_t freelist,
ocf_cache_line_t *cline)
{
bool ret;
uint32_t ctx = env_get_execution_context();
ret = !ocf_freelist_get_cache_line_ctx(freelist, ctx, false, cline);
env_put_execution_context(ctx);
return ret;
}
bool ocf_freelist_get_cache_line(ocf_freelist_t freelist,
ocf_cache_line_t *cline)
{
if (env_atomic64_read(&freelist->total_free) == 0)
return false;
if (!ocf_freelist_get_cache_line_fast(freelist, cline))
return ocf_freelist_get_cache_line_slow(freelist, cline);
return true;
}
void ocf_freelist_put_cache_line(ocf_freelist_t freelist,
ocf_cache_line_t cline)
{
uint32_t ctx = env_get_execution_context();
ocf_freelist_lock(freelist, ctx);
ocf_freelist_add_cache_line(freelist, ctx, cline);
ocf_freelist_unlock(freelist, ctx);
env_put_execution_context(ctx);
}
int ocf_freelist_init(ocf_freelist_t *freelist, struct ocf_cache *cache)
{
uint32_t num;
int i;
int result;
ocf_freelist_t tmp_freelist;
ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
cache);
tmp_freelist = env_vzalloc(sizeof(*tmp_freelist));
if (!tmp_freelist)
return -OCF_ERR_NO_MEM;
num = env_get_execution_context_count();
tmp_freelist->cache = cache;
tmp_freelist->count = num;
env_atomic64_set(&tmp_freelist->total_free, 0);
tmp_freelist->lock = env_vzalloc(sizeof(tmp_freelist->lock[0]) * num);
tmp_freelist->part = env_vzalloc(sizeof(tmp_freelist->part[0]) * num);
if (!tmp_freelist->lock || !tmp_freelist->part) {
result = -OCF_ERR_NO_MEM;
goto free_allocs;
}
for (i = 0; i < num; i++) {
result = env_spinlock_init(&tmp_freelist->lock[i]);
if (result)
goto spinlock_err;
tmp_freelist->part[i].head = line_entries;
tmp_freelist->part[i].tail = line_entries;
env_atomic64_set(&tmp_freelist->part[i].curr_size, 0);
}
*freelist = tmp_freelist;
return 0;
spinlock_err:
while (i--)
env_spinlock_destroy(&tmp_freelist->lock[i]);
free_allocs:
env_vfree(tmp_freelist->lock);
env_vfree(tmp_freelist->part);
env_vfree(tmp_freelist);
return result;
}
void ocf_freelist_deinit(ocf_freelist_t freelist)
{
int i;
for (i = 0; i < freelist->count; i++)
env_spinlock_destroy(&freelist->lock[i]);
env_vfree(freelist->lock);
env_vfree(freelist->part);
env_vfree(freelist);
}
ocf_cache_line_t ocf_freelist_num_free(ocf_freelist_t freelist)
{
return env_atomic64_read(&freelist->total_free);
}

View File

@ -1,34 +0,0 @@
/*
* Copyright(c) 2019-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_FREELIST_H__
#define __OCF_FREELIST_H__
#include "ocf_cache_priv.h"
struct ocf_freelist;
typedef struct ocf_freelist *ocf_freelist_t;
/* Init / deinit freelist runtime structures */
int ocf_freelist_init(ocf_freelist_t *freelist, struct ocf_cache *cache);
void ocf_freelist_deinit(ocf_freelist_t freelist);
/* Assign unused cachelines to freelist */
void ocf_freelist_populate(ocf_freelist_t freelist,
ocf_cache_line_t num_free_clines);
/* Get cacheline from freelist */
bool ocf_freelist_get_cache_line(ocf_freelist_t freelist,
ocf_cache_line_t *cline);
/* Put cacheline back to freelist */
void ocf_freelist_put_cache_line(ocf_freelist_t freelist,
ocf_cache_line_t cline);
/* Return total number of free cachelines */
ocf_cache_line_t ocf_freelist_num_free(ocf_freelist_t freelist);
#endif /* __OCF_FREELIST_H__ */

View File

@ -7,22 +7,23 @@
#include "ocf_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_part.h"
#include "utils/utils_user_part.h"
int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
struct ocf_io_class_info *info)
{
ocf_part_id_t part_id = io_class;
struct ocf_part *part;
OCF_CHECK_NULL(cache);
if (!info)
return -OCF_ERR_INVAL;
if (io_class >= OCF_IO_CLASS_MAX)
if (io_class >= OCF_USER_IO_CLASS_MAX)
return -OCF_ERR_INVAL;
if (!ocf_part_is_valid(&cache->user_parts[part_id])) {
if (!ocf_user_part_is_valid(&cache->user_parts[part_id])) {
/* Partition does not exist */
return -OCF_ERR_IO_CLASS_NOT_EXIST;
}
@ -33,13 +34,14 @@ int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
return -OCF_ERR_INVAL;
}
part = &cache->user_parts[part_id].part;
info->priority = cache->user_parts[part_id].config->priority;
info->curr_size = ocf_cache_is_device_attached(cache) ?
cache->user_parts[part_id].runtime->curr_size : 0;
env_atomic_read(&part->runtime->curr_size) : 0;
info->min_size = cache->user_parts[part_id].config->min_size;
info->max_size = cache->user_parts[part_id].config->max_size;
info->eviction_policy_type = cache->conf_meta->eviction_policy_type;
info->cleaning_policy_type = cache->conf_meta->cleaning_policy_type;
info->cache_mode = cache->user_parts[part_id].config->cache_mode;
@ -50,7 +52,7 @@ int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
int ocf_io_class_visit(ocf_cache_t cache, ocf_io_class_visitor_t visitor,
void *cntx)
{
struct ocf_user_part *part;
struct ocf_user_part *user_part;
ocf_part_id_t part_id;
int result = 0;
@ -59,8 +61,8 @@ int ocf_io_class_visit(ocf_cache_t cache, ocf_io_class_visitor_t visitor,
if (!visitor)
return -OCF_ERR_INVAL;
for_each_part(cache, part, part_id) {
if (!ocf_part_is_valid(part))
for_each_user_part(cache, user_part, part_id) {
if (!ocf_user_part_is_valid(user_part))
continue;
result = visitor(cache, part_id, cntx);

1055
src/ocf_lru.c Normal file

File diff suppressed because it is too large Load Diff

35
src/ocf_lru.h Normal file
View File

@ -0,0 +1,35 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __EVICTION_LRU_H__
#define __EVICTION_LRU_H__
#include "ocf_space.h"
#include "ocf_lru_structs.h"
struct ocf_part;
struct ocf_user_part;
struct ocf_part_runtime;
struct ocf_part_cleaning_ctx;
struct ocf_request;
void ocf_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline);
void ocf_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
bool ocf_lru_can_evict(struct ocf_cache *cache);
uint32_t ocf_lru_req_clines(struct ocf_request *req,
struct ocf_part *src_part, uint32_t cline_no);
void ocf_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void ocf_lru_init(struct ocf_cache *cache, struct ocf_part *part);
void ocf_lru_dirty_cline(struct ocf_cache *cache, struct ocf_part *part,
ocf_cache_line_t cline);
void ocf_lru_clean_cline(struct ocf_cache *cache, struct ocf_part *part,
ocf_cache_line_t cline);
void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
ocf_queue_t io_queue, uint32_t count);
void ocf_lru_repart(ocf_cache_t cache, ocf_cache_line_t cline,
struct ocf_part *src_upart, struct ocf_part *dst_upart);
uint32_t ocf_lru_num_free(ocf_cache_t cache);
void ocf_lru_populate(ocf_cache_t cache, ocf_cache_line_t num_free_clines);
#endif

View File

@ -6,7 +6,7 @@
#define __EVICTION_LRU_STRUCTS_H__
struct lru_eviction_policy_meta {
struct ocf_lru_meta {
uint32_t prev;
uint32_t next;
uint8_t hot;
@ -18,9 +18,10 @@ struct ocf_lru_list {
uint32_t tail;
uint32_t num_hot;
uint32_t last_hot;
bool track_hot;
};
struct lru_eviction_policy {
struct ocf_lru_part_meta {
struct ocf_lru_list clean;
struct ocf_lru_list dirty;
};

View File

@ -33,7 +33,7 @@ struct ocf_req_info {
uint32_t mapping_error : 1;
/*!< Core lines in this request were not mapped into cache */
uint32_t clean_eviction : 1;
uint32_t cleaning_required : 1;
/*!< Eviction failed, need to request cleaning */
uint32_t core_error : 1;
@ -403,14 +403,14 @@ static inline bool ocf_req_test_mapping_error(struct ocf_request *req)
return req->info.mapping_error;
}
static inline void ocf_req_set_clean_eviction(struct ocf_request *req)
static inline void ocf_req_set_cleaning_required(struct ocf_request *req)
{
req->info.clean_eviction = true;
req->info.cleaning_required = true;
}
static inline bool ocf_req_test_clean_eviction(struct ocf_request *req)
static inline bool ocf_req_is_cleaning_required(struct ocf_request *req)
{
return req->info.clean_eviction;
return req->info.cleaning_required;
}
/**

View File

@ -19,8 +19,8 @@ static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache,
if (!ocf_cache_is_device_attached(cache))
return false;
return (ocf_freelist_num_free(cache->freelist) <=
SEQ_CUTOFF_FULL_MARGIN + req->core_line_count);
return (ocf_lru_num_free(cache) <= SEQ_CUTOFF_FULL_MARGIN +
req->core_line_count);
}
static int ocf_seq_cutoff_stream_cmp(struct ocf_rb_node *n1,

View File

@ -3,31 +3,16 @@
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "eviction.h"
#include "ops.h"
#include "../utils/utils_part.h"
#include "ocf_space.h"
#include "../utils/utils_user_part.h"
#include "../engine/engine_common.h"
struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
[ocf_eviction_lru] = {
.init_cline = evp_lru_init_cline,
.rm_cline = evp_lru_rm_cline,
.req_clines = evp_lru_req_clines,
.hot_cline = evp_lru_hot_cline,
.init_evp = evp_lru_init_evp,
.dirty_cline = evp_lru_dirty_cline,
.clean_cline = evp_lru_clean_cline,
.flush_dirty = evp_lru_clean,
.name = "lru",
},
};
static uint32_t ocf_evict_calculate(ocf_cache_t cache,
struct ocf_user_part *part, uint32_t to_evict)
struct ocf_user_part *user_part, uint32_t to_evict)
{
uint32_t curr_part_size = ocf_part_get_occupancy(part);
uint32_t min_part_size = ocf_part_get_min_size(cache, part);
uint32_t curr_part_size = ocf_part_get_occupancy(&user_part->part);
uint32_t min_part_size = ocf_user_part_get_min_size(cache, user_part);
if (curr_part_size <= min_part_size) {
/*
@ -44,15 +29,12 @@ static uint32_t ocf_evict_calculate(ocf_cache_t cache,
}
static inline uint32_t ocf_evict_part_do(struct ocf_request *req,
struct ocf_user_part *target_part)
struct ocf_user_part *user_part)
{
uint32_t unmapped = ocf_engine_unmapped_count(req);
uint32_t to_evict = 0;
if (!evp_lru_can_evict(req->cache))
return 0;
to_evict = ocf_evict_calculate(req->cache, target_part, unmapped);
to_evict = ocf_evict_calculate(req->cache, user_part, unmapped);
if (to_evict < unmapped) {
/* cannot evict enough cachelines to map request,
@ -60,34 +42,31 @@ static inline uint32_t ocf_evict_part_do(struct ocf_request *req,
return 0;
}
return ocf_eviction_need_space(req->cache, req, target_part, to_evict);
return ocf_lru_req_clines(req, &user_part->part, to_evict);
}
static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
static inline uint32_t ocf_evict_user_partitions(ocf_cache_t cache,
struct ocf_request *req, uint32_t evict_cline_no,
bool overflown_only, int16_t max_priority)
{
uint32_t to_evict = 0, evicted = 0;
struct ocf_user_part *part;
struct ocf_user_part *user_part;
ocf_part_id_t part_id;
unsigned overflow_size;
/* For each partition from the lowest priority to highest one */
for_each_part(cache, part, part_id) {
if (!ocf_eviction_can_evict(cache))
goto out;
for_each_user_part(cache, user_part, part_id) {
/*
* Check stop and continue conditions
*/
if (max_priority > part->config->priority) {
if (max_priority > user_part->config->priority) {
/*
* iterate partition have higher priority,
* do not evict
*/
break;
}
if (!overflown_only && !part->config->flags.eviction) {
if (!overflown_only && !user_part->config->flags.eviction) {
/* If partition is overflown it should be evcited
* even if its pinned
*/
@ -95,12 +74,12 @@ static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
}
if (overflown_only) {
overflow_size = ocf_part_overflow_size(cache, part);
overflow_size = ocf_user_part_overflow_size(cache, user_part);
if (overflow_size == 0)
continue;
}
to_evict = ocf_evict_calculate(cache, part,
to_evict = ocf_evict_calculate(cache, user_part,
evict_cline_no - evicted);
if (to_evict == 0) {
/* No cache lines to evict for this partition */
@ -110,7 +89,7 @@ static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
if (overflown_only)
to_evict = OCF_MIN(to_evict, overflow_size);
evicted += ocf_eviction_need_space(cache, req, part, to_evict);
evicted += ocf_lru_req_clines(req, &user_part->part, to_evict);
if (evicted >= evict_cline_no) {
/* Evicted requested number of cache line, stop
@ -124,48 +103,55 @@ out:
return evicted;
}
static inline uint32_t ocf_evict_do(struct ocf_request *req)
static inline uint32_t ocf_remap_do(struct ocf_request *req)
{
ocf_cache_t cache = req->cache;
ocf_part_id_t target_part_id = req->part_id;
struct ocf_user_part *target_part = &cache->user_parts[target_part_id];
uint32_t evict_cline_no = ocf_engine_unmapped_count(req);
uint32_t evicted;
uint32_t remap_cline_no = ocf_engine_unmapped_count(req);
uint32_t remapped = 0;
/* First attempt to evict overflown partitions in order to
/* First attempt to map from freelist */
if (ocf_lru_num_free(cache) > 0)
remapped = ocf_lru_req_clines(req, &cache->free, remap_cline_no);
if (remapped >= remap_cline_no)
return remapped;
/* Attempt to evict overflown partitions in order to
* achieve configured maximum size. Ignoring partitions
* priority in this case, as overflown partitions should
* free its cachelines regardless of destination partition
* priority. */
evicted = ocf_evict_partitions(cache, req, evict_cline_no,
remapped += ocf_evict_user_partitions(cache, req, remap_cline_no,
true, OCF_IO_CLASS_PRIO_PINNED);
if (evicted >= evict_cline_no)
return evicted;
if (remapped >= remap_cline_no)
return remapped;
/* Not enough cachelines in overflown partitions. Go through
* partitions with priority <= target partition and attempt
* to evict from those. */
evict_cline_no -= evicted;
evicted += ocf_evict_partitions(cache, req, evict_cline_no,
remap_cline_no -= remapped;
remapped += ocf_evict_user_partitions(cache, req, remap_cline_no,
false, target_part->config->priority);
return evicted;
return remapped;
}
int space_managment_evict_do(struct ocf_request *req)
int ocf_space_managment_remap_do(struct ocf_request *req)
{
uint32_t needed = ocf_engine_unmapped_count(req);
uint32_t evicted;
uint32_t remapped;
struct ocf_user_part *req_part = &req->cache->user_parts[req->part_id];
if (ocf_req_part_evict(req)) {
evicted = ocf_evict_part_do(req, req_part);
remapped = ocf_evict_part_do(req, req_part);
} else {
evicted = ocf_evict_do(req);
remapped = ocf_remap_do(req);
}
if (needed <= evicted)
return LOOKUP_INSERTED;
if (needed <= remapped)
return LOOKUP_REMAPPED;
return LOOKUP_MISS;
}

39
src/ocf_space.h Normal file
View File

@ -0,0 +1,39 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_EVICTION_POLICY_H__
#define __LAYER_EVICTION_POLICY_H__
#include "ocf/ocf.h"
#include "ocf_lru.h"
#include "ocf_lru_structs.h"
#define OCF_PENDING_EVICTION_LIMIT 512UL
#define OCF_NUM_LRU_LISTS 32
struct ocf_part;
struct ocf_user_part;
struct ocf_part_runtime;
struct ocf_part_cleaning_ctx;
struct ocf_request;
/*
* Deallocates space according to eviction priorities.
*
* @returns:
* 'LOOKUP_HIT' if evicted enough cachelines to serve @req
* 'LOOKUP_MISS' otherwise
*/
int ocf_space_managment_remap_do(struct ocf_request *req);
typedef void (*ocf_metadata_actor_t)(struct ocf_cache *cache,
ocf_cache_line_t cache_line);
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor);
#endif

View File

@ -7,7 +7,7 @@
#include "ocf_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_part.h"
#include "utils/utils_user_part.h"
#include "utils/utils_cache_line.h"
#ifdef OCF_DEBUG_STATS
@ -195,7 +195,7 @@ void ocf_core_stats_initialize(ocf_core_t core)
ocf_stats_error_init(&exp_obj_stats->cache_errors);
ocf_stats_error_init(&exp_obj_stats->core_errors);
for (i = 0; i != OCF_IO_CLASS_MAX; i++)
for (i = 0; i != OCF_USER_IO_CLASS_MAX; i++)
ocf_stats_part_init(&exp_obj_stats->part_counters[i]);
#ifdef OCF_DEBUG_STATS
@ -286,7 +286,7 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
cache = ocf_core_get_cache(core);
if (!ocf_part_is_valid(&cache->user_parts[part_id]))
if (!ocf_user_part_is_valid(&cache->user_parts[part_id]))
return -OCF_ERR_IO_CLASS_NOT_EXIST;
part_stat = &core->counters->part_counters[part_id];
@ -333,7 +333,7 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats)
&core_stats->debug_stats);
#endif
for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
for (i = 0; i != OCF_USER_IO_CLASS_MAX; i++) {
curr = &core_stats->part_counters[i];
accum_req_stats(&stats->read_reqs,

View File

@ -7,7 +7,7 @@
#include "ocf_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_part.h"
#include "utils/utils_user_part.h"
#include "utils/utils_cache_line.h"
#include "utils/utils_stats.h"

View File

@ -176,7 +176,7 @@ struct ocf_counters_core {
struct ocf_counters_error core_errors;
struct ocf_counters_error cache_errors;
struct ocf_counters_part part_counters[OCF_IO_CLASS_MAX];
struct ocf_counters_part part_counters[OCF_USER_IO_CLASS_MAX];
#ifdef OCF_DEBUG_STATS
struct ocf_counters_debug debug_stats;
#endif

View File

@ -222,7 +222,7 @@ bool nhit_req_should_promote(ocf_promotion_policy_t policy,
uint64_t core_line;
uint64_t occupied_cachelines =
ocf_metadata_collision_table_entries(policy->owner) -
ocf_freelist_num_free(policy->owner->freelist);
ocf_lru_num_free(policy->owner);
cfg = (struct nhit_promotion_policy_config*)policy->config;

View File

@ -790,23 +790,6 @@ bool ocf_alock_waitlist_is_empty(struct ocf_alock *alock,
return empty;
}
/* NOTE: it is caller responsibility to assure that noone acquires
* a lock in background */
bool ocf_alock_is_locked_exclusively(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int val = env_atomic_read(access);
ENV_BUG_ON(val == OCF_CACHE_LINE_ACCESS_IDLE);
if (!ocf_alock_waitlist_is_empty(alock, entry))
return false;
return val == OCF_CACHE_LINE_ACCESS_ONE_RD ||
val == OCF_CACHE_LINE_ACCESS_WR;
}
uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock)
{
return env_atomic_read(&alock->waiting);

View File

@ -53,9 +53,6 @@ int ocf_alock_lock_wr(struct ocf_alock *alock,
bool ocf_alock_waitlist_is_empty(struct ocf_alock *alock,
ocf_cache_line_t entry);
bool ocf_alock_is_locked_exclusively(struct ocf_alock *alock,
ocf_cache_line_t entry);
uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock);
size_t ocf_alock_obj_size(void);

View File

@ -102,10 +102,12 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
{
ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
struct ocf_user_part *part = &cache->user_parts[part_id];
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
struct ocf_part *part = &cache->user_parts[part_id].part;
bool line_is_clean;
ENV_BUG_ON(part_id > OCF_USER_IO_CLASS_MAX);
part = &cache->user_parts[part_id].part;
if (metadata_clear_dirty_sec_changed(cache, line, start_bit, end_bit,
&line_is_clean)) {
ocf_metadata_flush_mark(cache, req, map_idx, CLEAN, start_bit,
@ -130,10 +132,7 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
*/
env_atomic_dec(&req->core->runtime_meta->
part_counters[part_id].dirty_clines);
if (likely(evict_policy_ops[evp_type].clean_cline))
evict_policy_ops[evp_type].clean_cline(cache, part, line);
ocf_lru_clean_cline(cache, part, line);
ocf_purge_cleaning_policy(cache, line);
}
}
@ -145,10 +144,12 @@ void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
{
ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
struct ocf_user_part *part = &cache->user_parts[part_id];
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
struct ocf_part *part;
bool line_was_dirty;
ENV_BUG_ON(part_id > OCF_USER_IO_CLASS_MAX);
part = &cache->user_parts[part_id].part;
if (metadata_set_dirty_sec_changed(cache, line, start_bit, end_bit,
&line_was_dirty)) {
ocf_metadata_flush_mark(cache, req, map_idx, DIRTY, start_bit,
@ -173,9 +174,7 @@ void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
*/
env_atomic_inc(&req->core->runtime_meta->
part_counters[part_id].dirty_clines);
if (likely(evict_policy_ops[evp_type].dirty_cline))
evict_policy_ops[evp_type].dirty_cline(cache, part, line);
ocf_lru_dirty_cline(cache, part, line);
}
}

View File

@ -8,8 +8,7 @@
#include "../metadata/metadata.h"
#include "../concurrency/ocf_cache_line_concurrency.h"
#include "../eviction/eviction.h"
#include "../eviction/ops.h"
#include "../ocf_space.h"
#include "../engine/cache_engine.h"
#include "../ocf_request.h"
#include "../ocf_def_priv.h"
@ -180,7 +179,7 @@ static inline void ocf_purge_cleaning_policy(struct ocf_cache *cache,
static inline void ocf_purge_eviction_policy(struct ocf_cache *cache,
ocf_cache_line_t line)
{
ocf_eviction_purge_cache_line(cache, line);
ocf_lru_rm_cline(cache, line);
}
/**

View File

@ -9,7 +9,7 @@
#include "../concurrency/ocf_concurrency.h"
#include "../ocf_request.h"
#include "utils_cleaner.h"
#include "utils_part.h"
#include "utils_user_part.h"
#include "utils_io.h"
#include "utils_cache_line.h"
#include "../ocf_queue_priv.h"
@ -1052,7 +1052,7 @@ void ocf_cleaner_refcnt_freeze(ocf_cache_t cache)
struct ocf_user_part *curr_part;
ocf_part_id_t part_id;
for_each_part(cache, curr_part, part_id)
for_each_user_part(cache, curr_part, part_id)
ocf_refcnt_freeze(&curr_part->cleaning.counter);
}
@ -1061,7 +1061,7 @@ void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)
struct ocf_user_part *curr_part;
ocf_part_id_t part_id;
for_each_part(cache, curr_part, part_id)
for_each_user_part(cache, curr_part, part_id)
ocf_refcnt_unfreeze(&curr_part->cleaning.counter);
}
@ -1084,7 +1084,7 @@ void ocf_cleaner_refcnt_register_zero_cb(ocf_cache_t cache,
ctx->cb = cb;
ctx->priv = priv;
for_each_part(cache, curr_part, part_id) {
for_each_user_part(cache, curr_part, part_id) {
env_atomic_inc(&ctx->waiting);
ocf_refcnt_register_zero_cb(&curr_part->cleaning.counter,
ocf_cleaner_refcnt_register_zero_cb_finish, ctx);

View File

@ -1,180 +0,0 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_PARTITION_H__
#define __UTILS_PARTITION_H__
#include "../ocf_request.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../metadata/metadata_partition.h"
void ocf_part_init(struct ocf_cache *cache);
static inline bool ocf_part_is_valid(struct ocf_user_part *part)
{
return !!part->config->flags.valid;
}
static inline void ocf_part_set_prio(struct ocf_cache *cache,
struct ocf_user_part *part, int16_t prio)
{
if (part->config->priority != prio)
part->config->priority = prio;
}
static inline int16_t ocf_part_get_prio(struct ocf_cache *cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_IO_CLASS_MAX)
return cache->user_parts[part_id].config->priority;
return OCF_IO_CLASS_PRIO_LOWEST;
}
void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid);
static inline bool ocf_part_is_added(struct ocf_user_part *part)
{
return !!part->config->flags.added;
}
static inline ocf_part_id_t ocf_part_class2id(ocf_cache_t cache, uint64_t class)
{
if (class < OCF_IO_CLASS_MAX)
if (cache->user_parts[class].config->flags.valid)
return class;
return PARTITION_DEFAULT;
}
static inline uint32_t ocf_part_get_occupancy(struct ocf_user_part *part)
{
return part->runtime->curr_size;
}
static inline uint32_t ocf_part_get_min_size(ocf_cache_t cache,
struct ocf_user_part *part)
{
uint64_t ioclass_size;
ioclass_size = (uint64_t)part->config->min_size *
(uint64_t)cache->conf_meta->cachelines;
ioclass_size /= 100;
return (uint32_t)ioclass_size;
}
static inline uint32_t ocf_part_get_max_size(ocf_cache_t cache,
struct ocf_user_part *part)
{
uint64_t ioclass_size, max_size, cache_size;
max_size = part->config->max_size;
cache_size = cache->conf_meta->cachelines;
ioclass_size = max_size * cache_size;
ioclass_size = OCF_DIV_ROUND_UP(ioclass_size, 100);
return (uint32_t)ioclass_size;
}
void ocf_part_move(struct ocf_request *req);
#define for_each_part(cache, part, id) \
for_each_lst_entry(&cache->lst_part, part, id, \
struct ocf_user_part, lst_valid)
static inline void ocf_part_sort(struct ocf_cache *cache)
{
ocf_lst_sort(&cache->lst_part);
}
static inline bool ocf_part_is_enabled(struct ocf_user_part *part)
{
return part->config->max_size != 0;
}
static inline uint32_t ocf_part_overflow_size(struct ocf_cache *cache,
struct ocf_user_part *part)
{
uint32_t part_occupancy = ocf_part_get_occupancy(part);
uint32_t part_occupancy_limit = ocf_part_get_max_size(cache, part);
if (part_occupancy > part_occupancy_limit)
return part_occupancy - part_occupancy_limit;
return 0;
}
static inline bool ocf_part_has_space(struct ocf_request *req)
{
struct ocf_user_part *target_part = &req->cache->user_parts[req->part_id];
uint64_t part_occupancy_limit =
ocf_part_get_max_size(req->cache, target_part);
uint64_t needed_cache_lines = ocf_engine_repart_count(req) +
ocf_engine_unmapped_count(req);
uint64_t part_occupancy = ocf_part_get_occupancy(target_part);
return (part_occupancy + needed_cache_lines <= part_occupancy_limit);
}
static inline ocf_cache_mode_t ocf_part_get_cache_mode(ocf_cache_t cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_IO_CLASS_MAX)
return cache->user_parts[part_id].config->cache_mode;
return ocf_cache_mode_none;
}
static inline bool ocf_part_is_prio_valid(int64_t prio)
{
switch (prio) {
case OCF_IO_CLASS_PRIO_HIGHEST ... OCF_IO_CLASS_PRIO_LOWEST:
case OCF_IO_CLASS_PRIO_PINNED:
return true;
default:
return false;
}
}
/**
* routine checks for validity of a partition name.
*
* Following condition is checked:
* - string too long
* - string containing invalid characters (outside of low ascii)
* Following condition is NOT cheched:
* - empty string. (empty string is NOT a valid partition name, but
* this function returns true on empty string nevertheless).
*
* @return returns true if partition name is a valid name
*/
static inline bool ocf_part_is_name_valid(const char *name)
{
uint32_t length = 0;
while (*name) {
if (*name < ' ' || *name > '~')
return false;
if (',' == *name || '"' == *name)
return false;
name++;
length++;
if (length >= OCF_IO_CLASS_NAME_MAX)
return false;
}
return true;
}
#endif /* __UTILS_PARTITION_H__ */

View File

@ -8,18 +8,18 @@
#include "../ocf_request.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../eviction/ops.h"
#include "utils_part.h"
#include "../ocf_lru.h"
#include "utils_user_part.h"
static struct ocf_lst_entry *ocf_part_lst_getter_valid(
static struct ocf_lst_entry *ocf_user_part_lst_getter_valid(
struct ocf_cache *cache, ocf_cache_line_t idx)
{
ENV_BUG_ON(idx > OCF_IO_CLASS_MAX);
ENV_BUG_ON(idx > OCF_USER_IO_CLASS_MAX);
return &cache->user_parts[idx].lst_valid;
}
static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
static int ocf_user_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_lst_entry *e1, struct ocf_lst_entry *e2)
{
struct ocf_user_part *p1 = container_of(e1, struct ocf_user_part,
@ -27,10 +27,11 @@ static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_user_part *p2 = container_of(e2, struct ocf_user_part,
lst_valid);
size_t p1_size = ocf_cache_is_device_attached(cache) ?
p1->runtime->curr_size : 0;
env_atomic_read(&p1->part.runtime->curr_size)
: 0;
size_t p2_size = ocf_cache_is_device_attached(cache) ?
p2->runtime->curr_size : 0;
env_atomic_read(&p2->part.runtime->curr_size)
: 0;
int v1 = p1->config->priority;
int v2 = p2->config->priority;
@ -79,13 +80,19 @@ static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
return v2 - v1;
}
void ocf_part_init(struct ocf_cache *cache)
void ocf_user_part_init(struct ocf_cache *cache)
{
ocf_lst_init(cache, &cache->lst_part, OCF_IO_CLASS_MAX,
ocf_part_lst_getter_valid, ocf_part_lst_cmp_valid);
unsigned i;
ocf_lst_init(cache, &cache->user_part_list, OCF_USER_IO_CLASS_MAX,
ocf_user_part_lst_getter_valid,
ocf_user_part_lst_cmp_valid);
for (i = 0; i < OCF_USER_IO_CLASS_MAX + 1; i++)
cache->user_parts[i].part.id = i;
}
void ocf_part_move(struct ocf_request *req)
void ocf_user_part_move(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
struct ocf_map_info *entry;
@ -104,11 +111,11 @@ void ocf_part_move(struct ocf_request *req)
}
/* Moving cachelines to another partition is needed only
* for those already mapped before this request, which
* indicates either HIT or REMAPPED.
* for those already mapped before this request and remapped
* cachelines are assigned to target partition during eviction.
* So only hit cachelines are interesting.
*/
if (entry->status != LOOKUP_HIT &&
entry->status != LOOKUP_REMAPPED) {
if (entry->status != LOOKUP_HIT) {
/* No HIT */
continue;
}
@ -117,8 +124,8 @@ void ocf_part_move(struct ocf_request *req)
id_old = ocf_metadata_get_partition_id(cache, line);
id_new = req->part_id;
ENV_BUG_ON(id_old >= OCF_IO_CLASS_MAX ||
id_new >= OCF_IO_CLASS_MAX);
ENV_BUG_ON(id_old >= OCF_USER_IO_CLASS_MAX ||
id_new >= OCF_USER_IO_CLASS_MAX);
if (id_old == id_new) {
/* Partition of the request and cache line is the same,
@ -127,9 +134,6 @@ void ocf_part_move(struct ocf_request *req)
continue;
}
/* Remove from old eviction */
ocf_eviction_purge_cache_line(cache, line);
if (metadata_test_dirty(cache, line)) {
/*
* Remove cline from cleaning - this if for ioclass
@ -142,13 +146,8 @@ void ocf_part_move(struct ocf_request *req)
purge_cache_block(cache, line);
}
/* Let's change partition */
ocf_metadata_remove_from_partition(cache, id_old, line);
ocf_metadata_add_to_partition(cache, id_new, line);
/* Add to new eviction */
ocf_eviction_init_cache_line(cache, line);
ocf_eviction_set_hot_cache_line(cache, line);
ocf_lru_repart(cache, line, &cache->user_parts[id_old].part,
&cache->user_parts[id_new].part);
/* Check if cache line is dirty. If yes then need to change
* cleaning policy and update partition dirty clines
@ -175,22 +174,23 @@ void ocf_part_move(struct ocf_request *req)
}
}
void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
void ocf_user_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid)
{
struct ocf_user_part *part = &cache->user_parts[id];
struct ocf_user_part *user_part = &cache->user_parts[id];
if (valid ^ part->config->flags.valid) {
if (valid ^ user_part->config->flags.valid) {
if (valid) {
part->config->flags.valid = true;
user_part->config->flags.valid = true;
cache->conf_meta->valid_parts_no++;
} else {
part->config->flags.valid = false;
user_part->config->flags.valid = false;
cache->conf_meta->valid_parts_no--;
part->config->priority = OCF_IO_CLASS_PRIO_LOWEST;
part->config->min_size = 0;
part->config->max_size = PARTITION_SIZE_MAX;
ENV_BUG_ON(env_strncpy(part->config->name, sizeof(part->config->name),
user_part->config->priority = OCF_IO_CLASS_PRIO_LOWEST;
user_part->config->min_size = 0;
user_part->config->max_size = PARTITION_SIZE_MAX;
ENV_BUG_ON(env_strncpy(user_part->config->name,
sizeof(user_part->config->name),
"Inactive", 9));
}
}

181
src/utils/utils_user_part.h Normal file
View File

@ -0,0 +1,181 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_PARTITION_H__
#define __UTILS_PARTITION_H__
#include "../ocf_request.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../metadata/metadata_partition.h"
void ocf_user_part_init(struct ocf_cache *cache);
/* True when the IO class is marked valid in the config metadata. */
static inline bool ocf_user_part_is_valid(struct ocf_user_part *user_part)
{
	return user_part->config->flags.valid != 0;
}
/* Set the IO class priority; the store is skipped when the value
 * already matches. @cache is unused but kept for interface symmetry. */
static inline void ocf_user_part_set_prio(struct ocf_cache *cache,
		struct ocf_user_part *user_part, int16_t prio)
{
	if (user_part->config->priority == prio)
		return;

	user_part->config->priority = prio;
}
/* Priority of IO class @part_id; out-of-range ids report the lowest
 * priority. */
static inline int16_t ocf_user_part_get_prio(struct ocf_cache *cache,
		ocf_part_id_t part_id)
{
	if (part_id >= OCF_USER_IO_CLASS_MAX)
		return OCF_IO_CLASS_PRIO_LOWEST;

	return cache->user_parts[part_id].config->priority;
}
void ocf_user_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid);
/* True when the IO class is flagged as added in the config metadata. */
static inline bool ocf_user_part_is_added(struct ocf_user_part *user_part)
{
	return user_part->config->flags.added != 0;
}
/* Translate an IO class number to a partition id. Numbers that are out
 * of range or refer to a non-valid class map to PARTITION_DEFAULT. */
static inline ocf_part_id_t ocf_user_part_class2id(ocf_cache_t cache, uint64_t class)
{
	if (class < OCF_USER_IO_CLASS_MAX &&
			cache->user_parts[class].config->flags.valid) {
		return class;
	}

	return PARTITION_DEFAULT;
}
/* Number of cache lines currently assigned to @part (atomic read of the
 * runtime counter). */
static inline uint32_t ocf_part_get_occupancy(struct ocf_part *part)
{
	return env_atomic_read(&part->runtime->curr_size);
}
/* Minimum occupancy of @user_part in cache lines: min_size is a
 * percentage of the total cache line count (rounded down). */
static inline uint32_t ocf_user_part_get_min_size(ocf_cache_t cache,
		struct ocf_user_part *user_part)
{
	uint64_t clines = (uint64_t)user_part->config->min_size *
			(uint64_t)cache->conf_meta->cachelines;

	return (uint32_t)(clines / 100);
}
/* Maximum occupancy of @user_part in cache lines: max_size is a
 * percentage of the total cache line count. Note the result is rounded
 * up, unlike the min limit which rounds down. */
static inline uint32_t ocf_user_part_get_max_size(ocf_cache_t cache,
		struct ocf_user_part *user_part)
{
	uint64_t clines = (uint64_t)user_part->config->max_size *
			(uint64_t)cache->conf_meta->cachelines;

	return (uint32_t)OCF_DIV_ROUND_UP(clines, 100);
}
/* Move cache lines of @req that hit in a different partition over to the
 * request's target partition (remapped lines are assigned during
 * eviction, so only hits are handled). */
void ocf_user_part_move(struct ocf_request *req);

/* Iterate over all valid user partitions on the sorted partition list. */
#define for_each_user_part(cache, user_part, id) \
	for_each_lst_entry(&cache->user_part_list, user_part, id, \
		struct ocf_user_part, lst_valid)
/* Re-sort the user partition list (ordering is defined by the list's
 * compare callback registered at init). */
static inline void ocf_user_part_sort(struct ocf_cache *cache)
{
	ocf_lst_sort(&cache->user_part_list);
}
/* An IO class whose max_size is zero cannot hold any cache lines and is
 * considered disabled. */
static inline bool ocf_user_part_is_enabled(struct ocf_user_part *user_part)
{
	return !(user_part->config->max_size == 0);
}
/* Number of cache lines by which @user_part currently exceeds its
 * occupancy limit; 0 when it is at or below the limit. */
static inline uint32_t ocf_user_part_overflow_size(struct ocf_cache *cache,
		struct ocf_user_part *user_part)
{
	uint32_t occupancy = ocf_part_get_occupancy(&user_part->part);
	uint32_t limit = ocf_user_part_get_max_size(cache, user_part);

	return (occupancy > limit) ? (occupancy - limit) : 0;
}
/* True when the target partition of @req can take all the cache lines
 * the request still needs (repart + unmapped) without exceeding its
 * occupancy limit. */
static inline bool ocf_user_part_has_space(struct ocf_request *req)
{
	struct ocf_user_part *target = &req->cache->user_parts[req->part_id];
	uint64_t occupancy = ocf_part_get_occupancy(&target->part);
	uint64_t limit = ocf_user_part_get_max_size(req->cache, target);
	uint64_t needed = ocf_engine_repart_count(req) +
			ocf_engine_unmapped_count(req);

	return occupancy + needed <= limit;
}
/* Cache mode configured for IO class @part_id; out-of-range ids report
 * ocf_cache_mode_none. */
static inline ocf_cache_mode_t ocf_user_part_get_cache_mode(ocf_cache_t cache,
		ocf_part_id_t part_id)
{
	if (part_id >= OCF_USER_IO_CLASS_MAX)
		return ocf_cache_mode_none;

	return cache->user_parts[part_id].config->cache_mode;
}
/* A valid priority is either the PINNED sentinel or lies within the
 * [HIGHEST, LOWEST] numeric range. Written without the GNU case-range
 * extension for portability; equivalent to the former switch form. */
static inline bool ocf_user_part_is_prio_valid(int64_t prio)
{
	if (prio == OCF_IO_CLASS_PRIO_PINNED)
		return true;

	return prio >= OCF_IO_CLASS_PRIO_HIGHEST &&
			prio <= OCF_IO_CLASS_PRIO_LOWEST;
}
/**
 * Routine checks for validity of a partition name.
 *
 * Verified conditions:
 * - name not longer than OCF_IO_CLASS_NAME_MAX - 1 characters
 * - only printable low-ASCII characters, excluding ',' and '"'
 * Condition that is NOT checked:
 * - empty string (an empty string is NOT a valid partition name, but
 *   this function nevertheless returns true for it).
 *
 * @return true if the partition name is a valid name
 */
static inline bool ocf_user_part_is_name_valid(const char *name)
{
	uint32_t i;

	for (i = 0; name[i] != '\0'; i++) {
		char c = name[i];

		if (c < ' ' || c > '~')
			return false;

		if (c == ',' || c == '"')
			return false;

		if (i + 1 >= OCF_IO_CLASS_NAME_MAX)
			return false;
	}

	return true;
}
#endif /* __UTILS_PARTITION_H__ */

View File

@ -48,7 +48,6 @@ class CacheConfig(Structure):
_fields_ = [
("_name", c_char * MAX_CACHE_NAME_SIZE),
("_cache_mode", c_uint32),
("_eviction_policy", c_uint32),
("_promotion_policy", c_uint32),
("_cache_line_size", c_uint64),
("_metadata_layout", c_uint32),
@ -115,11 +114,6 @@ class CacheMode(IntEnum):
return self.value not in [CacheMode.PT, CacheMode.WO]
class EvictionPolicy(IntEnum):
LRU = 0
DEFAULT = LRU
class PromotionPolicy(IntEnum):
ALWAYS = 0
NHIT = 1
@ -167,7 +161,6 @@ class Cache:
owner,
name: str = "cache",
cache_mode: CacheMode = CacheMode.DEFAULT,
eviction_policy: EvictionPolicy = EvictionPolicy.DEFAULT,
promotion_policy: PromotionPolicy = PromotionPolicy.DEFAULT,
cache_line_size: CacheLineSize = CacheLineSize.DEFAULT,
metadata_layout: MetadataLayout = MetadataLayout.DEFAULT,
@ -186,7 +179,6 @@ class Cache:
self.cfg = CacheConfig(
_name=name.encode("ascii"),
_cache_mode=cache_mode,
_eviction_policy=eviction_policy,
_promotion_policy=promotion_policy,
_cache_line_size=cache_line_size,
_metadata_layout=metadata_layout,
@ -351,7 +343,6 @@ class Cache:
"_curr_size": (ioclass_info._curr_size),
"_min_size": int(ioclass_info._min_size),
"_max_size": int(ioclass_info._max_size),
"_eviction_policy_type": int(ioclass_info._eviction_policy_type),
"_cleaning_policy_type": int(ioclass_info._cleaning_policy_type),
}
@ -625,7 +616,6 @@ class Cache:
"status": cache_info.fallback_pt.status,
},
"state": cache_info.state,
"eviction_policy": EvictionPolicy(cache_info.eviction_policy),
"cleaning_policy": CleaningPolicy(cache_info.cleaning_policy),
"promotion_policy": PromotionPolicy(cache_info.promotion_policy),
"cache_line_size": line_size,

View File

@ -15,7 +15,6 @@ class IoClassInfo(Structure):
("_curr_size", c_uint32),
("_min_size", c_uint32),
("_max_size", c_uint32),
("_eviction_policy_type", c_uint8),
("_cleaning_policy_type", c_int),
]

View File

@ -28,7 +28,6 @@ class CacheInfo(Structure):
("dirty_for", c_uint64),
("cache_mode", c_uint32),
("fallback_pt", _FallbackPt),
("eviction_policy", c_uint32),
("cleaning_policy", c_uint32),
("promotion_policy", c_uint32),
("cache_line_size", c_uint64),

View File

@ -15,7 +15,6 @@ from pyocf.types.cache import (
Cache,
CacheMode,
MetadataLayout,
EvictionPolicy,
CleaningPolicy,
)
from pyocf.types.core import Core

View File

@ -11,7 +11,7 @@ from itertools import count
import pytest
from pyocf.ocf import OcfLib
from pyocf.types.cache import Cache, CacheMode, MetadataLayout, EvictionPolicy, CleaningPolicy
from pyocf.types.cache import Cache, CacheMode, MetadataLayout, CleaningPolicy
from pyocf.types.core import Core
from pyocf.types.data import Data
from pyocf.types.io import IoDir
@ -38,7 +38,6 @@ def test_start_check_default(pyocf_ctx):
assert stats["conf"]["cleaning_policy"] == CleaningPolicy.DEFAULT
assert stats["conf"]["cache_mode"] == CacheMode.DEFAULT
assert stats["conf"]["cache_line_size"] == CacheLineSize.DEFAULT
assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT
core_stats = core.get_stats()
assert core_stats["seq_cutoff_policy"] == SeqCutOffPolicy.DEFAULT
@ -156,7 +155,6 @@ def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: Me
stats = cache.get_stats()
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
assert stats["conf"]["cache_line_size"] == cls, "Cache line size"
assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT, "Eviction policy"
assert cache.get_name() == name, "Cache name"
# TODO: metadata_layout, metadata_volatile, max_queue_size,
# queue_unblock_size, pt_unaligned_io, use_submit_fast

View File

@ -13,7 +13,7 @@ from ctypes import (
)
from tests.utils.random import RandomStringGenerator, RandomGenerator, DefaultRanges, Range
from pyocf.types.cache import CacheMode, EvictionPolicy, MetadataLayout, PromotionPolicy
from pyocf.types.cache import CacheMode, MetadataLayout, PromotionPolicy
from pyocf.types.shared import CacheLineSize
import pytest
@ -77,13 +77,6 @@ def not_cache_line_size_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(EvictionPolicy))
)
def not_eviction_policy_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(PromotionPolicy))
)

View File

@ -7,7 +7,7 @@ import logging
import pytest
from pyocf.types.cache import Cache, CacheMode, EvictionPolicy, MetadataLayout, PromotionPolicy
from pyocf.types.cache import Cache, CacheMode, MetadataLayout, PromotionPolicy
from pyocf.types.shared import OcfError, CacheLineSize
from pyocf.types.volume import Volume
from pyocf.utils import Size
@ -73,25 +73,6 @@ def test_fuzzy_start_name(pyocf_ctx, string_randomize, cm, cls):
cache.stop()
@pytest.mark.security
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_fuzzy_start_eviction_policy(pyocf_ctx, not_eviction_policy_randomize, cm, cls):
"""
Test whether it is impossible to start cache with invalid eviction policy value.
:param pyocf_ctx: basic pyocf context fixture
:param c_uint32_randomize: eviction policy enum value to start cache with
:param cm: cache mode value to start cache with
:param cls: cache line size value to start cache with
"""
with pytest.raises(OcfError, match="OCF_ERR_INVAL"):
try_start_cache(
eviction_policy=not_eviction_policy_randomize,
cache_mode=cm,
cache_line_size=cls
)
@pytest.mark.security
@pytest.mark.parametrize("cm", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)

View File

@ -16,7 +16,7 @@ MAIN_DIRECTORY_OF_UNIT_TESTS = "../tests/"
# Paths to all directories, in which tests are stored. All paths should be relative to
# MAIN_DIRECTORY_OF_UNIT_TESTS
DIRECTORIES_WITH_TESTS_LIST = ["cleaning/", "metadata/", "mngt/", "concurrency/", "engine/",
"eviction/", "utils/", "promotion/", "ocf_freelist.c/"]
"ocf_space.c/", "ocf_lru.c/", "utils/", "promotion/"]
# Paths to all directories containing files with sources. All paths should be relative to
# MAIN_DIRECTORY_OF_TESTED_PROJECT

View File

@ -30,7 +30,7 @@
#include "alru.h"
#include "../metadata/metadata.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_realloc.h"
#include "../concurrency/ocf_cache_line_concurrency.h"
#include "../ocf_def_priv.h"
@ -49,7 +49,9 @@ static void cleaning_policy_alru_initialize_test01(void **state)
print_test_description("Check if all variables are set correctly");
cache = test_malloc(sizeof(*cache));
cache->user_parts[part_id].runtime = test_malloc(sizeof(struct ocf_user_part_runtime));
cache->user_parts[part_id].part.runtime = test_malloc(sizeof(struct ocf_part_runtime));
cache->user_parts[part_id].clean_pol = test_malloc(sizeof(*cache->user_parts[part_id].clean_pol));
cache->user_parts[part_id].part.id = part_id;
cache->device = test_malloc(sizeof(struct ocf_cache_device));
cache->device->runtime_meta = test_malloc(sizeof(struct ocf_superblock_runtime));
@ -59,15 +61,16 @@ static void cleaning_policy_alru_initialize_test01(void **state)
assert_int_equal(result, 0);
assert_int_equal(env_atomic_read(&cache->user_parts[part_id].runtime->cleaning.policy.alru.size), 0);
assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_head, collision_table_entries);
assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_tail, collision_table_entries);
assert_int_equal(env_atomic_read(&cache->user_parts[part_id].clean_pol->policy.alru.size), 0);
assert_int_equal(cache->user_parts[part_id].clean_pol->policy.alru.lru_head, collision_table_entries);
assert_int_equal(cache->user_parts[part_id].clean_pol->policy.alru.lru_tail, collision_table_entries);
assert_int_equal(cache->device->runtime_meta->cleaning_thread_access, 0);
test_free(cache->device->runtime_meta);
test_free(cache->device);
test_free(cache->user_parts[part_id].runtime);
test_free(cache->user_parts[part_id].clean_pol);
test_free(cache->user_parts[part_id].part.runtime);
test_free(cache);
}
@ -82,27 +85,29 @@ static void cleaning_policy_alru_initialize_test02(void **state)
print_test_description("Check if only appropirate variables are changed");
cache = test_malloc(sizeof(*cache));
cache->user_parts[part_id].runtime = test_malloc(sizeof(struct ocf_user_part_runtime));
cache->user_parts[part_id].part.runtime = test_malloc(sizeof(struct ocf_part_runtime));
cache->user_parts[part_id].clean_pol = test_malloc(sizeof(*cache->user_parts[part_id].clean_pol));
cache->device = test_malloc(sizeof(struct ocf_cache_device));
cache->device->runtime_meta = test_malloc(sizeof(struct ocf_superblock_runtime));
env_atomic_set(&cache->user_parts[part_id].runtime->cleaning.policy.alru.size, 1);
cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_head = -collision_table_entries;
cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_tail = -collision_table_entries;
env_atomic_set(&cache->user_parts[part_id].clean_pol->policy.alru.size, 1);
cache->user_parts[part_id].clean_pol->policy.alru.lru_head = -collision_table_entries;
cache->user_parts[part_id].clean_pol->policy.alru.lru_tail = -collision_table_entries;
result = cleaning_policy_alru_initialize_part(cache, cache->user_parts[part_id], 0, 0);
result = cleaning_policy_alru_initialize_part(cache, &cache->user_parts[part_id], 0, 0);
assert_int_equal(result, 0);
assert_int_equal(env_atomic_read(&cache->user_parts[part_id].runtime->cleaning.policy.alru.size), 1);
assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_head, -collision_table_entries);
assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_tail, -collision_table_entries);
assert_int_equal(env_atomic_read(&cache->user_parts[part_id].clean_pol->policy.alru.size), 1);
assert_int_equal(cache->user_parts[part_id].clean_pol->policy.alru.lru_head, -collision_table_entries);
assert_int_equal(cache->user_parts[part_id].clean_pol->policy.alru.lru_tail, -collision_table_entries);
assert_int_equal(cache->device->runtime_meta->cleaning_thread_access, 0);
test_free(cache->device->runtime_meta);
test_free(cache->device);
test_free(cache->user_parts[part_id].runtime);
test_free(cache->user_parts[part_id].clean_pol);
test_free(cache->user_parts[part_id].part.runtime);
test_free(cache);
}

View File

@ -3,7 +3,11 @@
* <tested_function>ocf_prepare_clines_miss</tested_function>
* <functions_to_leave>
* ocf_prepare_clines_evict
* ocf_engine_evict
* ocf_engine_remap
* ocf_req_set_mapping_error
* ocf_req_test_mapping_error
* ocf_req_set_part_evict
* ocf_req_part_evict
* </functions_to_leave>
*/
@ -22,15 +26,14 @@
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "../ocf_freelist.h"
#include "engine_common.h"
#include "engine_debug.h"
#include "../utils/utils_cache_line.h"
#include "../ocf_request.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../metadata/metadata.h"
#include "../eviction/eviction.h"
#include "../ocf_space.h"
#include "../promotion/promotion.h"
#include "../concurrency/ocf_concurrency.h"
@ -49,7 +52,7 @@ void __wrap_ocf_req_hash_unlock_wr(struct ocf_request *req)
{
}
uint32_t __wrap_ocf_part_has_space(struct ocf_request *req)
uint32_t __wrap_ocf_user_part_has_space(struct ocf_request *req)
{
return mock();
}
@ -71,45 +74,27 @@ void __wrap_ocf_metadata_end_exclusive_access(
{
}
bool __wrap_ocf_part_is_enabled(struct ocf_user_part *target_part)
{
return mock();
}
void __wrap_ocf_engine_map(struct ocf_request *req)
{
function_called();
}
bool __wrap_ocf_req_test_mapping_error(struct ocf_request *req)
{
return mock();
}
void __wrap_ocf_req_set_mapping_error(struct ocf_request *req)
{
function_called();
}
int __wrap_space_managment_evict_do(struct ocf_request *req)
int __wrap_ocf_space_managment_remap_do(struct ocf_request *req)
{
function_called();
return mock();
}
uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req)
{
return 100;
}
static void ocf_prepare_clines_miss_test01(void **state)
{
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is disabled and empty\n");
will_return(__wrap_ocf_part_is_enabled, false);
expect_function_call(__wrap_ocf_req_set_mapping_error);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
print_test_description("Target part doesn't have enough space.\n");
print_test_description("\tEviction success\n");
will_return_always(__wrap_ocf_user_part_has_space, false);
expect_function_call(__wrap_ocf_space_managment_remap_do);
will_return_always(__wrap_ocf_space_managment_remap_do, LOOKUP_REMAPPED);
ocf_prepare_clines_miss(&req);
assert(!ocf_req_test_mapping_error(&req));
assert(ocf_req_part_evict(&req));
}
static void ocf_prepare_clines_miss_test02(void **state)
@ -117,13 +102,17 @@ static void ocf_prepare_clines_miss_test02(void **state)
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is disabled but has cachelines assigned.\n");
print_test_description("\tMark mapping error\n");
print_test_description("Target part doesn't have enough space.\n");
print_test_description("\tEviction failed\n");
will_return(__wrap_ocf_part_is_enabled, false);
expect_function_call(__wrap_ocf_req_set_mapping_error);
will_return_always(__wrap_ocf_user_part_has_space, false);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
expect_function_call(__wrap_ocf_space_managment_remap_do);
will_return(__wrap_ocf_space_managment_remap_do, LOOKUP_MISS);
ocf_prepare_clines_miss(&req);
assert(ocf_req_test_mapping_error(&req));
assert(ocf_req_part_evict(&req));
}
static void ocf_prepare_clines_miss_test03(void **state)
@ -131,20 +120,16 @@ static void ocf_prepare_clines_miss_test03(void **state)
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("\tEviction is ok and cachelines lock is acquired.\n");
print_test_description("Target part has enough space.\n");
print_test_description("\tEviction success\n");
will_return(__wrap_ocf_part_is_enabled, true);
will_return_always(__wrap_ocf_part_has_space, false);
expect_function_call(__wrap_space_managment_evict_do);
will_return_always(__wrap_space_managment_evict_do, LOOKUP_INSERTED);
will_return_always(__wrap_ocf_user_part_has_space, true);
expect_function_call(__wrap_ocf_space_managment_remap_do);
will_return_always(__wrap_ocf_space_managment_remap_do, LOOKUP_REMAPPED);
will_return_always(__wrap_ocf_req_test_mapping_error, false);
will_return(__wrap_lock_clines, 0);
expect_function_call(__wrap_lock_clines);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), 0);
ocf_prepare_clines_miss(&req);
assert(!ocf_req_test_mapping_error(&req));
assert(!ocf_req_part_evict(&req));
}
static void ocf_prepare_clines_miss_test04(void **state)
@ -152,85 +137,17 @@ static void ocf_prepare_clines_miss_test04(void **state)
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("Target part has enough space.\n");
print_test_description("\tEviction failed\n");
will_return(__wrap_ocf_part_is_enabled, true);
will_return_always(__wrap_ocf_part_has_space, false);
will_return_always(__wrap_ocf_user_part_has_space, true);
expect_function_call(__wrap_space_managment_evict_do);
will_return(__wrap_space_managment_evict_do, LOOKUP_MISS);
expect_function_call(__wrap_ocf_req_set_mapping_error);
will_return_always(__wrap_ocf_req_test_mapping_error, true);
expect_function_call(__wrap_ocf_space_managment_remap_do);
will_return(__wrap_ocf_space_managment_remap_do, LOOKUP_MISS);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
}
static void ocf_prepare_clines_miss_test06(void **state)
{
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("Eviction and mapping were ok, but failed to lock cachelines.\n");
will_return_always(__wrap_ocf_part_has_space, false);
expect_function_call(__wrap_space_managment_evict_do);
will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);
will_return(__wrap_ocf_part_is_enabled, true);
will_return_always(__wrap_ocf_req_test_mapping_error, false);
expect_function_call(__wrap_lock_clines);
will_return(__wrap_lock_clines, -OCF_ERR_NO_LOCK);
expect_function_call(__wrap_ocf_req_set_mapping_error);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
}
static void ocf_prepare_clines_miss_test07(void **state)
{
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("Eviction and mapping were ok, lock not acquired.\n");
will_return_always(__wrap_ocf_part_has_space, false);
expect_function_call(__wrap_space_managment_evict_do);
will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);
will_return(__wrap_ocf_part_is_enabled, true);
will_return_always(__wrap_ocf_req_test_mapping_error, false);
expect_function_call(__wrap_lock_clines);
will_return(__wrap_lock_clines, OCF_LOCK_NOT_ACQUIRED);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), OCF_LOCK_NOT_ACQUIRED);
}
static void ocf_prepare_clines_miss_test08(void **state)
{
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is enabled has enough space.\n");
print_test_description("\tMapping and cacheline lock are both ok\n");
will_return(__wrap_ocf_part_is_enabled, true);
will_return_always(__wrap_ocf_part_has_space, true);
expect_function_call(__wrap_ocf_engine_map);
will_return_always(__wrap_ocf_req_test_mapping_error, false);
expect_function_call(__wrap_lock_clines);
will_return(__wrap_lock_clines, OCF_LOCK_ACQUIRED);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), OCF_LOCK_ACQUIRED);
ocf_prepare_clines_miss(&req);
assert(ocf_req_test_mapping_error(&req));
assert(!ocf_req_part_evict(&req));
}
int main(void)
@ -240,9 +157,6 @@ int main(void)
cmocka_unit_test(ocf_prepare_clines_miss_test02),
cmocka_unit_test(ocf_prepare_clines_miss_test03),
cmocka_unit_test(ocf_prepare_clines_miss_test04),
cmocka_unit_test(ocf_prepare_clines_miss_test06),
cmocka_unit_test(ocf_prepare_clines_miss_test07),
cmocka_unit_test(ocf_prepare_clines_miss_test08)
};
print_message("Unit test for ocf_prepare_clines_miss\n");

View File

@ -30,13 +30,13 @@ ocf_mngt_cache_mode_has_lazy_write
#include "../ocf_queue_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_pipeline.h"
#include "../concurrency/ocf_concurrency.h"
#include "../eviction/ops.h"
#include "../ocf_lru.h"
#include "../ocf_ctx_priv.h"
#include "../cleaning/cleaning.h"

View File

@ -24,13 +24,13 @@
#include "../ocf_queue_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_pipeline.h"
#include "../concurrency/ocf_concurrency.h"
#include "../eviction/ops.h"
#include "../ocf_lru.h"
#include "../ocf_ctx_priv.h"
#include "../cleaning/cleaning.h"

View File

@ -31,14 +31,14 @@
#include "../ocf_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../eviction/ops.h"
#include "../utils/utils_user_part.h"
#include "../ocf_lru.h"
#include "ocf_env.h"
#include "mngt/ocf_mngt_io_class.c/ocf_mngt_io_class_generated_wraps.c"
/* Functions mocked for testing purposes */
bool __wrap_ocf_part_is_added(struct ocf_user_part *part)
bool __wrap_ocf_user_part_is_added(struct ocf_user_part *user_part)
{
function_called();
return mock();
@ -51,20 +51,20 @@ int __wrap__ocf_mngt_set_partition_size(struct ocf_cache *cache,
return mock();
}
void __wrap_ocf_part_set_prio(struct ocf_cache *cache,
struct ocf_user_part *part, int16_t prio)
void __wrap_ocf_user_part_set_prio(struct ocf_cache *cache,
struct ocf_user_part *user_part, int16_t prio)
{
function_called();
}
bool __wrap_ocf_part_is_valid(struct ocf_user_part *part)
bool __wrap_ocf_user_part_is_valid(struct ocf_user_part *user_part)
{
function_called();
return mock();
}
void __wrap_ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
void __wrap_ocf_user_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid)
{
function_called();
@ -79,7 +79,7 @@ int __wrap__ocf_mngt_io_class_validate_cfg(ocf_cache_t cache,
return mock();
}
void __wrap_ocf_part_sort(struct ocf_cache *cache)
void __wrap_ocf_user_part_sort(struct ocf_cache *cache)
{
function_called();
}
@ -93,7 +93,7 @@ static inline void setup_valid_config(struct ocf_mngt_io_class_config *cfg,
bool remove)
{
int i;
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
cfg[i].class_id = i;
cfg[i].name = remove ? NULL : i == 0 ? "unclassified" :"test_io_class_name" ;
cfg[i].prio = i;
@ -112,7 +112,7 @@ static void ocf_mngt_io_classes_configure_test03(void **state)
cache = test_malloc(sizeof(*cache));
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
cache->user_parts[i].config =
test_malloc(sizeof(struct ocf_user_part_config));
}
@ -120,30 +120,30 @@ static void ocf_mngt_io_classes_configure_test03(void **state)
setup_valid_config(cfg.config, true);
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
expect_function_call(__wrap__ocf_mngt_io_class_validate_cfg);
will_return(__wrap__ocf_mngt_io_class_validate_cfg, 0);
}
/* Removing default io_class is not allowed */
for (i = 1; i < OCF_IO_CLASS_MAX; i++) {
expect_function_call(__wrap_ocf_part_is_valid);
will_return(__wrap_ocf_part_is_valid, 1);
for (i = 1; i < OCF_USER_IO_CLASS_MAX; i++) {
expect_function_call(__wrap_ocf_user_part_is_valid);
will_return(__wrap_ocf_user_part_is_valid, 1);
expect_function_call(__wrap_ocf_part_set_valid);
expect_function_call(__wrap_ocf_user_part_set_valid);
/* Test assumes default partition has id equal 0 */
expect_in_range(__wrap_ocf_part_set_valid, id, OCF_IO_CLASS_ID_MIN + 1,
expect_in_range(__wrap_ocf_user_part_set_valid, id, OCF_IO_CLASS_ID_MIN + 1,
OCF_IO_CLASS_ID_MAX);
expect_value(__wrap_ocf_part_set_valid, valid, false);
expect_value(__wrap_ocf_user_part_set_valid, valid, false);
}
expect_function_call(__wrap_ocf_part_sort);
expect_function_call(__wrap_ocf_user_part_sort);
result = ocf_mngt_cache_io_classes_configure(cache, &cfg);
assert_int_equal(result, 0);
for (i = 0; i < OCF_IO_CLASS_MAX; i++)
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++)
test_free(cache->user_parts[i].config);
test_free(cache);
@ -157,7 +157,7 @@ static void ocf_mngt_io_classes_configure_test02(void **state)
cache = test_malloc(sizeof(*cache));
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
cache->user_parts[i].config =
test_malloc(sizeof(struct ocf_user_part_config));
}
@ -169,46 +169,46 @@ static void ocf_mngt_io_classes_configure_test02(void **state)
print_test_description("Configure all possible io classes");
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
expect_function_call(__wrap__ocf_mngt_io_class_validate_cfg);
will_return(__wrap__ocf_mngt_io_class_validate_cfg, 0);
}
/* Configure default io_class */
expect_function_call(__wrap_ocf_part_is_added);
will_return(__wrap_ocf_part_is_added, 1);
expect_function_call(__wrap_ocf_user_part_is_added);
will_return(__wrap_ocf_user_part_is_added, 1);
expect_function_call(__wrap__ocf_mngt_set_partition_size);
will_return(__wrap__ocf_mngt_set_partition_size, 0);
expect_function_call(__wrap_ocf_part_set_prio);
expect_function_call(__wrap_ocf_user_part_set_prio);
/* Configure custom io_classes */
for (i = 1; i < OCF_IO_CLASS_MAX; i++) {
expect_function_call(__wrap_ocf_part_is_added);
will_return(__wrap_ocf_part_is_added, 1);
for (i = 1; i < OCF_USER_IO_CLASS_MAX; i++) {
expect_function_call(__wrap_ocf_user_part_is_added);
will_return(__wrap_ocf_user_part_is_added, 1);
expect_function_call(__wrap__ocf_mngt_set_partition_size);
will_return(__wrap__ocf_mngt_set_partition_size, 0);
expect_function_call(__wrap_ocf_part_is_valid);
will_return(__wrap_ocf_part_is_valid, 0);
expect_function_call(__wrap_ocf_user_part_is_valid);
will_return(__wrap_ocf_user_part_is_valid, 0);
expect_function_call(__wrap_ocf_part_set_valid);
expect_in_range(__wrap_ocf_part_set_valid, id, OCF_IO_CLASS_ID_MIN,
expect_function_call(__wrap_ocf_user_part_set_valid);
expect_in_range(__wrap_ocf_user_part_set_valid, id, OCF_IO_CLASS_ID_MIN,
OCF_IO_CLASS_ID_MAX);
expect_value(__wrap_ocf_part_set_valid, valid, true);
expect_value(__wrap_ocf_user_part_set_valid, valid, true);
expect_function_call(__wrap_ocf_part_set_prio);
expect_function_call(__wrap_ocf_user_part_set_prio);
}
expect_function_call(__wrap_ocf_part_sort);
expect_function_call(__wrap_ocf_user_part_sort);
result = ocf_mngt_cache_io_classes_configure(cache, &cfg);
assert_int_equal(result, 0);
for (i = 0; i < OCF_IO_CLASS_MAX; i++)
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++)
test_free(cache->user_parts[i].config);
test_free(cache);
@ -217,7 +217,7 @@ static void ocf_mngt_io_classes_configure_test02(void **state)
static void ocf_mngt_io_classes_configure_test01(void **state)
{
struct ocf_cache *cache;
struct ocf_mngt_io_classes_config cfg[OCF_IO_CLASS_MAX];
struct ocf_mngt_io_classes_config cfg[OCF_USER_IO_CLASS_MAX];
int error_code = -OCF_ERR_INVAL;
int result;

View File

@ -1,382 +0,0 @@
/*
* <tested_file_path>src/ocf_freelist.c</tested_file_path>
* <tested_function>ocf_freelist_get_cache_line</tested_function>
* <functions_to_leave>
* ocf_freelist_init
* ocf_freelist_deinit
* ocf_freelist_populate
* next_phys_invalid
* ocf_freelist_lock
* ocf_freelist_trylock
* ocf_freelist_unlock
* _ocf_freelist_remove_cache_line
* ocf_freelist_get_cache_line_fast
* ocf_freelist_get_cache_line_slow
* ocf_freelist_add_cache_line
* ocf_freelist_get_cache_line_ctx
* get_next_victim_freelist
* ocf_freelist_put_cache_line
* </functions_to_leave>
*/
#undef static
#undef inline
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"
#include "ocf/ocf.h"
#include "metadata/metadata.h"
#include "ocf_freelist.c/ocf_freelist_get_put_generated_wraps.c"
ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
{
return mock();
}
unsigned __wrap_env_get_execution_context_count(void)
{
return mock();
}
unsigned __wrap_env_get_execution_context(void)
{
return mock();
}
void __wrap_env_put_execution_context(unsigned ctx)
{
}
/* simulate no striping */
ocf_cache_line_t __wrap_ocf_metadata_map_phy2lg(ocf_cache_t cache, ocf_cache_line_t phy)
{
return phy;
}
bool __wrap_metadata_test_valid_any(ocf_cache_t cache, ocf_cache_line_t cline)
{
return mock();
}
/* metadata partition info interface mock: */
#define max_clines 100
struct {
ocf_cache_line_t prev;
ocf_cache_line_t next;
} partition_list[max_clines];
void __wrap_ocf_metadata_set_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id,
ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
{
assert_int_equal(part_id, PARTITION_INVALID);
partition_list[line].prev = prev_line;
partition_list[line].next = next_line;
}
void __wrap_ocf_metadata_get_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t *part_id,
ocf_cache_line_t *next_line, ocf_cache_line_t *prev_line)
{
if (part_id)
*part_id = PARTITION_INVALID;
if (prev_line)
*prev_line = partition_list[line].prev;
if (next_line)
*next_line = partition_list[line].next;
}
void __wrap_ocf_metadata_set_partition_prev(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t prev_line)
{
partition_list[line].prev = prev_line;
}
void __wrap_ocf_metadata_set_partition_next(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next_line)
{
partition_list[line].next = next_line;
}
static void ocf_freelist_get_cache_line_get_fast(void **state)
{
unsigned num_cls = 8;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
ocf_cache_line_t line;
print_test_description("Verify get free cache line get fast path");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
will_return_maybe(__wrap_metadata_test_valid_any, false);
ocf_freelist_init(&freelist, NULL);
ocf_freelist_populate(freelist, num_cls);
/* now there are following cachelines on per-context lists:
* ctx 0: 0, 1, 2
* ctx 1: 3, 4, 5
* ctx 2: 6, 7
*/
/* get cline from context 1 */
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 3);
/* ctx 0: 0, 1, 2
* ctx 1: _, 4, 5
* ctx 2: 6, 7 */
/* get cline from context 2 */
will_return(__wrap_env_get_execution_context, 2);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 6);
/* ctx 0: 0, 1, 2
* ctx 1: _, 4, 5
* ctx 2: _, 7 */
/* get cline from context 1 */
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 4);
/* ctx 0: 0, 1, 2
* ctx 1: _, _, 5
* ctx 2: _, 7 */
/* get cline from context 0 */
will_return(__wrap_env_get_execution_context, 0);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 0);
/* ctx 0: _, 1, 2
* ctx 1: _, _, 5
* ctx 2: _, 7 */
/* get cline from context 0 */
will_return(__wrap_env_get_execution_context, 0);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 1);
/* ctx 0: _, _, 2
* ctx 1: _, _, 5
* ctx 2: _, 7 */
/* get cline from context 0 */
will_return(__wrap_env_get_execution_context, 0);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 2);
/* ctx 0: _, _, _,
* ctx 1: _, _, 5
* ctx 2: _, 7 */
/* get cline from context 2 */
will_return(__wrap_env_get_execution_context, 2);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 7);
/* ctx 0: _, _, _,
* ctx 1: _, _, _5
* ctx 2: _, _ */
/* get cline from context 1 */
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 5);
/* ctx 0: _, _, _,
* ctx 1: _, _, _
* ctx 2: _, _ */
ocf_freelist_deinit(freelist);
}
static void ocf_freelist_get_cache_line_get_slow(void **state)
{
unsigned num_cls = 8;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
ocf_cache_line_t line;
print_test_description("Verify get free cache line get slow path");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
will_return_maybe(__wrap_metadata_test_valid_any, false);
/* always return exec ctx 0 */
will_return_maybe(__wrap_env_get_execution_context, 0);
ocf_freelist_init(&freelist, NULL);
ocf_freelist_populate(freelist, num_cls);
/* now there are following cachelines on per-context lists:
* ctx 0: 0, 1, 2
* ctx 1: 3, 4, 5
* ctx 2: 6, 7
*/
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 0);
/* ctx 0: _, 1, 2
* ctx 1: 3, 4, 5
* ctx 2: 6, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 1);
/* ctx 0: _, _, 2
* ctx 1: 3, 4, 5
* ctx 2: 6, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 2);
/* ctx 0: _, _, _
* ctx 1: 3, 4, 5
* ctx 2: 6, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 3);
/* ctx 0: _, _, _
* ctx 1: _, 4, 5
* ctx 2: 6, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 6);
/* ctx 0: _, _, _
* ctx 1: _, 4, 5
* ctx 2: _, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 4);
/* ctx 0: _, _, _
* ctx 1: _, _, 5
* ctx 2: _, 7 */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 7);
/* ctx 0: _, _, _
* ctx 1: _, _, 5
* ctx 2: _, _ */
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 5);
/* ctx 0: _, _, _,
* ctx 1: _, _, _
* ctx 2: _, _ */
ocf_freelist_deinit(freelist);
}
static void ocf_freelist_get_cache_line_put(void **state)
{
unsigned num_cls = 8;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
ocf_cache_line_t line;
print_test_description("Verify freelist cacheline put");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
will_return_maybe(__wrap_metadata_test_valid_any, false);
ocf_freelist_init(&freelist, NULL);
ocf_freelist_populate(freelist, num_cls);
/* get some clines from the freelists */
will_return(__wrap_env_get_execution_context, 0);
ocf_freelist_get_cache_line(freelist, &line);
will_return(__wrap_env_get_execution_context, 0);
ocf_freelist_get_cache_line(freelist, &line);
will_return(__wrap_env_get_execution_context, 0);
ocf_freelist_get_cache_line(freelist, &line);
will_return(__wrap_env_get_execution_context, 0);
ocf_freelist_get_cache_line(freelist, &line);
will_return(__wrap_env_get_execution_context, 0);
ocf_freelist_get_cache_line(freelist, &line);
/* ctx 0:
* ctx 1: 4, 5
* ctx 2: 7 */
will_return(__wrap_env_get_execution_context, 1);
ocf_freelist_put_cache_line(freelist, 0);
will_return(__wrap_env_get_execution_context, 1);
ocf_freelist_put_cache_line(freelist, 2);
will_return(__wrap_env_get_execution_context, 2);
ocf_freelist_put_cache_line(freelist, 3);
/* ctx 0:
* ctx 1: 4, 5, 0, 2
* ctx 2: 7, 3*/
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 4);
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 5);
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 0);
will_return(__wrap_env_get_execution_context, 1);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 2);
will_return(__wrap_env_get_execution_context, 2);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 7);
will_return(__wrap_env_get_execution_context, 2);
assert(ocf_freelist_get_cache_line(freelist, &line));
assert_int_equal(line, 3);
ocf_freelist_deinit(freelist);
}
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(ocf_freelist_get_cache_line_get_fast),
cmocka_unit_test(ocf_freelist_get_cache_line_get_slow),
cmocka_unit_test(ocf_freelist_get_cache_line_put)
};
print_message("Unit test for ocf_freelist_get_cache_line\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}

View File

@ -1,68 +0,0 @@
/*
* <tested_file_path>src/ocf_freelist.c</tested_file_path>
* <tested_function>ocf_freelist_populate</tested_function>
* <functions_to_leave>
* ocf_freelist_init
* ocf_freelist_deinit
* </functions_to_leave>
*/
#undef static
#undef inline
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"
#include "ocf/ocf.h"
#include "metadata/metadata.h"
#include "ocf_freelist.c/ocf_freelist_init_generated_wraps.c"
ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
{
function_called();
return mock();
}
ocf_cache_line_t __wrap_env_get_execution_context_count(ocf_cache_t cache)
{
function_called();
return mock();
}
static void ocf_freelist_init_test01(void **state)
{
unsigned num_cls = 9;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
ocf_cache_t cache = 0x1234;
print_test_description("Freelist initialization test");
expect_function_call(__wrap_ocf_metadata_collision_table_entries);
will_return(__wrap_ocf_metadata_collision_table_entries, num_cls);
expect_function_call(__wrap_env_get_execution_context_count);
will_return(__wrap_env_get_execution_context_count, num_ctxts);
ocf_freelist_init(&freelist, cache);
assert(freelist != NULL);
ocf_freelist_deinit(freelist);
}
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(ocf_freelist_init_test01)
};
print_message("Unit test of ocf_freelist_init\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}

View File

@ -1,213 +0,0 @@
/*
* <tested_file_path>src/ocf_freelist.c</tested_file_path>
* <tested_function>ocf_freelist_get_cache_line</tested_function>
* <functions_to_leave>
* ocf_freelist_init
* ocf_freelist_deinit
* ocf_freelist_populate
* next_phys_invalid
* ocf_freelist_unlock
* _ocf_freelist_remove_cache_line
* ocf_freelist_get_cache_line_fast
* ocf_freelist_get_cache_line_slow
* ocf_freelist_add_cache_line
* ocf_freelist_get_cache_line_ctx
* get_next_victim_freelist
* ocf_freelist_put_cache_line
* </functions_to_leave>
*/
#undef static
#undef inline
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"
#include "ocf/ocf.h"
#include "metadata/metadata.h"
#include "ocf_freelist.c/ocf_freelist_get_put_generated_wraps.c"
ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
{
return mock();
}
unsigned __wrap_env_get_execution_context_count(void)
{
return mock();
}
unsigned __wrap_env_get_execution_context(void)
{
return mock();
}
void __wrap_env_put_execution_context(unsigned ctx)
{
}
/* simulate no striping */
ocf_cache_line_t __wrap_ocf_metadata_map_phy2lg(ocf_cache_t cache, ocf_cache_line_t phy)
{
return phy;
}
bool __wrap_metadata_test_valid_any(ocf_cache_t cache, ocf_cache_line_t cline)
{
return mock();
}
void __wrap_ocf_freelist_lock(ocf_freelist_t freelist, uint32_t ctx)
{
function_called();
check_expected(ctx);
}
int __wrap_ocf_freelist_trylock(ocf_freelist_t freelist, uint32_t ctx)
{
function_called();
check_expected(ctx);
return mock();
}
/* metadata partition info interface mock: */
#define max_clines 100
struct {
ocf_cache_line_t prev;
ocf_cache_line_t next;
} partition_list[max_clines];
void __wrap_ocf_metadata_set_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id,
ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
{
assert_int_equal(part_id, PARTITION_INVALID);
partition_list[line].prev = prev_line;
partition_list[line].next = next_line;
}
void __wrap_ocf_metadata_get_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t *part_id,
ocf_cache_line_t *next_line, ocf_cache_line_t *prev_line)
{
if (part_id)
*part_id = PARTITION_INVALID;
if (prev_line)
*prev_line = partition_list[line].prev;
if (next_line)
*next_line = partition_list[line].next;
}
void __wrap_ocf_metadata_set_partition_prev(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t prev_line)
{
partition_list[line].prev = prev_line;
}
void __wrap_ocf_metadata_set_partition_next(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_cache_line_t next_line)
{
partition_list[line].next = next_line;
}
static void ocf_freelist_get_put_locks(void **state)
{
unsigned num_cls = 4;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
ocf_cache_line_t line;
print_test_description("Verify lock/trylock sequence in get free cacheline");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
will_return_maybe(__wrap_metadata_test_valid_any, false);
/* simulate context 1 for the entire test duration */
will_return_maybe(__wrap_env_get_execution_context, 1);
ocf_freelist_init(&freelist, NULL);
ocf_freelist_populate(freelist, num_cls);
/****************************************************************/
/* verify fast path locking - scucessfull trylock */
/* ctx 0: 0, 3
* ctx 1: 1
* ctx 2: 2
* slowpath next victim: 0
*/
expect_value(__wrap_ocf_freelist_trylock, ctx, 1);
expect_function_call(__wrap_ocf_freelist_trylock);
will_return(__wrap_ocf_freelist_trylock, 0);
ocf_freelist_get_cache_line(freelist, &line);
/****************************************************************/
/* verify fast path locking - scucessfull trylock in slowpath */
/* ctx 0: 0, 3
* ctx 1:
* ctx 2: 2
* slowpath next victim: 0 */
/* we expect trylock for context 0, since context 1 has empty list */
expect_value(__wrap_ocf_freelist_trylock, ctx, 0);
expect_function_call(__wrap_ocf_freelist_trylock);
will_return(__wrap_ocf_freelist_trylock, 0);
ocf_freelist_get_cache_line(freelist, &line);
/****************************************************************/
/* verify fast path locking - trylock failure in slowpath */
/* ctx 0: 3
* ctx 1:
* ctx 2: 2
* slowpath next victim: 1 */
/* fastpath will fail immediately - context 1 list is empty */
/* next slowpath victim context (1) is empty - will move to ctx 2 */
/* so now we expect trylock for context no 2 - injecting error here*/
expect_value(__wrap_ocf_freelist_trylock, ctx, 2);
expect_function_call(__wrap_ocf_freelist_trylock);
will_return(__wrap_ocf_freelist_trylock, 1);
/* slowpath will attempt to trylock next non-empty context - 0
* - injecting error here as well */
expect_value(__wrap_ocf_freelist_trylock, ctx, 0);
expect_function_call(__wrap_ocf_freelist_trylock);
will_return(__wrap_ocf_freelist_trylock, 1);
/* slowpath trylock loop failed - expecting full lock */
expect_value(__wrap_ocf_freelist_lock, ctx, 2);
expect_function_call(__wrap_ocf_freelist_lock);
/* execute freelist_get_cache_line */
ocf_freelist_get_cache_line(freelist, &line);
/****************************************************************/
ocf_freelist_deinit(freelist);
}
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(ocf_freelist_get_put_locks)
};
print_message("Unit test for ocf_freelist_get_cache_line locking\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}

View File

@ -1,138 +0,0 @@
/*
* <tested_file_path>src/ocf_freelist.c</tested_file_path>
* <tested_function>ocf_freelist_populate</tested_function>
* <functions_to_leave>
* ocf_freelist_init
* ocf_freelist_deinit
* ocf_freelist_populate
* next_phys_invalid
* </functions_to_leave>
*/
#undef static
#undef inline
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"
#include "ocf/ocf.h"
#include "metadata/metadata.h"
#include "ocf_freelist.c/ocf_freelist_populate_generated_wraps.c"
ocf_cache_line_t __wrap_ocf_metadata_collision_table_entries(ocf_cache_t cache)
{
return mock();
}
ocf_cache_line_t __wrap_env_get_execution_context_count(ocf_cache_t cache)
{
return mock();
}
/* simulate no striping */
ocf_cache_line_t __wrap_ocf_metadata_map_phy2lg(ocf_cache_t cache, ocf_cache_line_t phy)
{
return phy;
}
bool __wrap_metadata_test_valid_any(ocf_cache_t cache, ocf_cache_line_t cline)
{
return mock();
}
void __wrap_ocf_metadata_set_partition_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id,
ocf_cache_line_t next_line, ocf_cache_line_t prev_line)
{
print_message("%s %u %u %u\n", __func__, prev_line, line, next_line);
check_expected(line);
check_expected(part_id);
check_expected(next_line);
check_expected(prev_line);
}
#define expect_set_info(curr, part, next, prev) \
expect_value(__wrap_ocf_metadata_set_partition_info, line, curr); \
expect_value(__wrap_ocf_metadata_set_partition_info, part_id, part); \
expect_value(__wrap_ocf_metadata_set_partition_info, next_line, next); \
expect_value(__wrap_ocf_metadata_set_partition_info, prev_line, prev);
static void ocf_freelist_populate_test01(void **state)
{
unsigned num_cls = 8;
unsigned num_ctxts = 3;
ocf_freelist_t freelist;
unsigned ctx_iter, cl_iter;
print_test_description("Verify proper set_partition_info order and arguments - empty cache");
will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);
will_return_maybe(__wrap_metadata_test_valid_any, false);
ocf_freelist_init(&freelist, NULL);
expect_set_info(0, PARTITION_INVALID, 1 , num_cls);
expect_set_info(1, PARTITION_INVALID, 2 , 0);
expect_set_info(2, PARTITION_INVALID, num_cls, 1);
expect_set_info(3, PARTITION_INVALID, 4 , num_cls);
expect_set_info(4, PARTITION_INVALID, 5 , 3);
expect_set_info(5, PARTITION_INVALID, num_cls, 4);
expect_set_info(6, PARTITION_INVALID, 7 , num_cls);
expect_set_info(7, PARTITION_INVALID, num_cls, 6);
ocf_freelist_populate(freelist, num_cls);
ocf_freelist_deinit(freelist);
}
/* Populate a freelist over a cache where only some lines are invalid
 * (2, 3, 4 and 7) and verify that only those 4 lines are linked, spread
 * across the execution contexts (num_cls is the end-of-list marker).
 * Fix: dropped the unused locals 'ctx_iter' and 'cl_iter'. */
static void ocf_freelist_populate_test02(void **state)
{
	unsigned num_cls = 8;
	unsigned num_ctxts = 3;
	ocf_freelist_t freelist;

	print_test_description("Verify proper set_partition_info order and arguments - some valid clines");

	will_return_maybe(__wrap_ocf_metadata_collision_table_entries, num_cls);
	will_return_maybe(__wrap_env_get_execution_context_count, num_ctxts);

	ocf_freelist_init(&freelist, NULL);

	/* simulate only cachelines 2, 3, 4, 7 invalid (queued per line 0..7) */
	will_return(__wrap_metadata_test_valid_any, true);
	will_return(__wrap_metadata_test_valid_any, true);
	will_return(__wrap_metadata_test_valid_any, false);
	will_return(__wrap_metadata_test_valid_any, false);
	will_return(__wrap_metadata_test_valid_any, false);
	will_return(__wrap_metadata_test_valid_any, true);
	will_return(__wrap_metadata_test_valid_any, true);
	will_return(__wrap_metadata_test_valid_any, false);

	/* 4 free lines over 3 contexts -> chains of {2,3}, {4} and {7} */
	expect_set_info(2, PARTITION_INVALID, 3      , num_cls);
	expect_set_info(3, PARTITION_INVALID, num_cls, 2);

	expect_set_info(4, PARTITION_INVALID, num_cls, num_cls);

	expect_set_info(7, PARTITION_INVALID, num_cls, num_cls);

	ocf_freelist_populate(freelist, 4);

	ocf_freelist_deinit(freelist);
}
/* Test-suite entry point: runs the freelist population test group. */
int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(ocf_freelist_populate_test01),
		cmocka_unit_test(ocf_freelist_populate_test02),
	};

	print_message("Unit test of src/ocf_freelist.c\n");

	return cmocka_run_group_tests(tests, NULL, NULL);
}

View File

@ -1,5 +1,5 @@
/*
* <tested_file_path>src/eviction/lru.c</tested_file_path>
* <tested_file_path>src/ocf_lru.c</tested_file_path>
* <tested_function>_lru_init</tested_function>
* <functions_to_leave>
* update_lru_head
@ -23,8 +23,8 @@
#include <cmocka.h>
#include "print_desc.h"
#include "eviction.h"
#include "lru.h"
#include "ocf_space.h"
#include "ocf_lru.h"
#include "ops.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_cache_line.h"
@ -33,14 +33,19 @@
#include "../engine/engine_zero.h"
#include "../ocf_request.h"
#include "eviction/lru.c/lru_generated_wraps.c"
#include "ocf_lru.c/lru_generated_wraps.c"
#define META_COUNT 128
static union eviction_policy_meta meta[META_COUNT];
static struct ocf_lru_meta meta[META_COUNT];
union eviction_policy_meta*
__wrap_ocf_metadata_get_eviction_policy(ocf_cache_t cache, ocf_cache_line_t line)
struct ocf_cache_line_concurrency *__wrap_ocf_cache_line_concurrency(ocf_cache_t cache)
{
return NULL;
}
struct ocf_lru_meta*
__wrap_ocf_metadata_get_lru(ocf_cache_t cache, ocf_cache_line_t line)
{
assert (line < META_COUNT);
return &meta[line];
@ -54,7 +59,7 @@ static void _lru_init_test01(void **state)
print_test_description("test init\n");
_lru_init(&l, end_marker);
_lru_init(&l, true);
assert_int_equal(l.num_hot, 0);
assert_int_equal(l.num_nodes, 0);
@ -71,12 +76,12 @@ static void check_hot_elems(struct ocf_lru_list *l)
unsigned curr = l->head;
for (i = 0; i < l->num_hot; i++) {
assert_int_equal(meta[curr].lru.hot, 1);
curr = meta[curr].lru.next;
assert_int_equal(meta[curr].hot, 1);
curr = meta[curr].next;
}
for (i = l->num_hot; i < l->num_nodes; i++) {
assert_int_equal(meta[curr].lru.hot, 0);
curr = meta[curr].lru.next;
assert_int_equal(meta[curr].hot, 0);
curr = meta[curr].next;
}
}
@ -89,7 +94,7 @@ static void _lru_init_test02(void **state)
print_test_description("test add\n");
_lru_init(&l, end_marker);
_lru_init(&l, true);
for (i = 1; i <= 8; i++)
{
@ -114,7 +119,7 @@ static void _lru_init_test03(void **state)
print_test_description("remove head\n");
_lru_init(&l, end_marker);
_lru_init(&l, true);
for (i = 1; i <= 8; i++) {
add_lru_head(NULL, &l, i, end_marker);
@ -150,7 +155,7 @@ static void _lru_init_test04(void **state)
print_test_description("remove tail\n");
_lru_init(&l, end_marker);
_lru_init(&l, true);
for (i = 1; i <= 8; i++) {
add_lru_head(NULL, &l, i, end_marker);
@ -188,7 +193,7 @@ static void _lru_init_test05(void **state)
print_test_description("remove last hot\n");
_lru_init(&l, end_marker);
_lru_init(&l, true);
for (i = 1; i <= 8; i++) {
add_lru_head(NULL, &l, i, end_marker);
@ -235,7 +240,7 @@ static void _lru_init_test06(void **state)
print_test_description("remove middle hot\n");
_lru_init(&l, end_marker);
_lru_init(&l, true);
for (i = 1; i <= 8; i++) {
add_lru_head(NULL, &l, i, end_marker);

View File

@ -1,14 +1,14 @@
/*
* <tested_file_path>src/eviction/lru.c</tested_file_path>
* <tested_file_path>src/ocf_lru.c</tested_file_path>
* <tested_function>lru_iter_next</tested_function>
* <functions_to_leave>
* INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
* ONE FUNCTION PER LINE
* lru_iter_init
* _lru_next_evp
* _lru_evp_is_empty
* _lru_evp_set_empty
* _lru_evp_all_empty
* _lru_next_lru
* _lru_lru_is_empty
* _lru_lru_set_empty
* _lru_lru_all_empty
* ocf_rotate_right
* lru_iter_eviction_next
* lru_iter_cleaning_next
@ -26,9 +26,8 @@
#include <cmocka.h>
#include "print_desc.h"
#include "eviction.h"
#include "lru.h"
#include "ops.h"
#include "ocf_space.h"
#include "ocf_lru.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"
@ -36,11 +35,16 @@
#include "../engine/engine_zero.h"
#include "../ocf_request.h"
#include "eviction/lru.c/lru_iter_generated_wraps.c"
#include "ocf_lru.c/lru_iter_generated_wraps.c"
//#define DEBUG
ocf_cache_line_t test_cases[10 * OCF_NUM_EVICTION_LISTS][OCF_NUM_EVICTION_LISTS][20];
struct ocf_cache_line_concurrency *__wrap_ocf_cache_line_concurrency(ocf_cache_t cache)
{
return NULL;
}
ocf_cache_line_t test_cases[10 * OCF_NUM_LRU_LISTS][OCF_NUM_LRU_LISTS][20];
unsigned num_cases = 20;
void write_test_case_description(void)
@ -49,21 +53,21 @@ void write_test_case_description(void)
unsigned test_case = 0;
// case 0 - all lists empty
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
test_cases[0][i][test_case] = -1;
}
// case 1 - all lists with single element
test_case++;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
test_cases[0][i][test_case] = 10 * i;
test_cases[1][i][test_case] = -1;
}
// case 2 - all lists have between 1 and 5 elements, increasingly
test_case++;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
unsigned num_elements = 1 + i / (OCF_NUM_EVICTION_LISTS / 4);
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
unsigned num_elements = 1 + i / (OCF_NUM_LRU_LISTS / 4);
for (j = 0; j < num_elements; j++)
test_cases[j][i][test_case] = 10 * i + j;
@ -72,7 +76,7 @@ void write_test_case_description(void)
// case 3 - all lists have between 1 and 5 elements, modulo index
test_case++;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
unsigned num_elements = 1 + (i % 5);
for (j = 0; j < num_elements; j++)
@ -82,8 +86,8 @@ void write_test_case_description(void)
// case 4 - all lists have between 0 and 4 elements, increasingly
test_case++;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
unsigned num_elements = i / (OCF_NUM_EVICTION_LISTS / 4);
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
unsigned num_elements = i / (OCF_NUM_LRU_LISTS / 4);
for (j = 0; j < num_elements; j++)
test_cases[j][i][test_case] = 10 * i + j;
@ -92,7 +96,7 @@ void write_test_case_description(void)
// case 5 - all lists have between 0 and 4 elements, modulo index
test_case++;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
unsigned num_elements = (i % 5);
for (j = 0; j < num_elements; j++)
@ -102,41 +106,41 @@ void write_test_case_description(void)
// case 6 - list length increasing by 1 from 0
test_case++;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
unsigned num_elements = i;
for (j = 0; j < num_elements; j++)
test_cases[j][i][test_case] = OCF_NUM_EVICTION_LISTS * i + j;
test_cases[j][i][test_case] = OCF_NUM_LRU_LISTS * i + j;
test_cases[j][i][test_case] = -1;
}
// case 7 - list length increasing by 1 from 1
test_case++;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
unsigned num_elements = i + 1;
for (j = 0; j < num_elements; j++)
test_cases[j][i][test_case] = 2 * OCF_NUM_EVICTION_LISTS * i + j;
test_cases[j][i][test_case] = 2 * OCF_NUM_LRU_LISTS * i + j;
test_cases[j][i][test_case] = -1;
}
// case 8 - list length increasing by 4 from 0
test_case++;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
unsigned num_elements = 4 * i;
for (j = 0; j < num_elements; j++)
test_cases[j][i][test_case] = 4 * OCF_NUM_EVICTION_LISTS * i + j;
test_cases[j][i][test_case] = 4 * OCF_NUM_LRU_LISTS * i + j;
test_cases[j][i][test_case] = -1;
}
// case 9 - list length increasing by 4 from 1
test_case++;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
unsigned num_elements = 4 * i + 1;
for (j = 0; j < num_elements; j++)
test_cases[j][i][test_case] = 5 * OCF_NUM_EVICTION_LISTS * i + j;
test_cases[j][i][test_case] = 5 * OCF_NUM_LRU_LISTS * i + j;
test_cases[j][i][test_case] = -1;
}
@ -146,8 +150,8 @@ void write_test_case_description(void)
while(test_case < 2 * (l + 1)) {
unsigned matching_case = test_case - l - 1;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
unsigned curr_list = (i + 4) % OCF_NUM_EVICTION_LISTS;
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
unsigned curr_list = (i + 4) % OCF_NUM_LRU_LISTS;
j = 0;
while(test_cases[j][i][matching_case] != -1) {
test_cases[j][curr_list][test_case] =
@ -160,13 +164,13 @@ void write_test_case_description(void)
}
/* transform cacheline numbers so that they remain unique but have
* assignment to list modulo OCF_NUM_EVICTION_LISTS */
* assignment to list modulo OCF_NUM_LRU_LISTS */
for (test_case = 0; test_case < num_cases; test_case++) {
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
j = 0;
while (test_cases[j][i][test_case] != -1) {
test_cases[j][i][test_case] = test_cases[j][i][test_case] *
OCF_NUM_EVICTION_LISTS + i;
OCF_NUM_LRU_LISTS + i;
j++;
}
}
@ -181,7 +185,7 @@ void write_test_case_description(void)
for (test_case = 0; test_case < num_cases; test_case++) {
print_message("test case no %d\n", test_case);
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++) {
for (i = 0; i < OCF_NUM_LRU_LISTS; i++) {
print_message("list %02u: ", i);
j = 0;
while (test_cases[j][i][test_case] != -1) {
@ -199,12 +203,12 @@ unsigned current_case;
struct ocf_lru_list list;
struct ocf_lru_list *__wrap_evp_lru_get_list(struct ocf_user_part *part,
uint32_t evp, bool clean)
struct ocf_lru_list *__wrap_ocf_lru_get_list(struct ocf_user_part *user_part,
uint32_t lru, bool clean)
{
unsigned i = 0;
while (test_cases[i][evp][current_case] != -1)
while (test_cases[i][lru][current_case] != -1)
i++;
if (i == 0) {
@ -212,53 +216,53 @@ struct ocf_lru_list *__wrap_evp_lru_get_list(struct ocf_user_part *part,
list.tail = -1;
list.num_nodes = 0;
} else {
list.head = test_cases[0][evp][current_case];
list.tail = test_cases[i - 1][evp][current_case];
list.head = test_cases[0][lru][current_case];
list.tail = test_cases[i - 1][lru][current_case];
list.num_nodes = i;
}
#ifdef DEBUG
print_message("list for case %u evp %u: head: %u tail %u elems %u\n",
current_case, evp, list.head, list.tail, list.num_nodes);
print_message("list for case %u lru %u: head: %u tail %u elems %u\n",
current_case, lru, list.head, list.tail, list.num_nodes);
#endif
return &list;
}
inline struct ocf_lru_list *__wrap_evp_get_cline_list(ocf_cache_t cache,
inline struct ocf_lru_list *__wrap_lru_get_cline_list(ocf_cache_t cache,
ocf_cache_line_t cline)
{
return __wrap_evp_lru_get_list(NULL, cline % OCF_NUM_EVICTION_LISTS, true);
return __wrap_ocf_lru_get_list(NULL, cline % OCF_NUM_LRU_LISTS, true);
}
union eviction_policy_meta policy;
struct ocf_lru_meta g_lru_meta;
union eviction_policy_meta *__wrap_ocf_metadata_get_eviction_policy(
struct ocf_lru_meta *__wrap_ocf_metadata_get_lru(
struct ocf_cache *cache, ocf_cache_line_t line)
{
unsigned i, j;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
{
j = 0;
while (test_cases[j][i][current_case] != -1) {
if (test_cases[j][i][current_case] == line) {
if (j == 0) {
policy.lru.prev = -1;
g_lru_meta.prev = -1;
} else {
policy.lru.prev =
g_lru_meta.prev =
test_cases[j - 1][i][current_case];
}
policy.lru.next = test_cases[j + 1][i][current_case];
g_lru_meta.next = test_cases[j + 1][i][current_case];
#ifdef DEBUG
print_message("[%u] next %u prev %u\n",
line, policy.lru.next,
policy.lru.prev);
line, g_lru_meta.next,
g_lru_meta.prev);
#endif
return &policy;
return &g_lru_meta;
}
j++;
}
@ -276,7 +280,7 @@ void __wrap_add_lru_head(ocf_cache_t cache,
unsigned int collision_index)
{
unsigned list_head = list->head;
unsigned i, j = collision_index % OCF_NUM_EVICTION_LISTS;
unsigned i, j = collision_index % OCF_NUM_LRU_LISTS;
i = 1;
while (test_cases[i][j][current_case] != -1)
@ -290,7 +294,7 @@ void __wrap_add_lru_head(ocf_cache_t cache,
test_cases[0][j][current_case] = collision_index;
#ifdef DEBUG
print_message("case %u evp %u head set to %u\n", current_case, j, collision_index);
print_message("case %u lru %u head set to %u\n", current_case, j, collision_index);
#endif
}
@ -303,7 +307,7 @@ void __wrap_remove_lru_list(ocf_cache_t cache,
unsigned i, j;
found = false;
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
{
j = 0;
@ -324,7 +328,7 @@ void __wrap_remove_lru_list(ocf_cache_t cache,
assert(found);
#ifdef DEBUG
print_message("case %u removed %u from evp %u\n", current_case, collision_index, i);
print_message("case %u removed %u from lru %u\n", current_case, collision_index, i);
#endif
}
@ -335,28 +339,33 @@ bool __wrap__lru_lock(struct ocf_lru_iter *iter,
return true;
}
bool __wrap__lru_trylock_cacheline(struct ocf_lru_iter *iter,
ocf_cache_line_t cline)
bool __wrap_ocf_cache_line_try_lock_rd(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
return true;
}
bool __wrap_ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
ocf_cache_line_t line)
{
return false;
}
static void _lru_run_test(unsigned test_case)
{
unsigned start_pos;
current_case = test_case;
for (start_pos = 0; start_pos < OCF_NUM_EVICTION_LISTS; start_pos++)
for (start_pos = 0; start_pos < OCF_NUM_LRU_LISTS; start_pos++)
{
struct ocf_lru_iter iter;
ocf_cache_line_t cache_line, expected_cache_line;
unsigned curr_evp = start_pos;
unsigned pos[OCF_NUM_EVICTION_LISTS];
unsigned curr_lru = start_pos;
unsigned pos[OCF_NUM_LRU_LISTS];
unsigned i;
write_test_case_description();
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
{
pos[i] = -1;
while(test_cases[pos[i] + 1][i][test_case] != -1)
@ -368,26 +377,26 @@ static void _lru_run_test(unsigned test_case)
do {
/* check what is expected to be returned from iterator */
if (pos[curr_evp] == -1) {
if (pos[curr_lru] == -1) {
i = 1;
while (i < OCF_NUM_EVICTION_LISTS &&
pos[(curr_evp + i) % OCF_NUM_EVICTION_LISTS]
while (i < OCF_NUM_LRU_LISTS &&
pos[(curr_lru + i) % OCF_NUM_LRU_LISTS]
== -1) {
i++;
}
if (i == OCF_NUM_EVICTION_LISTS) {
if (i == OCF_NUM_LRU_LISTS) {
/* reached end of lists */
expected_cache_line = -1;
} else {
curr_evp = (curr_evp + i) % OCF_NUM_EVICTION_LISTS;
expected_cache_line = test_cases[pos[curr_evp]]
[curr_evp][test_case];
pos[curr_evp]--;
curr_lru = (curr_lru + i) % OCF_NUM_LRU_LISTS;
expected_cache_line = test_cases[pos[curr_lru]]
[curr_lru][test_case];
pos[curr_lru]--;
}
} else {
expected_cache_line = test_cases[pos[curr_evp]]
[curr_evp][test_case];
pos[curr_evp]--;
expected_cache_line = test_cases[pos[curr_lru]]
[curr_lru][test_case];
pos[curr_lru]--;
}
/* get cacheline from iterator */
@ -395,11 +404,11 @@ static void _lru_run_test(unsigned test_case)
assert_int_equal(cache_line, expected_cache_line);
curr_evp = (curr_evp + 1) % OCF_NUM_EVICTION_LISTS;
curr_lru = (curr_lru + 1) % OCF_NUM_LRU_LISTS;
} while (cache_line != -1);
/* make sure all cachelines are visited */
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
for (i = 0; i < OCF_NUM_LRU_LISTS; i++)
{
assert_int_equal((unsigned)-1, pos[i]);
}

View File

@ -1,8 +1,8 @@
/*
* <tested_file_path>src/eviction/eviction.c</tested_file_path>
* <tested_function>ocf_evict_do</tested_function>
* <tested_file_path>src/ocf_space.c</tested_file_path>
* <tested_function>ocf_remap_do</tested_function>
* <functions_to_leave>
ocf_evict_partitions
ocf_evict_user_partitions
* </functions_to_leave>
*/
@ -17,55 +17,56 @@
#include <cmocka.h>
#include "print_desc.h"
#include "eviction.h"
#include "ops.h"
#include "../utils/utils_part.h"
#include "ocf_space.h"
#include "../utils/utils_user_part.h"
#include "eviction/eviction.c/eviction_generated_wraps.c"
#include "ocf_space.c/ocf_space_generated_wraps.c"
struct test_cache
{
struct ocf_cache cache;
struct ocf_user_part_config part[OCF_IO_CLASS_MAX];
uint32_t overflow[OCF_IO_CLASS_MAX];
uint32_t evictable[OCF_IO_CLASS_MAX];
struct ocf_user_part_config part[OCF_USER_IO_CLASS_MAX];
struct ocf_part_runtime runtime[OCF_USER_IO_CLASS_MAX];
uint32_t overflow[OCF_USER_IO_CLASS_MAX];
uint32_t evictable[OCF_USER_IO_CLASS_MAX];
uint32_t req_unmapped;
};
bool __wrap_ocf_eviction_can_evict(ocf_cache_t cache)
uint32_t __wrap_ocf_lru_num_free(ocf_cache_t cache)
{
return true;
return 0;
}
uint32_t __wrap_ocf_part_overflow_size(struct ocf_cache *cache,
struct ocf_user_part *part)
uint32_t __wrap_ocf_user_part_overflow_size(struct ocf_cache *cache,
struct ocf_user_part *user_part)
{
struct test_cache* tcache = cache;
return tcache->overflow[part->id];
return tcache->overflow[user_part->part.id];
}
uint32_t __wrap_ocf_evict_calculate(ocf_cache_t cache,
struct ocf_user_part *part, uint32_t to_evict, bool roundup)
struct ocf_user_part *user_part, uint32_t to_evict, bool roundup)
{
struct test_cache* tcache = cache;
return min(tcache->evictable[part->id], to_evict);
return min(tcache->evictable[user_part->part.id], to_evict);
}
uint32_t __wrap_ocf_eviction_need_space(struct ocf_cache *cache,
ocf_queue_t io_queue, struct ocf_user_part *part,
uint32_t clines)
uint32_t __wrap_ocf_lru_req_clines(struct ocf_request *req,
struct ocf_part *src_part, uint32_t cline_no)
{
struct test_cache *tcache = (struct test_cache *)cache;
unsigned overflown_consumed = min(clines, tcache->overflow[part->id]);
struct test_cache *tcache = (struct test_cache *)req->cache;
unsigned overflown_consumed;
tcache->overflow[part->id] -= overflown_consumed;
tcache->evictable[part->id] -= clines;
tcache->req_unmapped -= clines;
overflown_consumed = min(cline_no, tcache->overflow[src_part->id]);
check_expected(part);
check_expected(clines);
tcache->overflow[src_part->id] -= overflown_consumed;
tcache->evictable[src_part->id] -= cline_no;
tcache->req_unmapped -= cline_no;
check_expected(src_part);
check_expected(cline_no);
function_called();
return mock();
@ -94,7 +95,7 @@ bool ocf_cache_is_device_attached(ocf_cache_t cache)
/* FIXME: copy-pasted from OCF */
int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
int ocf_user_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_lst_entry *e1, struct ocf_lst_entry *e2)
{
struct ocf_user_part *p1 = container_of(e1, struct ocf_user_part,
@ -102,10 +103,11 @@ int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_user_part *p2 = container_of(e2, struct ocf_user_part,
lst_valid);
size_t p1_size = ocf_cache_is_device_attached(cache) ?
p1->runtime->curr_size : 0;
env_atomic_read(&p1->part.runtime->curr_size)
: 0;
size_t p2_size = ocf_cache_is_device_attached(cache) ?
p2->runtime->curr_size : 0;
env_atomic_read(&p2->part.runtime->curr_size)
: 0;
int v1 = p1->config->priority;
int v2 = p2->config->priority;
@ -154,6 +156,7 @@ int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
return v2 - v1;
}
static struct ocf_lst_entry *_list_getter(
struct ocf_cache *cache, ocf_cache_line_t idx)
{
@ -166,18 +169,18 @@ static void init_part_list(struct test_cache *tcache)
{
unsigned i;
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
tcache->cache.user_parts[i].id = i;
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
tcache->cache.user_parts[i].part.id = i;
tcache->cache.user_parts[i].config = &tcache->part[i];
tcache->cache.user_parts[i].config->priority = i+1;
tcache->cache.user_parts[i].config->flags.eviction = 1;
}
ocf_lst_init((ocf_cache_t)tcache, &tcache->cache.lst_part, OCF_IO_CLASS_MAX,
_list_getter, ocf_part_lst_cmp_valid);
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
ocf_lst_init_entry(&tcache->cache.lst_part, &tcache->cache.user_parts[i].lst_valid);
ocf_lst_add_tail(&tcache->cache.lst_part, i);
ocf_lst_init((ocf_cache_t)tcache, &tcache->cache.user_part_list, OCF_USER_IO_CLASS_MAX,
_list_getter, ocf_user_part_lst_cmp_valid);
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
ocf_lst_init_entry(&tcache->cache.user_part_list, &tcache->cache.user_parts[i].lst_valid);
ocf_lst_add_tail(&tcache->cache.user_part_list, i);
}
}
@ -190,13 +193,13 @@ uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req)
#define _expect_evict_call(tcache, part_id, req_count, ret_count) \
do { \
expect_value(__wrap_ocf_eviction_need_space, part, &tcache.cache.user_parts[part_id]); \
expect_value(__wrap_ocf_eviction_need_space, clines, req_count); \
expect_function_call(__wrap_ocf_eviction_need_space); \
will_return(__wrap_ocf_eviction_need_space, ret_count); \
expect_value(__wrap_ocf_lru_req_clines, src_part, &tcache.cache.user_parts[part_id].part); \
expect_value(__wrap_ocf_lru_req_clines, cline_no, req_count); \
expect_function_call(__wrap_ocf_lru_req_clines); \
will_return(__wrap_ocf_lru_req_clines, ret_count); \
} while (false);
static void ocf_evict_do_test01(void **state)
static void ocf_remap_do_test01(void **state)
{
struct test_cache tcache = {};
struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
@ -210,11 +213,11 @@ static void ocf_evict_do_test01(void **state)
tcache.req_unmapped = 50;
_expect_evict_call(tcache, 10, 50, 50);
evicted = ocf_evict_do(&req);
evicted = ocf_remap_do(&req);
assert_int_equal(evicted, 50);
}
static void ocf_evict_do_test02(void **state)
static void ocf_remap_do_test02(void **state)
{
struct test_cache tcache = {};
struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
@ -231,11 +234,11 @@ static void ocf_evict_do_test02(void **state)
_expect_evict_call(tcache, 10, 50, 50);
evicted = ocf_evict_do(&req);
evicted = ocf_remap_do(&req);
assert_int_equal(evicted, 50);
}
static void ocf_evict_do_test03(void **state)
static void ocf_remap_do_test03(void **state)
{
struct test_cache tcache = {};
struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
@ -257,11 +260,11 @@ static void ocf_evict_do_test03(void **state)
_expect_evict_call(tcache, 16, 100, 100);
_expect_evict_call(tcache, 17, 50, 50);
evicted = ocf_evict_do(&req);
evicted = ocf_remap_do(&req);
assert_int_equal(evicted, 350);
}
static void ocf_evict_do_test04(void **state)
static void ocf_remap_do_test04(void **state)
{
struct test_cache tcache = {};
struct ocf_request req = {.cache = &tcache.cache, .part_id = 0 };
@ -291,16 +294,16 @@ static void ocf_evict_do_test04(void **state)
_expect_evict_call(tcache, 16, 100, 100);
_expect_evict_call(tcache, 17, 80, 80);
evicted = ocf_evict_do(&req);
evicted = ocf_remap_do(&req);
assert_int_equal(evicted, 580);
}
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(ocf_evict_do_test01),
cmocka_unit_test(ocf_evict_do_test02),
cmocka_unit_test(ocf_evict_do_test03),
cmocka_unit_test(ocf_evict_do_test04)
cmocka_unit_test(ocf_remap_do_test01),
cmocka_unit_test(ocf_remap_do_test02),
cmocka_unit_test(ocf_remap_do_test03),
cmocka_unit_test(ocf_remap_do_test04)
};
return cmocka_run_group_tests(tests, NULL, NULL);