Move common user and freelist partition data to a new struct

A new structure, ocf_part, now holds the data common to user partitions and the freelist partition: part_runtime and part_id. ocf_user_part embeds an ocf_part, plus a pointer to the cleaning-policy runtime metadata (moved out of part_runtime) and the user partition config (unchanged).

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>

parent c0b76f9e01
commit 87f834c793
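
For orientation, a simplified sketch of the layout this commit introduces, reconstructed from the struct definitions further down in this diff (unrelated fields omitted, comments added here for illustration only):

	/* generic per-partition data, shared by user partitions and the freelist */
	struct ocf_part {
		struct ocf_part_runtime *runtime;	/* curr_size, head, eviction lists */
		ocf_part_id_t id;
	};

	struct ocf_user_part {
		struct ocf_user_part_config *config;	/* unchanged */
		struct cleaning_policy *clean_pol;	/* moved out of part_runtime */
		struct ocf_part part;			/* embedded common data */
		struct ocf_part_cleaning_ctx cleaning;
		struct ocf_lst_entry lst_valid;
	};

	/* element of the part-runtime metadata segment: the generic runtime data
	 * and the cleaning policy data stay adjacent in metadata */
	struct ocf_part_runtime_meta {
		struct ocf_part_runtime runtime;
		struct cleaning_policy clean_pol;
	};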
@@ -308,7 +308,7 @@ typedef enum {
 /**
  * Maximum numbers of IO classes per cache instance
  */
-#define OCF_IO_CLASS_MAX OCF_CONFIG_MAX_IO_CLASSES
+#define OCF_USER_IO_CLASS_MAX OCF_CONFIG_MAX_IO_CLASSES
 /**
  * Minimum value of a valid IO class ID
  */

@@ -316,11 +316,11 @@ typedef enum {
 /**
  * Maximum value of a valid IO class ID
  */
-#define OCF_IO_CLASS_ID_MAX (OCF_IO_CLASS_MAX - 1)
+#define OCF_IO_CLASS_ID_MAX (OCF_USER_IO_CLASS_MAX - 1)
 /**
  * Invalid value of IO class id
  */
-#define OCF_IO_CLASS_INVALID OCF_IO_CLASS_MAX
+#define OCF_IO_CLASS_INVALID OCF_USER_IO_CLASS_MAX

 /** Maximum size of the IO class name */
 #define OCF_IO_CLASS_NAME_MAX 1024

@@ -874,7 +874,7 @@ struct ocf_mngt_io_class_config {
 };

 struct ocf_mngt_io_classes_config {
-	struct ocf_mngt_io_class_config config[OCF_IO_CLASS_MAX];
+	struct ocf_mngt_io_class_config config[OCF_USER_IO_CLASS_MAX];
 };

 /**
@ -9,7 +9,7 @@
|
||||
#include "alru.h"
|
||||
#include "../metadata/metadata.h"
|
||||
#include "../utils/utils_cleaner.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_user_part.h"
|
||||
#include "../utils/utils_realloc.h"
|
||||
#include "../concurrency/ocf_cache_line_concurrency.h"
|
||||
#include "../ocf_def_priv.h"
|
||||
@ -55,62 +55,33 @@ struct alru_flush_ctx {
|
||||
|
||||
struct alru_context {
|
||||
struct alru_flush_ctx flush_ctx;
|
||||
env_spinlock list_lock[OCF_IO_CLASS_MAX];
|
||||
env_spinlock list_lock[OCF_USER_IO_CLASS_MAX];
|
||||
};
|
||||
|
||||
|
||||
/* -- Start of ALRU functions -- */
|
||||
|
||||
|
||||
/* Sets the given collision_index as the new _head_ of the ALRU list. */
|
||||
static inline void update_alru_head(struct ocf_cache *cache,
|
||||
int partition_id, unsigned int collision_index)
|
||||
{
|
||||
struct ocf_user_part *part = &cache->user_parts[partition_id];
|
||||
|
||||
part->runtime->cleaning.policy.alru.lru_head = collision_index;
|
||||
}
|
||||
|
||||
/* Sets the given collision_index as the new _tail_ of the ALRU list. */
|
||||
static inline void update_alru_tail(struct ocf_cache *cache,
|
||||
int partition_id, unsigned int collision_index)
|
||||
{
|
||||
struct ocf_user_part *part = &cache->user_parts[partition_id];
|
||||
|
||||
part->runtime->cleaning.policy.alru.lru_tail = collision_index;
|
||||
}
|
||||
|
||||
/* Sets the given collision_index as the new _head_ and _tail_
|
||||
* of the ALRU list.
|
||||
*/
|
||||
static inline void update_alru_head_tail(struct ocf_cache *cache,
|
||||
int partition_id, unsigned int collision_index)
|
||||
{
|
||||
update_alru_head(cache, partition_id, collision_index);
|
||||
update_alru_tail(cache, partition_id, collision_index);
|
||||
}
|
||||
|
||||
|
||||
/* Adds the given collision_index to the _head_ of the ALRU list */
|
||||
static void add_alru_head(struct ocf_cache *cache, int partition_id,
|
||||
unsigned int collision_index)
|
||||
{
|
||||
unsigned int curr_head_index;
|
||||
unsigned int collision_table_entries = cache->device->collision_table_entries;
|
||||
struct ocf_user_part *part = &cache->user_parts[partition_id];
|
||||
struct alru_cleaning_policy *part_alru = &cache->user_parts[partition_id]
|
||||
.clean_pol->policy.alru;
|
||||
struct alru_cleaning_policy_meta *alru;
|
||||
|
||||
ENV_BUG_ON(!(collision_index < collision_table_entries));
|
||||
|
||||
ENV_BUG_ON(env_atomic_read(
|
||||
&part->runtime->cleaning.policy.alru.size) < 0);
|
||||
ENV_BUG_ON(env_atomic_read(&part_alru->size) < 0);
|
||||
|
||||
ENV_WARN_ON(!metadata_test_dirty(cache, collision_index));
|
||||
ENV_WARN_ON(!metadata_test_valid_any(cache, collision_index));
|
||||
|
||||
/* First node to be added/ */
|
||||
if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
|
||||
update_alru_head_tail(cache, partition_id, collision_index);
|
||||
if (env_atomic_read(&part_alru->size) == 0) {
|
||||
part_alru->lru_head = collision_index;
|
||||
part_alru->lru_tail = collision_index;
|
||||
|
||||
alru = &ocf_metadata_get_cleaning_policy(cache,
|
||||
collision_index)->meta.alru;
|
||||
@ -121,7 +92,7 @@ static void add_alru_head(struct ocf_cache *cache, int partition_id,
|
||||
} else {
|
||||
/* Not the first node to be added. */
|
||||
|
||||
curr_head_index = part->runtime->cleaning.policy.alru.lru_head;
|
||||
curr_head_index = part_alru->lru_head;
|
||||
|
||||
ENV_BUG_ON(!(curr_head_index < collision_table_entries));
|
||||
|
||||
@ -136,10 +107,10 @@ static void add_alru_head(struct ocf_cache *cache, int partition_id,
|
||||
curr_head_index)->meta.alru;
|
||||
alru->lru_prev = collision_index;
|
||||
|
||||
update_alru_head(cache, partition_id, collision_index);
|
||||
part_alru->lru_head = collision_index;
|
||||
}
|
||||
|
||||
env_atomic_inc(&part->runtime->cleaning.policy.alru.size);
|
||||
env_atomic_inc(&part_alru->size);
|
||||
}
|
||||
|
||||
/* Deletes the node with the given collision_index from the ALRU list */
|
||||
@ -148,14 +119,13 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
|
||||
{
|
||||
uint32_t prev_lru_node, next_lru_node;
|
||||
uint32_t collision_table_entries = cache->device->collision_table_entries;
|
||||
struct ocf_user_part *part = &cache->user_parts[partition_id];
|
||||
struct alru_cleaning_policy *cleaning_policy =
|
||||
&part->runtime->cleaning.policy.alru;
|
||||
struct alru_cleaning_policy *part_alru = &cache->user_parts[partition_id]
|
||||
.clean_pol->policy.alru;
|
||||
struct alru_cleaning_policy_meta *alru;
|
||||
|
||||
ENV_BUG_ON(!(collision_index < collision_table_entries));
|
||||
|
||||
if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
|
||||
if (env_atomic_read(&part_alru->size) == 0) {
|
||||
ocf_cache_log(cache, log_err, "ERROR: Attempt to remove item "
|
||||
"from empty ALRU Cleaning Policy queue!\n");
|
||||
ENV_BUG();
|
||||
@ -170,29 +140,29 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
|
||||
/* Check if entry is not part of the ALRU list */
|
||||
if ((next_lru_node == collision_table_entries) &&
|
||||
(prev_lru_node == collision_table_entries) &&
|
||||
(cleaning_policy->lru_head != collision_index) &&
|
||||
(cleaning_policy->lru_tail != collision_index)) {
|
||||
(part_alru->lru_head != collision_index) &&
|
||||
(part_alru->lru_tail != collision_index)) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Case 0: If we are head AND tail, there is only one node. So unlink
|
||||
* node and set that there is no node left in the list.
|
||||
*/
|
||||
if (cleaning_policy->lru_head == collision_index &&
|
||||
cleaning_policy->lru_tail == collision_index) {
|
||||
if (part_alru->lru_head == collision_index &&
|
||||
part_alru->lru_tail == collision_index) {
|
||||
alru->lru_next = collision_table_entries;
|
||||
alru->lru_prev = collision_table_entries;
|
||||
|
||||
|
||||
update_alru_head_tail(cache, partition_id,
|
||||
collision_table_entries);
|
||||
part_alru->lru_head = collision_table_entries;
|
||||
part_alru->lru_tail = collision_table_entries;
|
||||
}
|
||||
|
||||
/* Case 1: else if this collision_index is ALRU head, but not tail,
|
||||
* update head and return
|
||||
*/
|
||||
else if ((cleaning_policy->lru_tail != collision_index) &&
|
||||
(cleaning_policy->lru_head == collision_index)) {
|
||||
else if ((part_alru->lru_tail != collision_index) &&
|
||||
(part_alru->lru_head == collision_index)) {
|
||||
struct alru_cleaning_policy_meta *next_alru;
|
||||
|
||||
ENV_BUG_ON(!(next_lru_node < collision_table_entries));
|
||||
@ -200,7 +170,7 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
|
||||
next_alru = &ocf_metadata_get_cleaning_policy(cache,
|
||||
next_lru_node)->meta.alru;
|
||||
|
||||
update_alru_head(cache, partition_id, next_lru_node);
|
||||
part_alru->lru_head = next_lru_node;
|
||||
|
||||
alru->lru_next = collision_table_entries;
|
||||
next_alru->lru_prev = collision_table_entries;
|
||||
@ -210,8 +180,8 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
|
||||
/* Case 2: else if this collision_index is ALRU tail, but not head,
|
||||
* update tail and return
|
||||
*/
|
||||
else if ((cleaning_policy->lru_head != collision_index) &&
|
||||
(cleaning_policy->lru_tail == collision_index)) {
|
||||
else if ((part_alru->lru_head != collision_index) &&
|
||||
(part_alru->lru_tail == collision_index)) {
|
||||
struct alru_cleaning_policy_meta *prev_alru;
|
||||
|
||||
ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
|
||||
@ -219,7 +189,7 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
|
||||
prev_alru = &ocf_metadata_get_cleaning_policy(cache,
|
||||
prev_lru_node)->meta.alru;
|
||||
|
||||
update_alru_tail(cache, partition_id, prev_lru_node);
|
||||
part_alru->lru_tail = prev_lru_node;
|
||||
|
||||
alru->lru_prev = collision_table_entries;
|
||||
prev_alru->lru_next = collision_table_entries;
|
||||
@ -249,7 +219,7 @@ static void remove_alru_list(struct ocf_cache *cache, int partition_id,
|
||||
|
||||
}
|
||||
|
||||
env_atomic_dec(&part->runtime->cleaning.policy.alru.size);
|
||||
env_atomic_dec(&part_alru->size);
|
||||
}
|
||||
|
||||
static bool is_on_alru_list(struct ocf_cache *cache, int partition_id,
|
||||
@ -257,9 +227,8 @@ static bool is_on_alru_list(struct ocf_cache *cache, int partition_id,
|
||||
{
|
||||
uint32_t prev_lru_node, next_lru_node;
|
||||
uint32_t collision_table_entries = cache->device->collision_table_entries;
|
||||
struct ocf_user_part *part = &cache->user_parts[partition_id];
|
||||
struct alru_cleaning_policy *cleaning_policy =
|
||||
&part->runtime->cleaning.policy.alru;
|
||||
struct alru_cleaning_policy *part_alru = &cache->user_parts[partition_id]
|
||||
.clean_pol->policy.alru;
|
||||
struct alru_cleaning_policy_meta *alru;
|
||||
|
||||
ENV_BUG_ON(!(collision_index < collision_table_entries));
|
||||
@ -270,8 +239,8 @@ static bool is_on_alru_list(struct ocf_cache *cache, int partition_id,
|
||||
next_lru_node = alru->lru_next;
|
||||
prev_lru_node = alru->lru_prev;
|
||||
|
||||
return cleaning_policy->lru_tail == collision_index ||
|
||||
cleaning_policy->lru_head == collision_index ||
|
||||
return part_alru->lru_tail == collision_index ||
|
||||
part_alru->lru_head == collision_index ||
|
||||
next_lru_node != collision_table_entries ||
|
||||
prev_lru_node != collision_table_entries;
|
||||
}
|
||||
@ -321,13 +290,12 @@ static void __cleaning_policy_alru_purge_cache_block_any(
|
||||
|
||||
int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id,
|
||||
uint64_t start_byte, uint64_t end_byte) {
|
||||
struct ocf_user_part *part;
|
||||
struct ocf_user_part *user_part;
|
||||
ocf_part_id_t part_id;
|
||||
int ret = 0;
|
||||
|
||||
for_each_part(cache, part, part_id) {
|
||||
if (env_atomic_read(&part->runtime->cleaning.
|
||||
policy.alru.size) == 0)
|
||||
for_each_user_part(cache, user_part, part_id) {
|
||||
if (env_atomic_read(&user_part->clean_pol->policy.alru.size) == 0)
|
||||
continue;
|
||||
|
||||
ret |= ocf_metadata_actor(cache, part_id,
|
||||
@ -344,8 +312,8 @@ void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
|
||||
struct alru_context *ctx = cache->cleaner.cleaning_policy_context;
|
||||
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
|
||||
cache_line);
|
||||
struct ocf_user_part *part = &cache->user_parts[part_id];
|
||||
|
||||
struct alru_cleaning_policy *part_alru = &cache->user_parts[part_id]
|
||||
.clean_pol->policy.alru;
|
||||
uint32_t prev_lru_node, next_lru_node;
|
||||
uint32_t collision_table_entries = cache->device->collision_table_entries;
|
||||
struct alru_cleaning_policy_meta *alru;
|
||||
@ -362,10 +330,8 @@ void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
|
||||
|
||||
if ((next_lru_node != collision_table_entries) ||
|
||||
(prev_lru_node != collision_table_entries) ||
|
||||
((part->runtime->cleaning.policy.
|
||||
alru.lru_head == cache_line) &&
|
||||
(part->runtime->cleaning.policy.
|
||||
alru.lru_tail == cache_line)))
|
||||
((part_alru->lru_head == cache_line) &&
|
||||
(part_alru->lru_tail == cache_line)))
|
||||
remove_alru_list(cache, part_id, cache_line);
|
||||
|
||||
add_alru_head(cache, part_id, cache_line);
|
||||
@ -375,19 +341,19 @@ void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
|
||||
|
||||
static void _alru_rebuild(struct ocf_cache *cache)
|
||||
{
|
||||
struct ocf_user_part *part;
|
||||
struct ocf_user_part *user_part;
|
||||
struct alru_cleaning_policy *part_alru;
|
||||
ocf_part_id_t part_id;
|
||||
ocf_core_id_t core_id;
|
||||
ocf_cache_line_t cline;
|
||||
uint32_t step = 0;
|
||||
|
||||
for_each_part(cache, part, part_id) {
|
||||
for_each_user_part(cache, user_part, part_id) {
|
||||
/* ALRU initialization */
|
||||
env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
|
||||
part->runtime->cleaning.policy.alru.lru_head =
|
||||
cache->device->collision_table_entries;
|
||||
part->runtime->cleaning.policy.alru.lru_tail =
|
||||
cache->device->collision_table_entries;
|
||||
part_alru = &user_part->clean_pol->policy.alru;
|
||||
env_atomic_set(&part_alru->size, 0);
|
||||
part_alru->lru_head = cache->device->collision_table_entries;
|
||||
part_alru->lru_tail = cache->device->collision_table_entries;
|
||||
cache->device->runtime_meta->cleaning_thread_access = 0;
|
||||
}
|
||||
|
||||
@ -410,15 +376,16 @@ static void _alru_rebuild(struct ocf_cache *cache)
|
||||
}
|
||||
|
||||
static int cleaning_policy_alru_initialize_part(struct ocf_cache *cache,
|
||||
struct ocf_user_part *part, int init_metadata)
|
||||
struct ocf_user_part *user_part, int init_metadata)
|
||||
{
|
||||
struct alru_cleaning_policy *part_alru =
|
||||
&user_part->clean_pol->policy.alru;
|
||||
|
||||
if (init_metadata) {
|
||||
/* ALRU initialization */
|
||||
env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
|
||||
part->runtime->cleaning.policy.alru.lru_head =
|
||||
cache->device->collision_table_entries;
|
||||
part->runtime->cleaning.policy.alru.lru_tail =
|
||||
cache->device->collision_table_entries;
|
||||
env_atomic_set(&part_alru->size, 0);
|
||||
part_alru->lru_head = cache->device->collision_table_entries;
|
||||
part_alru->lru_tail = cache->device->collision_table_entries;
|
||||
}
|
||||
|
||||
cache->device->runtime_meta->cleaning_thread_access = 0;
|
||||
@ -440,7 +407,7 @@ void cleaning_policy_alru_setup(struct ocf_cache *cache)
|
||||
|
||||
int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata)
|
||||
{
|
||||
struct ocf_user_part *part;
|
||||
struct ocf_user_part *user_part;
|
||||
ocf_part_id_t part_id;
|
||||
struct alru_context *ctx;
|
||||
int error = 0;
|
||||
@ -452,7 +419,7 @@ int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata)
|
||||
return -OCF_ERR_NO_MEM;
|
||||
}
|
||||
|
||||
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
|
||||
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
|
||||
error = env_spinlock_init(&ctx->list_lock[i]);
|
||||
if (error)
|
||||
break;
|
||||
@ -468,9 +435,9 @@ int cleaning_policy_alru_initialize(ocf_cache_t cache, int init_metadata)
|
||||
|
||||
cache->cleaner.cleaning_policy_context = ctx;
|
||||
|
||||
for_each_part(cache, part, part_id) {
|
||||
for_each_user_part(cache, user_part, part_id) {
|
||||
cleaning_policy_alru_initialize_part(cache,
|
||||
part, init_metadata);
|
||||
user_part, init_metadata);
|
||||
}
|
||||
|
||||
if (init_metadata)
|
||||
@ -486,7 +453,7 @@ void cleaning_policy_alru_deinitialize(struct ocf_cache *cache)
|
||||
struct alru_context *alru = cache->cleaner.cleaning_policy_context;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < OCF_IO_CLASS_MAX; i++)
|
||||
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++)
|
||||
env_spinlock_destroy(&alru->list_lock[i]);
|
||||
|
||||
env_vfree(cache->cleaner.cleaning_policy_context);
|
||||
@ -697,17 +664,17 @@ static int get_data_to_flush(struct alru_context *ctx)
|
||||
struct alru_cleaning_policy_config *config;
|
||||
struct alru_cleaning_policy_meta *alru;
|
||||
ocf_cache_line_t cache_line;
|
||||
struct ocf_user_part *part;
|
||||
struct ocf_user_part *user_part;
|
||||
uint32_t last_access;
|
||||
int to_flush = 0;
|
||||
int part_id = OCF_IO_CLASS_ID_MAX;
|
||||
|
||||
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
|
||||
|
||||
for_each_part(cache, part, part_id) {
|
||||
for_each_user_part(cache, user_part, part_id) {
|
||||
env_spinlock_lock(&ctx->list_lock[part_id]);
|
||||
|
||||
cache_line = part->runtime->cleaning.policy.alru.lru_tail;
|
||||
cache_line = user_part->clean_pol->policy.alru.lru_tail;
|
||||
|
||||
last_access = compute_timestamp(config);
|
||||
|
||||
|
@@ -24,7 +24,7 @@ int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
 		goto global_err;
 	}

-	for (part_iter = 0; part_iter < OCF_IO_CLASS_MAX; part_iter++) {
+	for (part_iter = 0; part_iter < OCF_USER_IO_CLASS_MAX; part_iter++) {
 		err = env_spinlock_init(&metadata_lock->partition[part_iter]);
 		if (err)
 			goto partition_err;

@@ -50,7 +50,7 @@ void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock)
 {
 	unsigned i;

-	for (i = 0; i < OCF_IO_CLASS_MAX; i++)
+	for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++)
 		env_spinlock_destroy(&metadata_lock->partition[i]);

 	for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
@@ -21,7 +21,7 @@
 #include "engine_discard.h"
 #include "engine_d2c.h"
 #include "engine_ops.h"
-#include "../utils/utils_part.h"
+#include "../utils/utils_user_part.h"
 #include "../utils/utils_refcnt.h"
 #include "../ocf_request.h"
 #include "../metadata/metadata.h"

@@ -192,8 +192,8 @@ void ocf_resolve_effective_cache_mode(ocf_cache_t cache,
 		return;
 	}

-	req->cache_mode = ocf_part_get_cache_mode(cache,
-			ocf_part_class2id(cache, req->part_id));
+	req->cache_mode = ocf_user_part_get_cache_mode(cache,
+			ocf_user_part_class2id(cache, req->part_id));
 	if (!ocf_cache_mode_is_valid(req->cache_mode))
 		req->cache_mode = cache->conf_meta->cache_mode;

@ -14,7 +14,7 @@
|
||||
#include "../utils/utils_cache_line.h"
|
||||
#include "../ocf_request.h"
|
||||
#include "../utils/utils_cleaner.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_user_part.h"
|
||||
#include "../metadata/metadata.h"
|
||||
#include "../eviction/eviction.h"
|
||||
#include "../promotion/promotion.h"
|
||||
@ -517,7 +517,7 @@ static inline int ocf_prepare_clines_evict(struct ocf_request *req)
|
||||
int lock_status = -OCF_ERR_NO_LOCK;
|
||||
bool part_has_space;
|
||||
|
||||
part_has_space = ocf_part_has_space(req);
|
||||
part_has_space = ocf_user_part_has_space(req);
|
||||
if (!part_has_space) {
|
||||
/* adding more cachelines to target partition would overflow
|
||||
it - requesting eviction from target partition only */
|
||||
@ -544,16 +544,16 @@ static inline int ocf_prepare_clines_miss(struct ocf_request *req)
|
||||
int lock_status = -OCF_ERR_NO_LOCK;
|
||||
|
||||
/* requests to disabled partitions go in pass-through */
|
||||
if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
|
||||
if (!ocf_user_part_is_enabled(&req->cache->user_parts[req->part_id])) {
|
||||
ocf_req_set_mapping_error(req);
|
||||
return lock_status;
|
||||
}
|
||||
|
||||
/* NOTE: ocf_part_has_space() below uses potentially stale request
|
||||
/* NOTE: ocf_user_part_has_space() below uses potentially stale request
|
||||
* statistics (collected before hash bucket lock had been upgraded).
|
||||
* It is ok since this check is opportunistic, as partition occupancy
|
||||
* is also subject to change. */
|
||||
if (!ocf_part_has_space(req)) {
|
||||
if (!ocf_user_part_has_space(req)) {
|
||||
ocf_engine_lookup(req);
|
||||
return ocf_prepare_clines_evict(req);
|
||||
}
|
||||
@ -579,7 +579,7 @@ static inline int ocf_prepare_clines_miss(struct ocf_request *req)
|
||||
|
||||
int ocf_engine_prepare_clines(struct ocf_request *req)
|
||||
{
|
||||
struct ocf_user_part *part = &req->cache->user_parts[req->part_id];
|
||||
struct ocf_user_part *user_part = &req->cache->user_parts[req->part_id];
|
||||
bool mapped;
|
||||
bool promote = true;
|
||||
int lock = -OCF_ERR_NO_LOCK;
|
||||
@ -628,7 +628,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
|
||||
ocf_hb_req_prot_unlock_wr(req);
|
||||
|
||||
if (ocf_req_test_clean_eviction(req)) {
|
||||
ocf_eviction_flush_dirty(req->cache, part, req->io_queue,
|
||||
ocf_eviction_flush_dirty(req->cache, user_part, req->io_queue,
|
||||
128);
|
||||
}
|
||||
|
||||
|
@ -10,7 +10,7 @@
|
||||
#include "engine_pt.h"
|
||||
#include "engine_wb.h"
|
||||
#include "../ocf_request.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_user_part.h"
|
||||
#include "../utils/utils_io.h"
|
||||
#include "../concurrency/ocf_concurrency.h"
|
||||
#include "../metadata/metadata.h"
|
||||
@ -77,7 +77,7 @@ static int _ocf_read_fast_do(struct ocf_request *req)
|
||||
/* Probably some cache lines are assigned into wrong
|
||||
* partition. Need to move it to new one
|
||||
*/
|
||||
ocf_part_move(req);
|
||||
ocf_user_part_move(req);
|
||||
|
||||
ocf_hb_req_prot_unlock_wr(req);
|
||||
}
|
||||
@ -126,7 +126,7 @@ int ocf_read_fast(struct ocf_request *req)
|
||||
|
||||
hit = ocf_engine_is_hit(req);
|
||||
|
||||
part_has_space = ocf_part_has_space(req);
|
||||
part_has_space = ocf_user_part_has_space(req);
|
||||
|
||||
if (hit && part_has_space) {
|
||||
ocf_io_start(&req->ioi.io);
|
||||
@ -198,7 +198,7 @@ int ocf_write_fast(struct ocf_request *req)
|
||||
|
||||
mapped = ocf_engine_is_mapped(req);
|
||||
|
||||
part_has_space = ocf_part_has_space(req);
|
||||
part_has_space = ocf_user_part_has_space(req);
|
||||
|
||||
if (mapped && part_has_space) {
|
||||
ocf_io_start(&req->ioi.io);
|
||||
|
@ -9,7 +9,7 @@
|
||||
#include "cache_engine.h"
|
||||
#include "../ocf_request.h"
|
||||
#include "../utils/utils_io.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_user_part.h"
|
||||
#include "../metadata/metadata.h"
|
||||
#include "../concurrency/ocf_concurrency.h"
|
||||
|
||||
@ -75,7 +75,7 @@ int ocf_read_pt_do(struct ocf_request *req)
|
||||
/* Probably some cache lines are assigned into wrong
|
||||
* partition. Need to move it to new one
|
||||
*/
|
||||
ocf_part_move(req);
|
||||
ocf_user_part_move(req);
|
||||
|
||||
ocf_hb_req_prot_unlock_wr(req);
|
||||
}
|
||||
|
@ -15,7 +15,7 @@
|
||||
#include "../utils/utils_io.h"
|
||||
#include "../ocf_request.h"
|
||||
#include "../utils/utils_cache_line.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_user_part.h"
|
||||
#include "../metadata/metadata.h"
|
||||
#include "../ocf_def_priv.h"
|
||||
|
||||
@ -182,7 +182,7 @@ static int _ocf_read_generic_do(struct ocf_request *req)
|
||||
/* Probably some cache lines are assigned into wrong
|
||||
* partition. Need to move it to new one
|
||||
*/
|
||||
ocf_part_move(req);
|
||||
ocf_user_part_move(req);
|
||||
|
||||
ocf_hb_req_prot_unlock_wr(req);
|
||||
}
|
||||
|
@ -14,7 +14,7 @@
|
||||
#include "../utils/utils_io.h"
|
||||
#include "../utils/utils_cache_line.h"
|
||||
#include "../utils/utils_request.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_user_part.h"
|
||||
#include "../concurrency/ocf_concurrency.h"
|
||||
|
||||
#define OCF_ENGINE_DEBUG_IO_NAME "wb"
|
||||
@ -135,7 +135,7 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req)
|
||||
/* Probably some cache lines are assigned into wrong
|
||||
* partition. Need to move it to new one
|
||||
*/
|
||||
ocf_part_move(req);
|
||||
ocf_user_part_move(req);
|
||||
|
||||
ocf_hb_req_prot_unlock_wr(req);
|
||||
}
|
||||
|
@@ -12,7 +12,7 @@
 #include "../metadata/metadata.h"
 #include "../utils/utils_io.h"
 #include "../utils/utils_cache_line.h"
-#include "../utils/utils_part.h"
+#include "../utils/utils_user_part.h"
 #include "../concurrency/ocf_concurrency.h"

 #define OCF_ENGINE_DEBUG_IO_NAME "wo"
@ -11,7 +11,7 @@
|
||||
#include "../ocf_request.h"
|
||||
#include "../utils/utils_io.h"
|
||||
#include "../utils/utils_cache_line.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_user_part.h"
|
||||
#include "../metadata/metadata.h"
|
||||
#include "../concurrency/ocf_concurrency.h"
|
||||
|
||||
@ -123,7 +123,7 @@ static void _ocf_write_wt_update_bits(struct ocf_request *req)
|
||||
/* Probably some cache lines are assigned into wrong
|
||||
* partition. Need to move it to new one
|
||||
*/
|
||||
ocf_part_move(req);
|
||||
ocf_user_part_move(req);
|
||||
}
|
||||
|
||||
ocf_hb_req_prot_unlock_wr(req);
|
||||
|
@ -5,7 +5,7 @@
|
||||
|
||||
#include "eviction.h"
|
||||
#include "ops.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_user_part.h"
|
||||
#include "../engine/engine_common.h"
|
||||
|
||||
struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
|
||||
@ -23,11 +23,11 @@ struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
|
||||
};
|
||||
|
||||
static uint32_t ocf_evict_calculate(ocf_cache_t cache,
|
||||
struct ocf_user_part *part, uint32_t to_evict)
|
||||
struct ocf_user_part *user_part, uint32_t to_evict)
|
||||
{
|
||||
|
||||
uint32_t curr_part_size = ocf_part_get_occupancy(part);
|
||||
uint32_t min_part_size = ocf_part_get_min_size(cache, part);
|
||||
uint32_t curr_part_size = ocf_part_get_occupancy(&user_part->part);
|
||||
uint32_t min_part_size = ocf_user_part_get_min_size(cache, user_part);
|
||||
|
||||
if (curr_part_size <= min_part_size) {
|
||||
/*
|
||||
@ -44,7 +44,7 @@ static uint32_t ocf_evict_calculate(ocf_cache_t cache,
|
||||
}
|
||||
|
||||
static inline uint32_t ocf_evict_part_do(struct ocf_request *req,
|
||||
struct ocf_user_part *target_part)
|
||||
struct ocf_user_part *user_part)
|
||||
{
|
||||
uint32_t unmapped = ocf_engine_unmapped_count(req);
|
||||
uint32_t to_evict = 0;
|
||||
@ -52,7 +52,7 @@ static inline uint32_t ocf_evict_part_do(struct ocf_request *req,
|
||||
if (!evp_lru_can_evict(req->cache))
|
||||
return 0;
|
||||
|
||||
to_evict = ocf_evict_calculate(req->cache, target_part, unmapped);
|
||||
to_evict = ocf_evict_calculate(req->cache, user_part, unmapped);
|
||||
|
||||
if (to_evict < unmapped) {
|
||||
/* cannot evict enough cachelines to map request,
|
||||
@ -60,34 +60,34 @@ static inline uint32_t ocf_evict_part_do(struct ocf_request *req,
|
||||
return 0;
|
||||
}
|
||||
|
||||
return ocf_eviction_need_space(req->cache, req, target_part, to_evict);
|
||||
return ocf_eviction_need_space(req->cache, req, &user_part->part, to_evict);
|
||||
}
|
||||
|
||||
static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
|
||||
static inline uint32_t ocf_evict_user_partitions(ocf_cache_t cache,
|
||||
struct ocf_request *req, uint32_t evict_cline_no,
|
||||
bool overflown_only, int16_t max_priority)
|
||||
{
|
||||
uint32_t to_evict = 0, evicted = 0;
|
||||
struct ocf_user_part *part;
|
||||
struct ocf_user_part *user_part;
|
||||
ocf_part_id_t part_id;
|
||||
unsigned overflow_size;
|
||||
|
||||
/* For each partition from the lowest priority to highest one */
|
||||
for_each_part(cache, part, part_id) {
|
||||
for_each_user_part(cache, user_part, part_id) {
|
||||
if (!ocf_eviction_can_evict(cache))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Check stop and continue conditions
|
||||
*/
|
||||
if (max_priority > part->config->priority) {
|
||||
if (max_priority > user_part->config->priority) {
|
||||
/*
|
||||
* iterate partition have higher priority,
|
||||
* do not evict
|
||||
*/
|
||||
break;
|
||||
}
|
||||
if (!overflown_only && !part->config->flags.eviction) {
|
||||
if (!overflown_only && !user_part->config->flags.eviction) {
|
||||
/* If partition is overflown it should be evcited
|
||||
* even if its pinned
|
||||
*/
|
||||
@ -95,12 +95,12 @@ static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
|
||||
}
|
||||
|
||||
if (overflown_only) {
|
||||
overflow_size = ocf_part_overflow_size(cache, part);
|
||||
overflow_size = ocf_user_part_overflow_size(cache, user_part);
|
||||
if (overflow_size == 0)
|
||||
continue;
|
||||
}
|
||||
|
||||
to_evict = ocf_evict_calculate(cache, part,
|
||||
to_evict = ocf_evict_calculate(cache, user_part,
|
||||
evict_cline_no - evicted);
|
||||
if (to_evict == 0) {
|
||||
/* No cache lines to evict for this partition */
|
||||
@ -110,7 +110,8 @@ static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
|
||||
if (overflown_only)
|
||||
to_evict = OCF_MIN(to_evict, overflow_size);
|
||||
|
||||
evicted += ocf_eviction_need_space(cache, req, part, to_evict);
|
||||
evicted += ocf_eviction_need_space(cache, req,
|
||||
&user_part->part, to_evict);
|
||||
|
||||
if (evicted >= evict_cline_no) {
|
||||
/* Evicted requested number of cache line, stop
|
||||
@ -138,7 +139,7 @@ static inline uint32_t ocf_evict_do(struct ocf_request *req)
|
||||
* free its cachelines regardless of destination partition
|
||||
* priority. */
|
||||
|
||||
evicted = ocf_evict_partitions(cache, req, evict_cline_no,
|
||||
evicted = ocf_evict_user_partitions(cache, req, evict_cline_no,
|
||||
true, OCF_IO_CLASS_PRIO_PINNED);
|
||||
if (evicted >= evict_cline_no)
|
||||
return evicted;
|
||||
@ -146,7 +147,7 @@ static inline uint32_t ocf_evict_do(struct ocf_request *req)
|
||||
* partitions with priority <= target partition and attempt
|
||||
* to evict from those. */
|
||||
evict_cline_no -= evicted;
|
||||
evicted += ocf_evict_partitions(cache, req, evict_cline_no,
|
||||
evicted += ocf_evict_user_partitions(cache, req, evict_cline_no,
|
||||
false, target_part->config->priority);
|
||||
|
||||
return evicted;
|
||||
|
@@ -14,7 +14,7 @@

 #define OCF_NUM_EVICTION_LISTS 32

-struct ocf_user_part;
+struct ocf_part;
 struct ocf_request;

 struct eviction_policy {

@@ -39,17 +39,17 @@ struct eviction_policy_ops {
 	void (*rm_cline)(ocf_cache_t cache,
 			ocf_cache_line_t cline);
 	bool (*can_evict)(ocf_cache_t cache);
-	uint32_t (*req_clines)(struct ocf_request *req, struct ocf_user_part *part,
+	uint32_t (*req_clines)(struct ocf_request *req, struct ocf_part *part,
 			uint32_t cline_no);
 	void (*hot_cline)(ocf_cache_t cache, ocf_cache_line_t cline);
-	void (*init_evp)(ocf_cache_t cache, struct ocf_user_part *part);
+	void (*init_evp)(ocf_cache_t cache, struct ocf_part *part);
 	void (*dirty_cline)(ocf_cache_t cache,
-			struct ocf_user_part *part,
+			struct ocf_part *part,
 			uint32_t cline_no);
 	void (*clean_cline)(ocf_cache_t cache,
-			struct ocf_user_part *part,
+			struct ocf_part *part,
 			uint32_t cline_no);
-	void (*flush_dirty)(ocf_cache_t cache, struct ocf_user_part *part,
+	void (*flush_dirty)(ocf_cache_t cache, struct ocf_user_part *user_part,
 			ocf_queue_t io_queue, uint32_t count);
 	const char *name;
 };
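
As an illustration of the reworked interface (drawn from call sites elsewhere in this diff; the local variables here are placeholders): the generic eviction entry points now receive the embedded struct ocf_part, while flush_dirty still takes the whole user partition, since its cleaning context stays in ocf_user_part.

	struct ocf_user_part *user_part = &cache->user_parts[part_id];

	/* eviction works on the common part data */
	evicted = ocf_eviction_need_space(cache, req, &user_part->part, to_evict);

	/* cleaning needs clean_pol and the cleaning context, so it keeps the
	 * full user partition */
	ocf_eviction_flush_dirty(cache, user_part, req->io_queue, 128);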
@ -235,7 +235,7 @@ void evp_lru_init_cline(ocf_cache_t cache, ocf_cache_line_t cline)
|
||||
node->next = end_marker;
|
||||
}
|
||||
|
||||
static struct ocf_lru_list *evp_lru_get_list(struct ocf_user_part *part,
|
||||
static struct ocf_lru_list *evp_lru_get_list(struct ocf_part *part,
|
||||
uint32_t evp, bool clean)
|
||||
{
|
||||
return clean ? &part->runtime->eviction[evp].policy.lru.clean :
|
||||
@ -246,7 +246,7 @@ static inline struct ocf_lru_list *evp_get_cline_list(ocf_cache_t cache,
|
||||
ocf_cache_line_t cline)
|
||||
{
|
||||
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
|
||||
struct ocf_user_part *part = &cache->user_parts[part_id];
|
||||
struct ocf_part *part = &cache->user_parts[part_id].part;
|
||||
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
|
||||
|
||||
return evp_lru_get_list(part, ev_list,
|
||||
@ -264,7 +264,7 @@ void evp_lru_rm_cline(ocf_cache_t cache, ocf_cache_line_t cline)
|
||||
}
|
||||
|
||||
static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
|
||||
struct ocf_user_part *part, uint32_t start_evp, bool clean,
|
||||
struct ocf_part *part, uint32_t start_evp, bool clean,
|
||||
bool cl_lock_write, _lru_hash_locked_pfn hash_locked,
|
||||
struct ocf_request *req)
|
||||
{
|
||||
@ -290,7 +290,7 @@ static inline void lru_iter_init(struct ocf_lru_iter *iter, ocf_cache_t cache,
|
||||
}
|
||||
|
||||
static inline void lru_iter_cleaning_init(struct ocf_lru_iter *iter,
|
||||
ocf_cache_t cache, struct ocf_user_part *part,
|
||||
ocf_cache_t cache, struct ocf_part *part,
|
||||
uint32_t start_evp)
|
||||
{
|
||||
/* Lock cachelines for read, non-exclusive access */
|
||||
@ -299,7 +299,7 @@ static inline void lru_iter_cleaning_init(struct ocf_lru_iter *iter,
|
||||
}
|
||||
|
||||
static inline void lru_iter_eviction_init(struct ocf_lru_iter *iter,
|
||||
ocf_cache_t cache, struct ocf_user_part *part,
|
||||
ocf_cache_t cache, struct ocf_part *part,
|
||||
uint32_t start_evp, bool cl_lock_write,
|
||||
struct ocf_request *req)
|
||||
{
|
||||
@ -438,7 +438,7 @@ static inline ocf_cache_line_t lru_iter_eviction_next(struct ocf_lru_iter *iter,
|
||||
uint32_t curr_evp;
|
||||
ocf_cache_line_t cline;
|
||||
ocf_cache_t cache = iter->cache;
|
||||
struct ocf_user_part *part = iter->part;
|
||||
struct ocf_part *part = iter->part;
|
||||
struct ocf_lru_list *list;
|
||||
|
||||
do {
|
||||
@ -531,26 +531,26 @@ static int evp_lru_clean_get(ocf_cache_t cache, void *getter_context,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
|
||||
void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
|
||||
ocf_queue_t io_queue, uint32_t count)
|
||||
{
|
||||
struct ocf_part_cleaning_ctx *ctx = &part->cleaning;
|
||||
struct ocf_part_cleaning_ctx *ctx = &user_part->cleaning;
|
||||
struct ocf_cleaner_attribs attribs = {
|
||||
.lock_cacheline = false,
|
||||
.lock_metadata = true,
|
||||
.do_sort = true,
|
||||
|
||||
.cmpl_context = &part->cleaning,
|
||||
.cmpl_context = ctx,
|
||||
.cmpl_fn = evp_lru_clean_end,
|
||||
|
||||
.getter = evp_lru_clean_get,
|
||||
.getter_context = &part->cleaning,
|
||||
.getter_context = ctx,
|
||||
|
||||
.count = min(count, OCF_EVICTION_CLEAN_SIZE),
|
||||
|
||||
.io_queue = io_queue
|
||||
};
|
||||
ocf_cache_line_t *cline = part->cleaning.cline;
|
||||
ocf_cache_line_t *cline = ctx->cline;
|
||||
struct ocf_lru_iter iter;
|
||||
unsigned evp;
|
||||
int cnt;
|
||||
@ -571,7 +571,7 @@ void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
|
||||
return;
|
||||
}
|
||||
|
||||
part->cleaning.cache = cache;
|
||||
ctx->cache = cache;
|
||||
evp = io_queue->eviction_idx++ % OCF_NUM_EVICTION_LISTS;
|
||||
|
||||
lock_idx = ocf_metadata_concurrency_next_idx(io_queue);
|
||||
@ -579,7 +579,7 @@ void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
|
||||
|
||||
OCF_METADATA_EVICTION_WR_LOCK_ALL();
|
||||
|
||||
lru_iter_cleaning_init(&iter, cache, part, evp);
|
||||
lru_iter_cleaning_init(&iter, cache, &user_part->part, evp);
|
||||
i = 0;
|
||||
while (i < OCF_EVICTION_CLEAN_SIZE) {
|
||||
cline[i] = lru_iter_cleaning_next(&iter);
|
||||
@ -609,7 +609,7 @@ bool evp_lru_can_evict(ocf_cache_t cache)
|
||||
|
||||
/* the caller must hold the metadata lock */
|
||||
uint32_t evp_lru_req_clines(struct ocf_request *req,
|
||||
struct ocf_user_part *part, uint32_t cline_no)
|
||||
struct ocf_part *part, uint32_t cline_no)
|
||||
{
|
||||
struct ocf_alock* alock;
|
||||
struct ocf_lru_iter iter;
|
||||
@ -737,7 +737,7 @@ static inline void _lru_init(struct ocf_lru_list *list)
|
||||
list->last_hot = end_marker;
|
||||
}
|
||||
|
||||
void evp_lru_init_evp(ocf_cache_t cache, struct ocf_user_part *part)
|
||||
void evp_lru_init_evp(ocf_cache_t cache, struct ocf_part *part)
|
||||
{
|
||||
struct ocf_lru_list *clean_list;
|
||||
struct ocf_lru_list *dirty_list;
|
||||
@ -752,7 +752,7 @@ void evp_lru_init_evp(ocf_cache_t cache, struct ocf_user_part *part)
|
||||
}
|
||||
}
|
||||
|
||||
void evp_lru_clean_cline(ocf_cache_t cache, struct ocf_user_part *part,
|
||||
void evp_lru_clean_cline(ocf_cache_t cache, struct ocf_part *part,
|
||||
uint32_t cline)
|
||||
{
|
||||
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
|
||||
@ -770,7 +770,7 @@ void evp_lru_clean_cline(ocf_cache_t cache, struct ocf_user_part *part,
|
||||
OCF_METADATA_EVICTION_WR_UNLOCK(cline);
|
||||
}
|
||||
|
||||
void evp_lru_dirty_cline(ocf_cache_t cache, struct ocf_user_part *part,
|
||||
void evp_lru_dirty_cline(ocf_cache_t cache, struct ocf_part *part,
|
||||
uint32_t cline)
|
||||
{
|
||||
uint32_t ev_list = (cline % OCF_NUM_EVICTION_LISTS);
|
||||
|
@@ -8,6 +8,7 @@
 #include "eviction.h"
 #include "lru_structs.h"

+struct ocf_part;
 struct ocf_user_part;
 struct ocf_request;

@@ -15,13 +16,13 @@ void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
 void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
 bool evp_lru_can_evict(struct ocf_cache *cache);
 uint32_t evp_lru_req_clines(struct ocf_request *req,
-		struct ocf_user_part *part, uint32_t cline_no);
+		struct ocf_part *part, uint32_t cline_no);
 void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
-void evp_lru_init_evp(struct ocf_cache *cache, struct ocf_user_part *part);
-void evp_lru_dirty_cline(struct ocf_cache *cache, struct ocf_user_part *part,
+void evp_lru_init_evp(struct ocf_cache *cache, struct ocf_part *part);
+void evp_lru_dirty_cline(struct ocf_cache *cache, struct ocf_part *part,
 		uint32_t cline);
-void evp_lru_clean_cline(struct ocf_cache *cache, struct ocf_user_part *part,
+void evp_lru_clean_cline(struct ocf_cache *cache, struct ocf_part *part,
 		uint32_t cline);
-void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *part,
+void evp_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
 		ocf_queue_t io_queue, uint32_t count);
 #endif
@@ -53,7 +53,7 @@ static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
 }

 static inline uint32_t ocf_eviction_need_space(ocf_cache_t cache,
-		struct ocf_request *req, struct ocf_user_part *part,
+		struct ocf_request *req, struct ocf_part *part,
 		uint32_t clines)
 {
 	uint8_t type;

@@ -84,7 +84,7 @@ static inline void ocf_eviction_set_hot_cache_line(
 }

 static inline void ocf_eviction_initialize(struct ocf_cache *cache,
-		struct ocf_user_part *part)
+		struct ocf_part *part)
 {
 	uint8_t type = cache->conf_meta->eviction_policy_type;

@@ -98,7 +98,7 @@ static inline void ocf_eviction_initialize(struct ocf_cache *cache,
 }

 static inline void ocf_eviction_flush_dirty(ocf_cache_t cache,
-		struct ocf_user_part *part, ocf_queue_t io_queue,
+		struct ocf_user_part *user_part, ocf_queue_t io_queue,
 		uint32_t count)
 {
 	uint8_t type = cache->conf_meta->eviction_policy_type;

@@ -106,7 +106,7 @@ static inline void ocf_eviction_flush_dirty(ocf_cache_t cache,
 	ENV_BUG_ON(type >= ocf_eviction_max);

 	if (likely(evict_policy_ops[type].flush_dirty)) {
-		evict_policy_ops[type].flush_dirty(cache, part, io_queue,
+		evict_policy_ops[type].flush_dirty(cache, user_part, io_queue,
 				count);
 	}
 }
@@ -36,6 +36,11 @@

 #define OCF_METADATA_HASH_DIFF_MAX 1000

+struct ocf_part_runtime_meta {
+	struct ocf_part_runtime runtime;
+	struct cleaning_policy clean_pol;
+};
+
 enum {
 	ocf_metadata_status_type_valid = 0,
 	ocf_metadata_status_type_dirty,

@@ -86,10 +91,10 @@ static ocf_cache_line_t ocf_metadata_get_entries(
 		return 32;

 	case metadata_segment_part_config:
-		return OCF_IO_CLASS_MAX + 1;
+		return OCF_USER_IO_CLASS_MAX + 1;

 	case metadata_segment_part_runtime:
-		return OCF_IO_CLASS_MAX + 1;
+		return OCF_USER_IO_CLASS_MAX + 1;

 	case metadata_segment_core_config:
 		return OCF_CORE_MAX;

@@ -154,7 +159,7 @@ static int64_t ocf_metadata_get_element_size(
 		break;

 	case metadata_segment_part_runtime:
-		size = sizeof(struct ocf_user_part_runtime);
+		size = sizeof(struct ocf_part_runtime_meta);
 		break;

 	case metadata_segment_hash:

@@ -515,7 +520,7 @@ static int ocf_metadata_init_fixed_size(struct ocf_cache *cache,
 	struct ocf_core_meta_config *core_meta_config;
 	struct ocf_core_meta_runtime *core_meta_runtime;
 	struct ocf_user_part_config *part_config;
-	struct ocf_user_part_runtime *part_runtime;
+	struct ocf_part_runtime_meta *part_runtime_meta;
 	struct ocf_metadata_segment *superblock;
 	ocf_core_t core;
 	ocf_core_id_t core_id;

@@ -565,12 +570,15 @@ static int ocf_metadata_init_fixed_size(struct ocf_cache *cache,

 	/* Set partition metadata */
 	part_config = METADATA_MEM_POOL(ctrl, metadata_segment_part_config);
-	part_runtime = METADATA_MEM_POOL(ctrl, metadata_segment_part_runtime);
+	part_runtime_meta = METADATA_MEM_POOL(ctrl,
+			metadata_segment_part_runtime);

-	for (i = 0; i < OCF_IO_CLASS_MAX + 1; i++) {
+	for (i = 0; i < OCF_USER_IO_CLASS_MAX + 1; i++) {
 		cache->user_parts[i].config = &part_config[i];
-		cache->user_parts[i].runtime = &part_runtime[i];
-		cache->user_parts[i].id = i;
+		cache->user_parts[i].clean_pol = &part_runtime_meta[i].clean_pol;
+		cache->user_parts[i].part.runtime =
+				&part_runtime_meta[i].runtime;
+		cache->user_parts[i].part.id = i;
 	}

 	/* Set core metadata */
@@ -58,7 +58,7 @@ int ocf_metadata_actor(struct ocf_cache *cache,
 	end_line = ocf_bytes_2_lines(cache, end_byte);

 	if (part_id != PARTITION_INVALID) {
-		for (i = cache->user_parts[part_id].runtime->head;
+		for (i = cache->user_parts[part_id].part.runtime->head;
 				i != cache->device->collision_table_entries;
 				i = next_i) {
 			next_i = ocf_metadata_get_partition_next(cache, i);
@@ -6,7 +6,7 @@
 #include "ocf/ocf.h"
 #include "metadata.h"
 #include "metadata_internal.h"
-#include "../utils/utils_part.h"
+#include "../utils/utils_user_part.h"

 void ocf_metadata_get_partition_info(struct ocf_cache *cache,
 		ocf_cache_line_t line, ocf_part_id_t *part_id,

@@ -92,7 +92,7 @@ void ocf_metadata_set_partition_info(struct ocf_cache *cache,
 static void update_partition_head(struct ocf_cache *cache,
 		ocf_part_id_t part_id, ocf_cache_line_t line)
 {
-	struct ocf_user_part *part = &cache->user_parts[part_id];
+	struct ocf_part *part = &cache->user_parts[part_id].part;

 	part->runtime->head = line;
 }

@@ -103,7 +103,8 @@ void ocf_metadata_add_to_partition(struct ocf_cache *cache,
 {
 	ocf_cache_line_t line_head;
 	ocf_cache_line_t line_entries = cache->device->collision_table_entries;
-	struct ocf_user_part *part = &cache->user_parts[part_id];
+	struct ocf_user_part *user_part = &cache->user_parts[part_id];
+	struct ocf_part *part = &user_part->part;

 	ENV_BUG_ON(!(line < line_entries));

@@ -116,11 +117,11 @@ void ocf_metadata_add_to_partition(struct ocf_cache *cache,
 		ocf_metadata_set_partition_info(cache, line, part_id,
 				line_entries, line_entries);

-		if (!ocf_part_is_valid(part)) {
+		if (!ocf_user_part_is_valid(user_part)) {
 			/* Partition becomes empty, and is not valid
 			 * update list of partitions
 			 */
-			ocf_part_sort(cache);
+			ocf_user_part_sort(cache);
 		}

 	} else {

@@ -149,7 +150,8 @@ void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
 	int is_head, is_tail;
 	ocf_cache_line_t prev_line, next_line;
 	uint32_t line_entries = cache->device->collision_table_entries;
-	struct ocf_user_part *part = &cache->user_parts[part_id];
+	struct ocf_user_part *user_part = &cache->user_parts[part_id];
+	struct ocf_part *part = &user_part->part;

 	ENV_BUG_ON(!(line < line_entries));

@@ -172,11 +174,11 @@ void ocf_metadata_remove_from_partition(struct ocf_cache *cache,

 		update_partition_head(cache, part_id, line_entries);

-		if (!ocf_part_is_valid(part)) {
+		if (!ocf_user_part_is_valid(user_part)) {
 			/* Partition becomes not empty, and is not valid
 			 * update list of partitions
 			 */
-			ocf_part_sort(cache);
+			ocf_user_part_sort(cache);
 		}

 	} else if (is_head) {
@@ -26,11 +26,10 @@ struct ocf_user_part_config {
 	ocf_cache_mode_t cache_mode;
 };

-struct ocf_user_part_runtime {
+struct ocf_part_runtime {
 	uint32_t curr_size;
 	uint32_t head;
 	struct eviction_policy eviction[OCF_NUM_EVICTION_LISTS];
-	struct cleaning_policy cleaning;
 };

 typedef bool ( *_lru_hash_locked_pfn)(struct ocf_request *req,

@@ -45,7 +44,7 @@ struct ocf_lru_iter
 	/* cache object */
 	ocf_cache_t cache;
 	/* target partition */
-	struct ocf_user_part *part;
+	struct ocf_part *part;
 	/* available (non-empty) eviction list bitmap rotated so that current
 	   @evp is on the most significant bit */
 	unsigned long long next_avail_evp;

@@ -72,10 +71,18 @@ struct ocf_part_cleaning_ctx {
 	ocf_cache_line_t cline[OCF_EVICTION_CLEAN_SIZE];
 };

+/* common partition data for both user-deined partitions as
+ * well as freelist
+ */
+struct ocf_part {
+	struct ocf_part_runtime *runtime;
+	ocf_part_id_t id;
+};
+
 struct ocf_user_part {
 	struct ocf_user_part_config *config;
-	struct ocf_user_part_runtime *runtime;
-	ocf_part_id_t id;
+	struct cleaning_policy *clean_pol;
+	struct ocf_part part;
 	struct ocf_part_cleaning_ctx cleaning;
 	struct ocf_lst_entry lst_valid;
 };
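
The split mainly changes accessor paths at call sites; a before/after sketch based on the changes in this diff (variable names shortened for illustration):

	/* before: generic and cleaning data both hung off ocf_user_part's runtime */
	occupancy = user_part->runtime->curr_size;
	alru_size = env_atomic_read(&user_part->runtime->cleaning.policy.alru.size);

	/* after: generic data via the embedded ocf_part, cleaning via clean_pol */
	occupancy = user_part->part.runtime->curr_size;
	alru_size = env_atomic_read(&user_part->clean_pol->policy.alru.size);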
@@ -56,7 +56,7 @@ struct ocf_metadata_lock
 	struct ocf_metadata_global_lock global[OCF_NUM_GLOBAL_META_LOCKS];
 		/*!< global metadata lock (GML) */
 	env_rwlock eviction[OCF_NUM_EVICTION_LISTS]; /*!< Fast lock for eviction policy */
-	env_spinlock partition[OCF_IO_CLASS_MAX]; /* partition lock */
+	env_spinlock partition[OCF_USER_IO_CLASS_MAX]; /* partition lock */
 	env_rwsem *hash; /*!< Hash bucket locks */
 	env_rwsem *collision_pages; /*!< Collision table page locks */
 	ocf_cache_t cache; /*!< Parent cache object */
@@ -161,7 +161,7 @@ static void ocf_metadata_load_superblock_post(ocf_pipeline_t pipeline,
 		OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
 	}

-	if (sb_config->valid_parts_no > OCF_IO_CLASS_MAX) {
+	if (sb_config->valid_parts_no > OCF_USER_IO_CLASS_MAX) {
 		ocf_cache_log(cache, log_err,
 				"Loading cache state ERROR, invalid partition count\n");
 		OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL);
@ -12,7 +12,7 @@
|
||||
#include "../metadata/metadata.h"
|
||||
#include "../metadata/metadata_io.h"
|
||||
#include "../engine/cache_engine.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_user_part.h"
|
||||
#include "../utils/utils_cache_line.h"
|
||||
#include "../utils/utils_io.h"
|
||||
#include "../utils/utils_cache_line.h"
|
||||
@ -169,7 +169,7 @@ static void __init_partitions(ocf_cache_t cache)
|
||||
OCF_IO_CLASS_PRIO_LOWEST, true));
|
||||
|
||||
/* Add other partition to the cache and make it as dummy */
|
||||
for (i_part = 0; i_part < OCF_IO_CLASS_MAX; i_part++) {
|
||||
for (i_part = 0; i_part < OCF_USER_IO_CLASS_MAX; i_part++) {
|
||||
ocf_refcnt_freeze(&cache->user_parts[i_part].cleaning.counter);
|
||||
|
||||
if (i_part == PARTITION_DEFAULT)
|
||||
@ -182,13 +182,13 @@ static void __init_partitions(ocf_cache_t cache)
|
||||
}
|
||||
}
|
||||
|
||||
static void __init_partitions_attached(ocf_cache_t cache)
|
||||
static void __init_user_parts_attached(ocf_cache_t cache)
|
||||
{
|
||||
struct ocf_user_part *part;
|
||||
struct ocf_part *part;
|
||||
ocf_part_id_t part_id;
|
||||
|
||||
for (part_id = 0; part_id < OCF_IO_CLASS_MAX; part_id++) {
|
||||
part = &cache->user_parts[part_id];
|
||||
for (part_id = 0; part_id < OCF_USER_IO_CLASS_MAX; part_id++) {
|
||||
part = &cache->user_parts[part_id].part;
|
||||
|
||||
part->runtime->head = cache->device->collision_table_entries;
|
||||
part->runtime->curr_size = 0;
|
||||
@ -283,7 +283,7 @@ static void __reset_stats(ocf_cache_t cache)
|
||||
env_atomic_set(&core->runtime_meta->dirty_clines, 0);
|
||||
env_atomic64_set(&core->runtime_meta->dirty_since, 0);
|
||||
|
||||
for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
|
||||
for (i = 0; i != OCF_USER_IO_CLASS_MAX; i++) {
|
||||
env_atomic_set(&core->runtime_meta->
|
||||
part_counters[i].cached_clines, 0);
|
||||
env_atomic_set(&core->runtime_meta->
|
||||
@ -301,7 +301,7 @@ static ocf_error_t init_attached_data_structures(ocf_cache_t cache,
|
||||
|
||||
ocf_metadata_init_hash_table(cache);
|
||||
ocf_metadata_init_collision(cache);
|
||||
__init_partitions_attached(cache);
|
||||
__init_user_parts_attached(cache);
|
||||
__init_freelist(cache);
|
||||
|
||||
result = __init_cleaning_policy(cache);
|
||||
@ -321,7 +321,7 @@ static void init_attached_data_structures_recovery(ocf_cache_t cache)
|
||||
{
|
||||
ocf_metadata_init_hash_table(cache);
|
||||
ocf_metadata_init_collision(cache);
|
||||
__init_partitions_attached(cache);
|
||||
__init_user_parts_attached(cache);
|
||||
__reset_stats(cache);
|
||||
__init_metadata_version(cache);
|
||||
}
|
||||
@ -1172,7 +1172,7 @@ static void _ocf_mngt_cache_init(ocf_cache_t cache,
|
||||
INIT_LIST_HEAD(&cache->io_queues);
|
||||
|
||||
/* Init Partitions */
|
||||
ocf_part_init(cache);
|
||||
ocf_user_part_init(cache);
|
||||
|
||||
__init_cores(cache);
|
||||
__init_metadata_version(cache);
|
||||
|
@@ -457,7 +457,7 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline,
 	env_atomic_set(&core->runtime_meta->dirty_clines, 0);
 	env_atomic64_set(&core->runtime_meta->dirty_since, 0);

-	for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
+	for (i = 0; i != OCF_USER_IO_CLASS_MAX; i++) {
 		env_atomic_set(&core->runtime_meta->
 				part_counters[i].cached_clines, 0);
 		env_atomic_set(&core->runtime_meta->
@@ -12,7 +12,7 @@
 #include "../engine/engine_common.h"
 #include "../utils/utils_cleaner.h"
 #include "../utils/utils_cache_line.h"
-#include "../utils/utils_part.h"
+#include "../utils/utils_user_part.h"
 #include "../utils/utils_pipeline.h"
 #include "../utils/utils_refcnt.h"
 #include "../ocf_request.h"
@ -8,19 +8,19 @@
|
||||
#include "../ocf_priv.h"
|
||||
#include "../metadata/metadata.h"
|
||||
#include "../engine/cache_engine.h"
|
||||
#include "../utils/utils_part.h"
|
||||
#include "../utils/utils_user_part.h"
|
||||
#include "../eviction/ops.h"
|
||||
#include "ocf_env.h"
|
||||
|
||||
static uint64_t _ocf_mngt_count_parts_min_size(struct ocf_cache *cache)
|
||||
static uint64_t _ocf_mngt_count_user_parts_min_size(struct ocf_cache *cache)
|
||||
{
|
||||
struct ocf_user_part *part;
|
||||
struct ocf_user_part *user_part;
|
||||
ocf_part_id_t part_id;
|
||||
uint64_t count = 0;
|
||||
|
||||
for_each_part(cache, part, part_id) {
|
||||
if (ocf_part_is_valid(part))
|
||||
count += part->config->min_size;
|
||||
for_each_user_part(cache, user_part, part_id) {
|
||||
if (ocf_user_part_is_valid(user_part))
|
||||
count += user_part->config->min_size;
|
||||
}
|
||||
|
||||
return count;
|
||||
@ -37,7 +37,7 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
|
||||
if (!name)
|
||||
return -OCF_ERR_INVAL;
|
||||
|
||||
if (part_id >= OCF_IO_CLASS_MAX)
|
||||
if (part_id >= OCF_USER_IO_CLASS_MAX)
|
||||
return -OCF_ERR_INVAL;
|
||||
|
||||
if (cache->user_parts[part_id].config->flags.valid)
|
||||
@ -56,7 +56,7 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
|
||||
return -OCF_ERR_INVAL;
|
||||
}
|
||||
|
||||
for_each_lst(&cache->lst_part, iter, iter_id) {
|
||||
for_each_lst(&cache->user_part_list, iter, iter_id) {
|
||||
if (iter_id == part_id) {
|
||||
ocf_cache_log(cache, log_err,
|
||||
"Part with id %hu already exists\n", part_id);
|
||||
@ -73,9 +73,9 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
|
||||
cache->user_parts[part_id].config->priority = priority;
|
||||
cache->user_parts[part_id].config->cache_mode = ocf_cache_mode_max;
|
||||
|
||||
ocf_part_set_valid(cache, part_id, valid);
|
||||
ocf_lst_add(&cache->lst_part, part_id);
|
||||
ocf_part_sort(cache);
|
||||
ocf_user_part_set_valid(cache, part_id, valid);
|
||||
ocf_lst_add(&cache->user_part_list, part_id);
|
||||
ocf_user_part_sort(cache);
|
||||
|
||||
cache->user_parts[part_id].config->flags.added = 1;
|
||||
|
||||
@ -85,12 +85,13 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
|
||||
static int _ocf_mngt_set_partition_size(struct ocf_cache *cache,
|
||||
ocf_part_id_t part_id, uint32_t min, uint32_t max)
|
||||
{
|
||||
struct ocf_user_part *part = &cache->user_parts[part_id];
|
||||
struct ocf_user_part *user_part = &cache->user_parts[part_id];
|
||||
|
||||
if (min > max)
|
||||
return -OCF_ERR_INVAL;
|
||||
|
||||
if (_ocf_mngt_count_parts_min_size(cache) + min > PARTITION_SIZE_MAX) {
|
||||
if (_ocf_mngt_count_user_parts_min_size(cache) + min >
|
||||
PARTITION_SIZE_MAX) {
|
||||
/* Illegal configuration in which sum of all min_sizes exceeds
|
||||
* cache size.
|
||||
*/
|
||||
@ -100,8 +101,8 @@ static int _ocf_mngt_set_partition_size(struct ocf_cache *cache,
|
||||
if (max > PARTITION_SIZE_MAX)
|
||||
max = PARTITION_SIZE_MAX;
|
||||
|
||||
part->config->min_size = min;
|
||||
part->config->max_size = max;
|
||||
user_part->config->min_size = min;
|
||||
user_part->config->max_size = max;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -123,7 +124,7 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache,

dest_part = &cache->user_parts[part_id];

if (!ocf_part_is_added(dest_part)) {
if (!ocf_user_part_is_added(dest_part)) {
ocf_cache_log(cache, log_info, "Setting IO class, id: %u, "
"name: '%s' [ ERROR ]\n", part_id, dest_part->config->name);
return -OCF_ERR_INVAL;
@ -150,7 +151,7 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
" [ ERROR ]\n", part_id, dest_part->config->name, max);
return -OCF_ERR_INVAL;
}
ocf_part_set_prio(cache, dest_part, prio);
ocf_user_part_set_prio(cache, dest_part, prio);
dest_part->config->cache_mode = cache_mode;

ocf_cache_log(cache, log_info,
@ -175,21 +176,21 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
return -OCF_ERR_INVAL;
}

if (ocf_part_is_valid(dest_part)) {
if (ocf_user_part_is_valid(dest_part)) {
/* Updating existing */
ocf_cache_log(cache, log_info, "Updating existing IO "
"class, id: %u, name: '%s', max size %u%% [ OK ]\n",
part_id, dest_part->config->name, max);
} else {
/* Adding new */
ocf_part_set_valid(cache, part_id, true);
ocf_user_part_set_valid(cache, part_id, true);

ocf_cache_log(cache, log_info, "Adding new IO class, "
"id: %u, name: '%s', max size %u%% [ OK ]\n", part_id,
dest_part->config->name, max);
}

ocf_part_set_prio(cache, dest_part, prio);
ocf_user_part_set_prio(cache, dest_part, prio);
dest_part->config->cache_mode = cache_mode;

return result;
@ -212,13 +213,13 @@ static void _ocf_mngt_io_class_remove(ocf_cache_t cache,
return;
}

if (!ocf_part_is_valid(dest_part)) {
if (!ocf_user_part_is_valid(dest_part)) {
/* Does not exist */
return;
}

ocf_part_set_valid(cache, part_id, false);
ocf_user_part_set_valid(cache, part_id, false);

ocf_cache_log(cache, log_info,
"Removing IO class, id: %u [ OK ]\n", part_id);
@ -240,7 +241,7 @@ static int _ocf_mngt_io_class_edit(ocf_cache_t cache,
static int _ocf_mngt_io_class_validate_cfg(ocf_cache_t cache,
const struct ocf_mngt_io_class_config *cfg)
{
if (cfg->class_id >= OCF_IO_CLASS_MAX)
if (cfg->class_id >= OCF_USER_IO_CLASS_MAX)
return -OCF_ERR_INVAL;

/* Name set to null means particular io_class should be removed */
@ -252,13 +253,13 @@ static int _ocf_mngt_io_class_validate_cfg(ocf_cache_t cache,
return -OCF_ERR_INVAL;
}

if (!ocf_part_is_name_valid(cfg->name)) {
if (!ocf_user_part_is_name_valid(cfg->name)) {
ocf_cache_log(cache, log_info,
"The name of the partition is not valid\n");
return -OCF_ERR_INVAL;
}

if (!ocf_part_is_prio_valid(cfg->prio)) {
if (!ocf_user_part_is_prio_valid(cfg->prio)) {
ocf_cache_log(cache, log_info,
"Invalid value of the partition priority\n");
return -OCF_ERR_INVAL;
@ -284,7 +285,7 @@ int ocf_mngt_cache_io_classes_configure(ocf_cache_t cache,
OCF_CHECK_NULL(cache);
OCF_CHECK_NULL(cfg);

for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
result = _ocf_mngt_io_class_validate_cfg(cache, &cfg->config[i]);
if (result)
return result;
@ -301,7 +302,7 @@ int ocf_mngt_cache_io_classes_configure(ocf_cache_t cache,
if (result)
goto out_cpy;

for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
result = _ocf_mngt_io_class_edit(cache, &cfg->config[i]);
if (result) {
ocf_cache_log(cache, log_err,
@ -310,7 +311,7 @@ int ocf_mngt_cache_io_classes_configure(ocf_cache_t cache,
}
}

ocf_part_sort(cache);
ocf_user_part_sort(cache);

out_edit:
if (result) {
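
For context, a minimal caller-side sketch of filling the renamed configuration array before calling ocf_mngt_cache_io_classes_configure(); the class_id, name and prio fields are taken from the unit-test code further below, and the removal-by-NULL-name convention is inferred from the validate_cfg comment above (illustration only, not code from this commit):

static int example_configure_io_classes(ocf_cache_t cache)
{
	struct ocf_mngt_io_classes_config cfg = {0};
	int i;

	for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
		cfg.config[i].class_id = i;
		/* A NULL name asks for removal of that IO class;
		 * keep class 0 ("unclassified") configured. */
		cfg.config[i].name = (i == 0) ? "unclassified" : NULL;
		cfg.config[i].prio = i;
	}

	return ocf_mngt_cache_io_classes_configure(cache, &cfg);
}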
@ -8,7 +8,7 @@
#include "engine/cache_engine.h"
#include "utils/utils_cache_line.h"
#include "ocf_request.h"
#include "utils/utils_part.h"
#include "utils/utils_user_part.h"
#include "ocf_priv.h"
#include "ocf_cache_priv.h"
#include "ocf_queue_priv.h"

@ -77,8 +77,8 @@ struct ocf_cache {

struct ocf_cache_device *device;

struct ocf_lst lst_part;
struct ocf_user_part user_parts[OCF_IO_CLASS_MAX + 1];
struct ocf_lst user_part_list;
struct ocf_user_part user_parts[OCF_USER_IO_CLASS_MAX + 1];

ocf_freelist_t freelist;

@ -9,7 +9,7 @@
#include "ocf_io_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_part.h"
#include "utils/utils_user_part.h"
#include "ocf_request.h"
#include "ocf_trace_priv.h"

@ -186,7 +186,7 @@ static inline int ocf_core_validate_io(struct ocf_io *io)
if (io->addr + io->bytes > ocf_volume_get_length(volume))
return -OCF_ERR_INVAL;

if (io->io_class >= OCF_IO_CLASS_MAX)
if (io->io_class >= OCF_USER_IO_CLASS_MAX)
return -OCF_ERR_INVAL;

if (io->dir != OCF_READ && io->dir != OCF_WRITE)
@ -248,7 +248,7 @@ void ocf_core_volume_submit_io(struct ocf_io *io)
return;
}

req->part_id = ocf_part_class2id(cache, io->io_class);
req->part_id = ocf_user_part_class2id(cache, io->io_class);
req->core = core;
req->complete = ocf_req_complete;

@ -310,7 +310,7 @@ int ocf_core_submit_io_fast(struct ocf_io *io)

req->core = core;
req->complete = ocf_req_complete;
req->part_id = ocf_part_class2id(cache, io->io_class);
req->part_id = ocf_user_part_class2id(cache, io->io_class);

ocf_resolve_effective_cache_mode(cache, core, req);

@ -70,7 +70,7 @@ struct ocf_core_meta_runtime {
* cache device
*/
env_atomic dirty_clines;
} part_counters[OCF_IO_CLASS_MAX];
} part_counters[OCF_USER_IO_CLASS_MAX];
};

struct ocf_core {

@ -6,7 +6,7 @@
#include "ocf/ocf.h"
#include "metadata/metadata.h"

struct ocf_part {
struct ocf_freelist_part {
ocf_cache_line_t head;
ocf_cache_line_t tail;
env_atomic64 curr_size;
@ -17,7 +17,7 @@ struct ocf_freelist {
struct ocf_cache *cache;

/* partition list array */
struct ocf_part *part;
struct ocf_freelist_part *part;

/* freelist lock array */
env_spinlock *lock;
@ -52,7 +52,7 @@ static void _ocf_freelist_remove_cache_line(ocf_freelist_t freelist,
uint32_t ctx, ocf_cache_line_t cline)
{
struct ocf_cache *cache = freelist->cache;
struct ocf_part *freelist_part = &freelist->part[ctx];
struct ocf_freelist_part *freelist_part = &freelist->part[ctx];
int is_head, is_tail;
ocf_part_id_t invalid_part_id = PARTITION_INVALID;
ocf_cache_line_t prev, next;
@ -217,7 +217,7 @@ static void ocf_freelist_add_cache_line(ocf_freelist_t freelist,
uint32_t ctx, ocf_cache_line_t line)
{
struct ocf_cache *cache = freelist->cache;
struct ocf_part *freelist_part = &freelist->part[ctx];
struct ocf_freelist_part *freelist_part = &freelist->part[ctx];
ocf_cache_line_t tail;
ocf_cache_line_t line_entries = ocf_metadata_collision_table_entries(
freelist->cache);
@ -7,7 +7,7 @@
#include "ocf_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_part.h"
#include "utils/utils_user_part.h"

int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
struct ocf_io_class_info *info)
@ -19,10 +19,10 @@ int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
if (!info)
return -OCF_ERR_INVAL;

if (io_class >= OCF_IO_CLASS_MAX)
if (io_class >= OCF_USER_IO_CLASS_MAX)
return -OCF_ERR_INVAL;

if (!ocf_part_is_valid(&cache->user_parts[part_id])) {
if (!ocf_user_part_is_valid(&cache->user_parts[part_id])) {
/* Partition does not exist */
return -OCF_ERR_IO_CLASS_NOT_EXIST;
}
@ -35,7 +35,7 @@ int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,

info->priority = cache->user_parts[part_id].config->priority;
info->curr_size = ocf_cache_is_device_attached(cache) ?
cache->user_parts[part_id].runtime->curr_size : 0;
cache->user_parts[part_id].part.runtime->curr_size : 0;
info->min_size = cache->user_parts[part_id].config->min_size;
info->max_size = cache->user_parts[part_id].config->max_size;

@ -50,7 +50,7 @@ int ocf_cache_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
int ocf_io_class_visit(ocf_cache_t cache, ocf_io_class_visitor_t visitor,
void *cntx)
{
struct ocf_user_part *part;
struct ocf_user_part *user_part;
ocf_part_id_t part_id;
int result = 0;

@ -59,8 +59,8 @@ int ocf_io_class_visit(ocf_cache_t cache, ocf_io_class_visitor_t visitor,
if (!visitor)
return -OCF_ERR_INVAL;

for_each_part(cache, part, part_id) {
if (!ocf_part_is_valid(part))
for_each_user_part(cache, user_part, part_id) {
if (!ocf_user_part_is_valid(user_part))
continue;

result = visitor(cache, part_id, cntx);

@ -7,7 +7,7 @@
#include "ocf_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_part.h"
#include "utils/utils_user_part.h"
#include "utils/utils_cache_line.h"

#ifdef OCF_DEBUG_STATS
@ -195,7 +195,7 @@ void ocf_core_stats_initialize(ocf_core_t core)
ocf_stats_error_init(&exp_obj_stats->cache_errors);
ocf_stats_error_init(&exp_obj_stats->core_errors);

for (i = 0; i != OCF_IO_CLASS_MAX; i++)
for (i = 0; i != OCF_USER_IO_CLASS_MAX; i++)
ocf_stats_part_init(&exp_obj_stats->part_counters[i]);

#ifdef OCF_DEBUG_STATS
@ -286,7 +286,7 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,

cache = ocf_core_get_cache(core);

if (!ocf_part_is_valid(&cache->user_parts[part_id]))
if (!ocf_user_part_is_valid(&cache->user_parts[part_id]))
return -OCF_ERR_IO_CLASS_NOT_EXIST;

part_stat = &core->counters->part_counters[part_id];
@ -333,7 +333,7 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats)
&core_stats->debug_stats);
#endif

for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
for (i = 0; i != OCF_USER_IO_CLASS_MAX; i++) {
curr = &core_stats->part_counters[i];

accum_req_stats(&stats->read_reqs,

@ -7,7 +7,7 @@
#include "ocf_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_part.h"
#include "utils/utils_user_part.h"
#include "utils/utils_cache_line.h"
#include "utils/utils_stats.h"

@ -176,7 +176,7 @@ struct ocf_counters_core {
struct ocf_counters_error core_errors;
struct ocf_counters_error cache_errors;

struct ocf_counters_part part_counters[OCF_IO_CLASS_MAX];
struct ocf_counters_part part_counters[OCF_USER_IO_CLASS_MAX];
#ifdef OCF_DEBUG_STATS
struct ocf_counters_debug debug_stats;
#endif

@ -102,7 +102,7 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
{
ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
struct ocf_user_part *part = &cache->user_parts[part_id];
struct ocf_part *part = &cache->user_parts[part_id].part;
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
bool line_is_clean;

@ -131,8 +131,10 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
env_atomic_dec(&req->core->runtime_meta->
part_counters[part_id].dirty_clines);

if (likely(evict_policy_ops[evp_type].clean_cline))
evict_policy_ops[evp_type].clean_cline(cache, part, line);
if (likely(evict_policy_ops[evp_type].clean_cline)) {
evict_policy_ops[evp_type].clean_cline(cache,
part, line);
}

ocf_purge_cleaning_policy(cache, line);
}
@ -145,7 +147,7 @@ void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
{
ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
struct ocf_user_part *part = &cache->user_parts[part_id];
struct ocf_part *part = &cache->user_parts[part_id].part;
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
bool line_was_dirty;

@ -174,8 +176,10 @@ void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
env_atomic_inc(&req->core->runtime_meta->
part_counters[part_id].dirty_clines);

if (likely(evict_policy_ops[evp_type].dirty_cline))
evict_policy_ops[evp_type].dirty_cline(cache, part, line);
if (likely(evict_policy_ops[evp_type].dirty_cline)) {
evict_policy_ops[evp_type].dirty_cline(cache,
part, line);
}
}
}

@ -9,7 +9,7 @@
#include "../concurrency/ocf_concurrency.h"
#include "../ocf_request.h"
#include "utils_cleaner.h"
#include "utils_part.h"
#include "utils_user_part.h"
#include "utils_io.h"
#include "utils_cache_line.h"
#include "../ocf_queue_priv.h"
@ -1052,7 +1052,7 @@ void ocf_cleaner_refcnt_freeze(ocf_cache_t cache)
struct ocf_user_part *curr_part;
ocf_part_id_t part_id;

for_each_part(cache, curr_part, part_id)
for_each_user_part(cache, curr_part, part_id)
ocf_refcnt_freeze(&curr_part->cleaning.counter);
}

@ -1061,7 +1061,7 @@ void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)
struct ocf_user_part *curr_part;
ocf_part_id_t part_id;

for_each_part(cache, curr_part, part_id)
for_each_user_part(cache, curr_part, part_id)
ocf_refcnt_unfreeze(&curr_part->cleaning.counter);
}

@ -1084,7 +1084,7 @@ void ocf_cleaner_refcnt_register_zero_cb(ocf_cache_t cache,
ctx->cb = cb;
ctx->priv = priv;

for_each_part(cache, curr_part, part_id) {
for_each_user_part(cache, curr_part, part_id) {
env_atomic_inc(&ctx->waiting);
ocf_refcnt_register_zero_cb(&curr_part->cleaning.counter,
ocf_cleaner_refcnt_register_zero_cb_finish, ctx);
@ -1,180 +0,0 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/

#ifndef __UTILS_PARTITION_H__
#define __UTILS_PARTITION_H__

#include "../ocf_request.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../metadata/metadata_partition.h"

void ocf_part_init(struct ocf_cache *cache);

static inline bool ocf_part_is_valid(struct ocf_user_part *part)
{
return !!part->config->flags.valid;
}

static inline void ocf_part_set_prio(struct ocf_cache *cache,
struct ocf_user_part *part, int16_t prio)
{
if (part->config->priority != prio)
part->config->priority = prio;
}

static inline int16_t ocf_part_get_prio(struct ocf_cache *cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_IO_CLASS_MAX)
return cache->user_parts[part_id].config->priority;

return OCF_IO_CLASS_PRIO_LOWEST;
}

void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid);

static inline bool ocf_part_is_added(struct ocf_user_part *part)
{
return !!part->config->flags.added;
}

static inline ocf_part_id_t ocf_part_class2id(ocf_cache_t cache, uint64_t class)
{
if (class < OCF_IO_CLASS_MAX)
if (cache->user_parts[class].config->flags.valid)
return class;

return PARTITION_DEFAULT;
}

static inline uint32_t ocf_part_get_occupancy(struct ocf_user_part *part)
{
return part->runtime->curr_size;
}

static inline uint32_t ocf_part_get_min_size(ocf_cache_t cache,
struct ocf_user_part *part)
{
uint64_t ioclass_size;

ioclass_size = (uint64_t)part->config->min_size *
(uint64_t)cache->conf_meta->cachelines;

ioclass_size /= 100;

return (uint32_t)ioclass_size;
}

static inline uint32_t ocf_part_get_max_size(ocf_cache_t cache,
struct ocf_user_part *part)
{
uint64_t ioclass_size, max_size, cache_size;

max_size = part->config->max_size;
cache_size = cache->conf_meta->cachelines;

ioclass_size = max_size * cache_size;
ioclass_size = OCF_DIV_ROUND_UP(ioclass_size, 100);

return (uint32_t)ioclass_size;
}

void ocf_part_move(struct ocf_request *req);

#define for_each_part(cache, part, id) \
for_each_lst_entry(&cache->lst_part, part, id, \
struct ocf_user_part, lst_valid)

static inline void ocf_part_sort(struct ocf_cache *cache)
{
ocf_lst_sort(&cache->lst_part);
}

static inline bool ocf_part_is_enabled(struct ocf_user_part *part)
{
return part->config->max_size != 0;
}

static inline uint32_t ocf_part_overflow_size(struct ocf_cache *cache,
struct ocf_user_part *part)
{
uint32_t part_occupancy = ocf_part_get_occupancy(part);
uint32_t part_occupancy_limit = ocf_part_get_max_size(cache, part);

if (part_occupancy > part_occupancy_limit)
return part_occupancy - part_occupancy_limit;

return 0;
}

static inline bool ocf_part_has_space(struct ocf_request *req)
{
struct ocf_user_part *target_part = &req->cache->user_parts[req->part_id];
uint64_t part_occupancy_limit =
ocf_part_get_max_size(req->cache, target_part);
uint64_t needed_cache_lines = ocf_engine_repart_count(req) +
ocf_engine_unmapped_count(req);
uint64_t part_occupancy = ocf_part_get_occupancy(target_part);

return (part_occupancy + needed_cache_lines <= part_occupancy_limit);
}

static inline ocf_cache_mode_t ocf_part_get_cache_mode(ocf_cache_t cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_IO_CLASS_MAX)
return cache->user_parts[part_id].config->cache_mode;
return ocf_cache_mode_none;
}

static inline bool ocf_part_is_prio_valid(int64_t prio)
{
switch (prio) {
case OCF_IO_CLASS_PRIO_HIGHEST ... OCF_IO_CLASS_PRIO_LOWEST:
case OCF_IO_CLASS_PRIO_PINNED:
return true;

default:
return false;
}
}

/**
* routine checks for validity of a partition name.
*
* Following condition is checked:
* - string too long
* - string containing invalid characters (outside of low ascii)
* Following condition is NOT cheched:
* - empty string. (empty string is NOT a valid partition name, but
* this function returns true on empty string nevertheless).
*
* @return returns true if partition name is a valid name
*/
static inline bool ocf_part_is_name_valid(const char *name)
{
uint32_t length = 0;

while (*name) {
if (*name < ' ' || *name > '~')
return false;

if (',' == *name || '"' == *name)
return false;

name++;
length++;

if (length >= OCF_IO_CLASS_NAME_MAX)
return false;
}

return true;
}

#endif /* __UTILS_PARTITION_H__ */
@ -9,17 +9,17 @@
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../eviction/ops.h"
#include "utils_part.h"
#include "utils_user_part.h"

static struct ocf_lst_entry *ocf_part_lst_getter_valid(
static struct ocf_lst_entry *ocf_user_part_lst_getter_valid(
struct ocf_cache *cache, ocf_cache_line_t idx)
{
ENV_BUG_ON(idx > OCF_IO_CLASS_MAX);
ENV_BUG_ON(idx > OCF_USER_IO_CLASS_MAX);
return &cache->user_parts[idx].lst_valid;
}

static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
static int ocf_user_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_lst_entry *e1, struct ocf_lst_entry *e2)
{
struct ocf_user_part *p1 = container_of(e1, struct ocf_user_part,
@ -27,10 +27,9 @@ static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_user_part *p2 = container_of(e2, struct ocf_user_part,
lst_valid);
size_t p1_size = ocf_cache_is_device_attached(cache) ?
p1->runtime->curr_size : 0;
p1->part.runtime->curr_size : 0;
size_t p2_size = ocf_cache_is_device_attached(cache) ?
p2->runtime->curr_size : 0;

p2->part.runtime->curr_size : 0;
int v1 = p1->config->priority;
int v2 = p2->config->priority;

@ -79,13 +78,14 @@ static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
return v2 - v1;
}

void ocf_part_init(struct ocf_cache *cache)
void ocf_user_part_init(struct ocf_cache *cache)
{
ocf_lst_init(cache, &cache->lst_part, OCF_IO_CLASS_MAX,
ocf_part_lst_getter_valid, ocf_part_lst_cmp_valid);
ocf_lst_init(cache, &cache->user_part_list, OCF_USER_IO_CLASS_MAX,
ocf_user_part_lst_getter_valid,
ocf_user_part_lst_cmp_valid);
}

void ocf_part_move(struct ocf_request *req)
void ocf_user_part_move(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
struct ocf_map_info *entry;
@ -117,8 +117,8 @@ void ocf_part_move(struct ocf_request *req)
id_old = ocf_metadata_get_partition_id(cache, line);
id_new = req->part_id;

ENV_BUG_ON(id_old >= OCF_IO_CLASS_MAX ||
id_new >= OCF_IO_CLASS_MAX);
ENV_BUG_ON(id_old >= OCF_USER_IO_CLASS_MAX ||
id_new >= OCF_USER_IO_CLASS_MAX);

if (id_old == id_new) {
/* Partition of the request and cache line is the same,
@ -175,22 +175,23 @@ void ocf_part_move(struct ocf_request *req)
}
}

void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
void ocf_user_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid)
{
struct ocf_user_part *part = &cache->user_parts[id];
struct ocf_user_part *user_part = &cache->user_parts[id];

if (valid ^ part->config->flags.valid) {
if (valid ^ user_part->config->flags.valid) {
if (valid) {
part->config->flags.valid = true;
user_part->config->flags.valid = true;
cache->conf_meta->valid_parts_no++;
} else {
part->config->flags.valid = false;
user_part->config->flags.valid = false;
cache->conf_meta->valid_parts_no--;
part->config->priority = OCF_IO_CLASS_PRIO_LOWEST;
part->config->min_size = 0;
part->config->max_size = PARTITION_SIZE_MAX;
ENV_BUG_ON(env_strncpy(part->config->name, sizeof(part->config->name),
user_part->config->priority = OCF_IO_CLASS_PRIO_LOWEST;
user_part->config->min_size = 0;
user_part->config->max_size = PARTITION_SIZE_MAX;
ENV_BUG_ON(env_strncpy(user_part->config->name,
sizeof(user_part->config->name),
"Inactive", 9));
}
}
181
src/utils/utils_user_part.h
Normal file
@ -0,0 +1,181 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/

#ifndef __UTILS_PARTITION_H__
#define __UTILS_PARTITION_H__

#include "../ocf_request.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../metadata/metadata_partition.h"

void ocf_user_part_init(struct ocf_cache *cache);

static inline bool ocf_user_part_is_valid(struct ocf_user_part *user_part)
{
return !!user_part->config->flags.valid;
}

static inline void ocf_user_part_set_prio(struct ocf_cache *cache,
struct ocf_user_part *user_part, int16_t prio)
{
if (user_part->config->priority != prio)
user_part->config->priority = prio;
}

static inline int16_t ocf_user_part_get_prio(struct ocf_cache *cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_USER_IO_CLASS_MAX)
return cache->user_parts[part_id].config->priority;

return OCF_IO_CLASS_PRIO_LOWEST;
}

void ocf_user_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid);

static inline bool ocf_user_part_is_added(struct ocf_user_part *user_part)
{
return !!user_part->config->flags.added;
}

static inline ocf_part_id_t ocf_user_part_class2id(ocf_cache_t cache, uint64_t class)
{
if (class < OCF_USER_IO_CLASS_MAX)
if (cache->user_parts[class].config->flags.valid)
return class;

return PARTITION_DEFAULT;
}

static inline uint32_t ocf_part_get_occupancy(struct ocf_part *part)
{
return part->runtime->curr_size;
}

static inline uint32_t ocf_user_part_get_min_size(ocf_cache_t cache,
struct ocf_user_part *user_part)
{
uint64_t ioclass_size;

ioclass_size = (uint64_t)user_part->config->min_size *
(uint64_t)cache->conf_meta->cachelines;

ioclass_size /= 100;

return (uint32_t)ioclass_size;
}

static inline uint32_t ocf_user_part_get_max_size(ocf_cache_t cache,
struct ocf_user_part *user_part)
{
uint64_t ioclass_size, max_size, cache_size;

max_size = user_part->config->max_size;
cache_size = cache->conf_meta->cachelines;

ioclass_size = max_size * cache_size;
ioclass_size = OCF_DIV_ROUND_UP(ioclass_size, 100);

return (uint32_t)ioclass_size;
}

void ocf_user_part_move(struct ocf_request *req);

#define for_each_user_part(cache, user_part, id) \
for_each_lst_entry(&cache->user_part_list, user_part, id, \
struct ocf_user_part, lst_valid)

static inline void ocf_user_part_sort(struct ocf_cache *cache)
{
ocf_lst_sort(&cache->user_part_list);
}

static inline bool ocf_user_part_is_enabled(struct ocf_user_part *user_part)
{
return user_part->config->max_size != 0;
}

static inline uint32_t ocf_user_part_overflow_size(struct ocf_cache *cache,
struct ocf_user_part *user_part)
{
uint32_t part_occupancy = ocf_part_get_occupancy(&user_part->part);
uint32_t part_occupancy_limit = ocf_user_part_get_max_size(cache,
user_part);

if (part_occupancy > part_occupancy_limit)
return part_occupancy - part_occupancy_limit;

return 0;
}

static inline bool ocf_user_part_has_space(struct ocf_request *req)
{
struct ocf_user_part *user_part = &req->cache->user_parts[req->part_id];
uint64_t part_occupancy_limit =
ocf_user_part_get_max_size(req->cache, user_part);
uint64_t needed_cache_lines = ocf_engine_repart_count(req) +
ocf_engine_unmapped_count(req);
uint64_t part_occupancy = ocf_part_get_occupancy(&user_part->part);

return (part_occupancy + needed_cache_lines <= part_occupancy_limit);
}

static inline ocf_cache_mode_t ocf_user_part_get_cache_mode(ocf_cache_t cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_USER_IO_CLASS_MAX)
return cache->user_parts[part_id].config->cache_mode;
return ocf_cache_mode_none;
}

static inline bool ocf_user_part_is_prio_valid(int64_t prio)
{
switch (prio) {
case OCF_IO_CLASS_PRIO_HIGHEST ... OCF_IO_CLASS_PRIO_LOWEST:
case OCF_IO_CLASS_PRIO_PINNED:
return true;

default:
return false;
}
}

/**
* routine checks for validity of a partition name.
*
* Following condition is checked:
* - string too long
* - string containing invalid characters (outside of low ascii)
* Following condition is NOT cheched:
* - empty string. (empty string is NOT a valid partition name, but
* this function returns true on empty string nevertheless).
*
* @return returns true if partition name is a valid name
*/
static inline bool ocf_user_part_is_name_valid(const char *name)
{
uint32_t length = 0;

while (*name) {
if (*name < ' ' || *name > '~')
return false;

if (',' == *name || '"' == *name)
return false;

name++;
length++;

if (length >= OCF_IO_CLASS_NAME_MAX)
return false;
}

return true;
}

#endif /* __UTILS_PARTITION_H__ */
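
For orientation, a minimal usage sketch of the renamed helpers declared in the new header above, built only from names visible in this diff; the cache handle is assumed to be fully initialized, and this is an illustration rather than code from the commit:

/* Hypothetical example: walk the user partitions of a cache and report
 * the ones exceeding their configured occupancy limit. Occupancy now
 * lives in the embedded generic part (user_part->part). */
static void example_report_overflow(struct ocf_cache *cache)
{
	struct ocf_user_part *user_part;
	ocf_part_id_t part_id;

	for_each_user_part(cache, user_part, part_id) {
		if (!ocf_user_part_is_valid(user_part))
			continue;

		if (ocf_user_part_overflow_size(cache, user_part) > 0) {
			ocf_cache_log(cache, log_info,
					"IO class %u exceeds its max size\n",
					part_id);
		}
	}
}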
@ -30,7 +30,7 @@
#include "alru.h"
#include "../metadata/metadata.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_realloc.h"
#include "../concurrency/ocf_cache_line_concurrency.h"
#include "../ocf_def_priv.h"
@ -49,7 +49,9 @@ static void cleaning_policy_alru_initialize_test01(void **state)
print_test_description("Check if all variables are set correctly");

cache = test_malloc(sizeof(*cache));
cache->user_parts[part_id].runtime = test_malloc(sizeof(struct ocf_user_part_runtime));
cache->user_parts[part_id].part.runtime = test_malloc(sizeof(struct ocf_part_runtime));
cache->user_parts[part_id].clean_pol = test_malloc(sizeof(*cache->user_parts[part_id].clean_pol));
cache->user_parts[part_id].part.id = part_id;
cache->device = test_malloc(sizeof(struct ocf_cache_device));
cache->device->runtime_meta = test_malloc(sizeof(struct ocf_superblock_runtime));

@ -59,15 +61,16 @@ static void cleaning_policy_alru_initialize_test01(void **state)

assert_int_equal(result, 0);

assert_int_equal(env_atomic_read(&cache->user_parts[part_id].runtime->cleaning.policy.alru.size), 0);
assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_head, collision_table_entries);
assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_tail, collision_table_entries);
assert_int_equal(env_atomic_read(&cache->user_parts[part_id].clean_pol->policy.alru.size), 0);
assert_int_equal(cache->user_parts[part_id].clean_pol->policy.alru.lru_head, collision_table_entries);
assert_int_equal(cache->user_parts[part_id].clean_pol->policy.alru.lru_tail, collision_table_entries);

assert_int_equal(cache->device->runtime_meta->cleaning_thread_access, 0);

test_free(cache->device->runtime_meta);
test_free(cache->device);
test_free(cache->user_parts[part_id].runtime);
test_free(cache->user_parts[part_id].clean_pol);
test_free(cache->user_parts[part_id].part.runtime);
test_free(cache);
}

@ -82,27 +85,29 @@ static void cleaning_policy_alru_initialize_test02(void **state)
print_test_description("Check if only appropirate variables are changed");

cache = test_malloc(sizeof(*cache));
cache->user_parts[part_id].runtime = test_malloc(sizeof(struct ocf_user_part_runtime));
cache->user_parts[part_id].part.runtime = test_malloc(sizeof(struct ocf_part_runtime));
cache->user_parts[part_id].clean_pol = test_malloc(sizeof(*cache->user_parts[part_id].clean_pol));
cache->device = test_malloc(sizeof(struct ocf_cache_device));
cache->device->runtime_meta = test_malloc(sizeof(struct ocf_superblock_runtime));

env_atomic_set(&cache->user_parts[part_id].runtime->cleaning.policy.alru.size, 1);
cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_head = -collision_table_entries;
cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_tail = -collision_table_entries;
env_atomic_set(&cache->user_parts[part_id].clean_pol->policy.alru.size, 1);
cache->user_parts[part_id].clean_pol->policy.alru.lru_head = -collision_table_entries;
cache->user_parts[part_id].clean_pol->policy.alru.lru_tail = -collision_table_entries;

result = cleaning_policy_alru_initialize_part(cache, cache->user_parts[part_id], 0, 0);
result = cleaning_policy_alru_initialize_part(cache, &cache->user_parts[part_id], 0, 0);

assert_int_equal(result, 0);

assert_int_equal(env_atomic_read(&cache->user_parts[part_id].runtime->cleaning.policy.alru.size), 1);
assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_head, -collision_table_entries);
assert_int_equal(cache->user_parts[part_id].runtime->cleaning.policy.alru.lru_tail, -collision_table_entries);
assert_int_equal(env_atomic_read(&cache->user_parts[part_id].clean_pol->policy.alru.size), 1);
assert_int_equal(cache->user_parts[part_id].clean_pol->policy.alru.lru_head, -collision_table_entries);
assert_int_equal(cache->user_parts[part_id].clean_pol->policy.alru.lru_tail, -collision_table_entries);

assert_int_equal(cache->device->runtime_meta->cleaning_thread_access, 0);

test_free(cache->device->runtime_meta);
test_free(cache->device);
test_free(cache->user_parts[part_id].runtime);
test_free(cache->user_parts[part_id].clean_pol);
test_free(cache->user_parts[part_id].part.runtime);
test_free(cache);
}

@ -28,7 +28,7 @@
#include "../utils/utils_cache_line.h"
#include "../ocf_request.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../metadata/metadata.h"
#include "../eviction/eviction.h"
#include "../promotion/promotion.h"
@ -49,7 +49,7 @@ void __wrap_ocf_req_hash_unlock_wr(struct ocf_request *req)
{
}

uint32_t __wrap_ocf_part_has_space(struct ocf_request *req)
uint32_t __wrap_ocf_user_part_has_space(struct ocf_request *req)
{
return mock();
}
@ -71,7 +71,7 @@ void __wrap_ocf_metadata_end_exclusive_access(
{
}

bool __wrap_ocf_part_is_enabled(struct ocf_user_part *target_part)
bool __wrap_ocf_user_part_is_enabled(struct ocf_user_part *target_part)
{
return mock();
}
@ -107,7 +107,7 @@ static void ocf_prepare_clines_miss_test01(void **state)
struct ocf_cache cache;
struct ocf_request req = {.cache = &cache };
print_test_description("Target part is disabled and empty\n");
will_return(__wrap_ocf_part_is_enabled, false);
will_return(__wrap_ocf_user_part_is_enabled, false);
expect_function_call(__wrap_ocf_req_set_mapping_error);
assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
}
@ -120,7 +120,7 @@ static void ocf_prepare_clines_miss_test02(void **state)
print_test_description("Target part is disabled but has cachelines assigned.\n");
print_test_description("\tMark mapping error\n");

will_return(__wrap_ocf_part_is_enabled, false);
will_return(__wrap_ocf_user_part_is_enabled, false);
expect_function_call(__wrap_ocf_req_set_mapping_error);

assert_int_equal(ocf_prepare_clines_miss(&req, NULL), -OCF_ERR_NO_LOCK);
@ -134,8 +134,8 @@ static void ocf_prepare_clines_miss_test03(void **state)
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("\tEviction is ok and cachelines lock is acquired.\n");

will_return(__wrap_ocf_part_is_enabled, true);
will_return_always(__wrap_ocf_part_has_space, false);
will_return(__wrap_ocf_user_part_is_enabled, true);
will_return_always(__wrap_ocf_user_part_has_space, false);
expect_function_call(__wrap_space_managment_evict_do);
will_return_always(__wrap_space_managment_evict_do, LOOKUP_INSERTED);

@ -155,8 +155,8 @@ static void ocf_prepare_clines_miss_test04(void **state)
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("\tEviction failed\n");

will_return(__wrap_ocf_part_is_enabled, true);
will_return_always(__wrap_ocf_part_has_space, false);
will_return(__wrap_ocf_user_part_is_enabled, true);
will_return_always(__wrap_ocf_user_part_has_space, false);

expect_function_call(__wrap_space_managment_evict_do);
will_return(__wrap_space_managment_evict_do, LOOKUP_MISS);
@ -174,12 +174,12 @@ static void ocf_prepare_clines_miss_test06(void **state)
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("Eviction and mapping were ok, but failed to lock cachelines.\n");

will_return_always(__wrap_ocf_part_has_space, false);
will_return_always(__wrap_ocf_user_part_has_space, false);

expect_function_call(__wrap_space_managment_evict_do);
will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);

will_return(__wrap_ocf_part_is_enabled, true);
will_return(__wrap_ocf_user_part_is_enabled, true);
will_return_always(__wrap_ocf_req_test_mapping_error, false);

expect_function_call(__wrap_lock_clines);
@ -198,12 +198,12 @@ static void ocf_prepare_clines_miss_test07(void **state)
print_test_description("Target part is enabled but doesn't have enough space.\n");
print_test_description("Eviction and mapping were ok, lock not acquired.\n");

will_return_always(__wrap_ocf_part_has_space, false);
will_return_always(__wrap_ocf_user_part_has_space, false);

expect_function_call(__wrap_space_managment_evict_do);
will_return(__wrap_space_managment_evict_do, LOOKUP_HIT);

will_return(__wrap_ocf_part_is_enabled, true);
will_return(__wrap_ocf_user_part_is_enabled, true);

will_return_always(__wrap_ocf_req_test_mapping_error, false);

@ -221,8 +221,8 @@ static void ocf_prepare_clines_miss_test08(void **state)
print_test_description("Target part is enabled has enough space.\n");
print_test_description("\tMapping and cacheline lock are both ok\n");

will_return(__wrap_ocf_part_is_enabled, true);
will_return_always(__wrap_ocf_part_has_space, true);
will_return(__wrap_ocf_user_part_is_enabled, true);
will_return_always(__wrap_ocf_user_part_has_space, true);

expect_function_call(__wrap_ocf_engine_map);
will_return_always(__wrap_ocf_req_test_mapping_error, false);
@ -2,7 +2,7 @@
* <tested_file_path>src/eviction/eviction.c</tested_file_path>
* <tested_function>ocf_evict_do</tested_function>
* <functions_to_leave>
ocf_evict_partitions
ocf_evict_user_partitions
* </functions_to_leave>
*/

@ -19,16 +19,16 @@

#include "eviction.h"
#include "ops.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"

#include "eviction/eviction.c/eviction_generated_wraps.c"

struct test_cache
{
struct ocf_cache cache;
struct ocf_user_part_config part[OCF_IO_CLASS_MAX];
uint32_t overflow[OCF_IO_CLASS_MAX];
uint32_t evictable[OCF_IO_CLASS_MAX];
struct ocf_user_part_config part[OCF_USER_IO_CLASS_MAX];
uint32_t overflow[OCF_USER_IO_CLASS_MAX];
uint32_t evictable[OCF_USER_IO_CLASS_MAX];
uint32_t req_unmapped;
};

@ -37,24 +37,24 @@ bool __wrap_ocf_eviction_can_evict(ocf_cache_t cache)
return true;
}

uint32_t __wrap_ocf_part_overflow_size(struct ocf_cache *cache,
struct ocf_user_part *part)
uint32_t __wrap_ocf_user_part_overflow_size(struct ocf_cache *cache,
struct ocf_user_part *user_part)
{
struct test_cache* tcache = cache;

return tcache->overflow[part->id];
return tcache->overflow[user_part->part.id];
}

uint32_t __wrap_ocf_evict_calculate(ocf_cache_t cache,
struct ocf_user_part *part, uint32_t to_evict, bool roundup)
struct ocf_user_part *user_part, uint32_t to_evict, bool roundup)
{
struct test_cache* tcache = cache;

return min(tcache->evictable[part->id], to_evict);
return min(tcache->evictable[user_part->part.id], to_evict);
}

uint32_t __wrap_ocf_eviction_need_space(struct ocf_cache *cache,
ocf_queue_t io_queue, struct ocf_user_part *part,
ocf_queue_t io_queue, struct ocf_part *part,
uint32_t clines)
{
struct test_cache *tcache = (struct test_cache *)cache;
@ -94,7 +94,7 @@ bool ocf_cache_is_device_attached(ocf_cache_t cache)

/* FIXME: copy-pasted from OCF */
int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
int ocf_user_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_lst_entry *e1, struct ocf_lst_entry *e2)
{
struct ocf_user_part *p1 = container_of(e1, struct ocf_user_part,
@ -102,10 +102,9 @@ int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_user_part *p2 = container_of(e2, struct ocf_user_part,
lst_valid);
size_t p1_size = ocf_cache_is_device_attached(cache) ?
p1->runtime->curr_size : 0;
p1->part.runtime->curr_size : 0;
size_t p2_size = ocf_cache_is_device_attached(cache) ?
p2->runtime->curr_size : 0;

p2->part.runtime->curr_size : 0;
int v1 = p1->config->priority;
int v2 = p2->config->priority;

@ -154,6 +153,7 @@ int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
return v2 - v1;
}

static struct ocf_lst_entry *_list_getter(
struct ocf_cache *cache, ocf_cache_line_t idx)
{
@ -166,18 +166,18 @@ static void init_part_list(struct test_cache *tcache)
{
unsigned i;

for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
tcache->cache.user_parts[i].id = i;
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
tcache->cache.user_parts[i].part.id = i;
tcache->cache.user_parts[i].config = &tcache->part[i];
tcache->cache.user_parts[i].config->priority = i+1;
tcache->cache.user_parts[i].config->flags.eviction = 1;
}

ocf_lst_init((ocf_cache_t)tcache, &tcache->cache.lst_part, OCF_IO_CLASS_MAX,
_list_getter, ocf_part_lst_cmp_valid);
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
ocf_lst_init_entry(&tcache->cache.lst_part, &tcache->cache.user_parts[i].lst_valid);
ocf_lst_add_tail(&tcache->cache.lst_part, i);
ocf_lst_init((ocf_cache_t)tcache, &tcache->cache.user_part_list, OCF_USER_IO_CLASS_MAX,
_list_getter, ocf_user_part_lst_cmp_valid);
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
ocf_lst_init_entry(&tcache->cache.user_part_list, &tcache->cache.user_parts[i].lst_valid);
ocf_lst_add_tail(&tcache->cache.user_part_list, i);
}
}

@ -190,7 +190,7 @@ uint32_t __wrap_ocf_engine_unmapped_count(struct ocf_request *req)

#define _expect_evict_call(tcache, part_id, req_count, ret_count) \
do { \
expect_value(__wrap_ocf_eviction_need_space, part, &tcache.cache.user_parts[part_id]); \
expect_value(__wrap_ocf_eviction_need_space, part, &tcache.cache.user_parts[part_id].part); \
expect_value(__wrap_ocf_eviction_need_space, clines, req_count); \
expect_function_call(__wrap_ocf_eviction_need_space); \
will_return(__wrap_ocf_eviction_need_space, ret_count); \

@ -199,7 +199,7 @@ unsigned current_case;

struct ocf_lru_list list;

struct ocf_lru_list *__wrap_evp_lru_get_list(struct ocf_user_part *part,
struct ocf_lru_list *__wrap_evp_lru_get_list(struct ocf_user_part *user_part,
uint32_t evp, bool clean)
{
unsigned i = 0;

@ -30,7 +30,7 @@ ocf_mngt_cache_mode_has_lazy_write
#include "../ocf_queue_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"

@ -24,7 +24,7 @@
#include "../ocf_queue_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"

@ -31,14 +31,14 @@
#include "../ocf_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_user_part.h"
#include "../eviction/ops.h"
#include "ocf_env.h"

#include "mngt/ocf_mngt_io_class.c/ocf_mngt_io_class_generated_wraps.c"

/* Functions mocked for testing purposes */
bool __wrap_ocf_part_is_added(struct ocf_user_part *part)
bool __wrap_ocf_user_part_is_added(struct ocf_user_part *user_part)
{
function_called();
return mock();
@ -51,20 +51,20 @@ int __wrap__ocf_mngt_set_partition_size(struct ocf_cache *cache,
return mock();
}

void __wrap_ocf_part_set_prio(struct ocf_cache *cache,
struct ocf_user_part *part, int16_t prio)
void __wrap_ocf_user_part_set_prio(struct ocf_cache *cache,
struct ocf_user_part *user_part, int16_t prio)
{
function_called();
}

bool __wrap_ocf_part_is_valid(struct ocf_user_part *part)
bool __wrap_ocf_user_part_is_valid(struct ocf_user_part *user_part)
{
function_called();
return mock();
}

void __wrap_ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
void __wrap_ocf_user_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid)
{
function_called();
@ -79,7 +79,7 @@ int __wrap__ocf_mngt_io_class_validate_cfg(ocf_cache_t cache,
return mock();
}

void __wrap_ocf_part_sort(struct ocf_cache *cache)
void __wrap_ocf_user_part_sort(struct ocf_cache *cache)
{
function_called();
}
@ -93,7 +93,7 @@ static inline void setup_valid_config(struct ocf_mngt_io_class_config *cfg,
bool remove)
{
int i;
for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
cfg[i].class_id = i;
cfg[i].name = remove ? NULL : i == 0 ? "unclassified" :"test_io_class_name" ;
cfg[i].prio = i;
@ -112,7 +112,7 @@ static void ocf_mngt_io_classes_configure_test03(void **state)

cache = test_malloc(sizeof(*cache));

for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
cache->user_parts[i].config =
test_malloc(sizeof(struct ocf_user_part_config));
}
@ -120,30 +120,30 @@ static void ocf_mngt_io_classes_configure_test03(void **state)

setup_valid_config(cfg.config, true);

for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
expect_function_call(__wrap__ocf_mngt_io_class_validate_cfg);
will_return(__wrap__ocf_mngt_io_class_validate_cfg, 0);
}

/* Removing default io_class is not allowed */
for (i = 1; i < OCF_IO_CLASS_MAX; i++) {
expect_function_call(__wrap_ocf_part_is_valid);
will_return(__wrap_ocf_part_is_valid, 1);
for (i = 1; i < OCF_USER_IO_CLASS_MAX; i++) {
expect_function_call(__wrap_ocf_user_part_is_valid);
will_return(__wrap_ocf_user_part_is_valid, 1);

expect_function_call(__wrap_ocf_part_set_valid);
expect_function_call(__wrap_ocf_user_part_set_valid);
/* Test assumes default partition has id equal 0 */
expect_in_range(__wrap_ocf_part_set_valid, id, OCF_IO_CLASS_ID_MIN + 1,
expect_in_range(__wrap_ocf_user_part_set_valid, id, OCF_IO_CLASS_ID_MIN + 1,
OCF_IO_CLASS_ID_MAX);
expect_value(__wrap_ocf_part_set_valid, valid, false);
expect_value(__wrap_ocf_user_part_set_valid, valid, false);
}

expect_function_call(__wrap_ocf_part_sort);
expect_function_call(__wrap_ocf_user_part_sort);

result = ocf_mngt_cache_io_classes_configure(cache, &cfg);

assert_int_equal(result, 0);

for (i = 0; i < OCF_IO_CLASS_MAX; i++)
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++)
test_free(cache->user_parts[i].config);

test_free(cache);
@ -157,7 +157,7 @@ static void ocf_mngt_io_classes_configure_test02(void **state)

cache = test_malloc(sizeof(*cache));

for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
cache->user_parts[i].config =
test_malloc(sizeof(struct ocf_user_part_config));
}
@ -169,46 +169,46 @@ static void ocf_mngt_io_classes_configure_test02(void **state)

print_test_description("Configure all possible io classes");

for (i = 0; i < OCF_IO_CLASS_MAX; i++) {
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++) {
expect_function_call(__wrap__ocf_mngt_io_class_validate_cfg);
will_return(__wrap__ocf_mngt_io_class_validate_cfg, 0);
}

/* Configure default io_class */
expect_function_call(__wrap_ocf_part_is_added);
will_return(__wrap_ocf_part_is_added, 1);
expect_function_call(__wrap_ocf_user_part_is_added);
will_return(__wrap_ocf_user_part_is_added, 1);

expect_function_call(__wrap__ocf_mngt_set_partition_size);
will_return(__wrap__ocf_mngt_set_partition_size, 0);

expect_function_call(__wrap_ocf_part_set_prio);
expect_function_call(__wrap_ocf_user_part_set_prio);

/* Configure custom io_classes */
for (i = 1; i < OCF_IO_CLASS_MAX; i++) {
expect_function_call(__wrap_ocf_part_is_added);
will_return(__wrap_ocf_part_is_added, 1);
for (i = 1; i < OCF_USER_IO_CLASS_MAX; i++) {
expect_function_call(__wrap_ocf_user_part_is_added);
will_return(__wrap_ocf_user_part_is_added, 1);

expect_function_call(__wrap__ocf_mngt_set_partition_size);
will_return(__wrap__ocf_mngt_set_partition_size, 0);

expect_function_call(__wrap_ocf_part_is_valid);
will_return(__wrap_ocf_part_is_valid, 0);
expect_function_call(__wrap_ocf_user_part_is_valid);
will_return(__wrap_ocf_user_part_is_valid, 0);

expect_function_call(__wrap_ocf_part_set_valid);
expect_in_range(__wrap_ocf_part_set_valid, id, OCF_IO_CLASS_ID_MIN,
expect_function_call(__wrap_ocf_user_part_set_valid);
expect_in_range(__wrap_ocf_user_part_set_valid, id, OCF_IO_CLASS_ID_MIN,
OCF_IO_CLASS_ID_MAX);
expect_value(__wrap_ocf_part_set_valid, valid, true);
expect_value(__wrap_ocf_user_part_set_valid, valid, true);

expect_function_call(__wrap_ocf_part_set_prio);
expect_function_call(__wrap_ocf_user_part_set_prio);
}

expect_function_call(__wrap_ocf_part_sort);
expect_function_call(__wrap_ocf_user_part_sort);

result = ocf_mngt_cache_io_classes_configure(cache, &cfg);

assert_int_equal(result, 0);

for (i = 0; i < OCF_IO_CLASS_MAX; i++)
for (i = 0; i < OCF_USER_IO_CLASS_MAX; i++)
test_free(cache->user_parts[i].config);

test_free(cache);
@ -217,7 +217,7 @@ static void ocf_mngt_io_classes_configure_test02(void **state)
static void ocf_mngt_io_classes_configure_test01(void **state)
{
struct ocf_cache *cache;
struct ocf_mngt_io_classes_config cfg[OCF_IO_CLASS_MAX];
struct ocf_mngt_io_classes_config cfg[OCF_USER_IO_CLASS_MAX];
int error_code = -OCF_ERR_INVAL;
int result;