Move common user and freelist partition data to a new struct

A new structure, ocf_part, is added to hold the data common to both
user partitions and the freelist partition: part_runtime and part_id.
ocf_user_part now contains an ocf_part structure as well as a pointer to
the cleaning partition runtime metadata (moved out of part_runtime) and
the user partition config (no change here).

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski
Date: 2021-06-14 17:05:42 +02:00
Parent: c0b76f9e01
Commit: 87f834c793
48 changed files with 561 additions and 563 deletions
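
To make the refactor easier to follow, here is a rough sketch of the layout this commit implies. It assumes OCF's existing types (ocf_part_id_t, struct ocf_part_runtime, struct ocf_user_part_config, struct ocf_lst_entry, struct ocf_refcnt); only fields visible in this diff or in the commit message are shown, and the exact definitions in the tree may differ.

/* Sketch only: field names and ordering are approximations, not the
 * verbatim definitions. */
struct ocf_part {
        ocf_part_id_t id;                       /* part_id, now shared */
        struct ocf_part_runtime *runtime;       /* e.g. runtime->curr_size */
};

struct ocf_user_part {
        struct ocf_user_part_config *config;    /* priority, min/max_size, flags, name */
        struct ocf_part part;                   /* data common with the freelist partition */
        struct ocf_lst_entry lst_valid;         /* entry in cache->user_part_list */
        struct {
                struct ocf_refcnt counter;      /* cf. curr_part->cleaning.counter */
                void *policy_meta;              /* hypothetical member standing in for the
                                                 * cleaning runtime pointer moved out of
                                                 * part_runtime */
        } cleaning;
};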


@@ -102,7 +102,7 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
{
ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
struct ocf_user_part *part = &cache->user_parts[part_id];
struct ocf_part *part = &cache->user_parts[part_id].part;
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
bool line_is_clean;
@@ -131,8 +131,10 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
env_atomic_dec(&req->core->runtime_meta->
part_counters[part_id].dirty_clines);
if (likely(evict_policy_ops[evp_type].clean_cline))
evict_policy_ops[evp_type].clean_cline(cache, part, line);
if (likely(evict_policy_ops[evp_type].clean_cline)) {
evict_policy_ops[evp_type].clean_cline(cache,
part, line);
}
ocf_purge_cleaning_policy(cache, line);
}
@@ -145,7 +147,7 @@ void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
{
ocf_cache_line_t line = req->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
struct ocf_user_part *part = &cache->user_parts[part_id];
struct ocf_part *part = &cache->user_parts[part_id].part;
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
bool line_was_dirty;
@@ -174,8 +176,10 @@ void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
env_atomic_inc(&req->core->runtime_meta->
part_counters[part_id].dirty_clines);
if (likely(evict_policy_ops[evp_type].dirty_cline))
evict_policy_ops[evp_type].dirty_cline(cache, part, line);
if (likely(evict_policy_ops[evp_type].dirty_cline)) {
evict_policy_ops[evp_type].dirty_cline(cache,
part, line);
}
}
}
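
In the hunks above the call sites now pass the embedded struct ocf_part (via &cache->user_parts[part_id].part), which suggests the clean_cline/dirty_cline hooks take the generic partition type. A sketch of the assumed signatures follows; the ops-table type name and any remaining members are guesses, not taken from this diff.

/* Assumed shape of the eviction policy ops after this change. */
struct eviction_policy_ops {
        void (*clean_cline)(struct ocf_cache *cache, struct ocf_part *part,
                        ocf_cache_line_t cline);
        void (*dirty_cline)(struct ocf_cache *cache, struct ocf_part *part,
                        ocf_cache_line_t cline);
        /* remaining hooks elided */
};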


@@ -9,7 +9,7 @@
#include "../concurrency/ocf_concurrency.h"
#include "../ocf_request.h"
#include "utils_cleaner.h"
#include "utils_part.h"
#include "utils_user_part.h"
#include "utils_io.h"
#include "utils_cache_line.h"
#include "../ocf_queue_priv.h"
@@ -1052,7 +1052,7 @@ void ocf_cleaner_refcnt_freeze(ocf_cache_t cache)
struct ocf_user_part *curr_part;
ocf_part_id_t part_id;
for_each_part(cache, curr_part, part_id)
for_each_user_part(cache, curr_part, part_id)
ocf_refcnt_freeze(&curr_part->cleaning.counter);
}
@@ -1061,7 +1061,7 @@ void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)
struct ocf_user_part *curr_part;
ocf_part_id_t part_id;
for_each_part(cache, curr_part, part_id)
for_each_user_part(cache, curr_part, part_id)
ocf_refcnt_unfreeze(&curr_part->cleaning.counter);
}
@@ -1084,7 +1084,7 @@ void ocf_cleaner_refcnt_register_zero_cb(ocf_cache_t cache,
ctx->cb = cb;
ctx->priv = priv;
for_each_part(cache, curr_part, part_id) {
for_each_user_part(cache, curr_part, part_id) {
env_atomic_inc(&ctx->waiting);
ocf_refcnt_register_zero_cb(&curr_part->cleaning.counter,
ocf_cleaner_refcnt_register_zero_cb_finish, ctx);
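
The renamed iterator pairs naturally with the embedded struct ocf_part. A hypothetical helper (not part of this commit) that walks all valid user partitions and totals their occupancy:

/* Hypothetical example: sum occupancy across valid user partitions using the
 * renamed for_each_user_part() iterator and the embedded struct ocf_part. */
static uint32_t total_user_part_occupancy(ocf_cache_t cache)
{
        struct ocf_user_part *user_part;
        ocf_part_id_t part_id;
        uint32_t total = 0;

        for_each_user_part(cache, user_part, part_id)
                total += ocf_part_get_occupancy(&user_part->part);

        return total;
}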


@@ -1,180 +0,0 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_PARTITION_H__
#define __UTILS_PARTITION_H__
#include "../ocf_request.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../metadata/metadata_partition.h"
void ocf_part_init(struct ocf_cache *cache);
static inline bool ocf_part_is_valid(struct ocf_user_part *part)
{
return !!part->config->flags.valid;
}
static inline void ocf_part_set_prio(struct ocf_cache *cache,
struct ocf_user_part *part, int16_t prio)
{
if (part->config->priority != prio)
part->config->priority = prio;
}
static inline int16_t ocf_part_get_prio(struct ocf_cache *cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_IO_CLASS_MAX)
return cache->user_parts[part_id].config->priority;
return OCF_IO_CLASS_PRIO_LOWEST;
}
void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid);
static inline bool ocf_part_is_added(struct ocf_user_part *part)
{
return !!part->config->flags.added;
}
static inline ocf_part_id_t ocf_part_class2id(ocf_cache_t cache, uint64_t class)
{
if (class < OCF_IO_CLASS_MAX)
if (cache->user_parts[class].config->flags.valid)
return class;
return PARTITION_DEFAULT;
}
static inline uint32_t ocf_part_get_occupancy(struct ocf_user_part *part)
{
return part->runtime->curr_size;
}
static inline uint32_t ocf_part_get_min_size(ocf_cache_t cache,
struct ocf_user_part *part)
{
uint64_t ioclass_size;
ioclass_size = (uint64_t)part->config->min_size *
(uint64_t)cache->conf_meta->cachelines;
ioclass_size /= 100;
return (uint32_t)ioclass_size;
}
static inline uint32_t ocf_part_get_max_size(ocf_cache_t cache,
struct ocf_user_part *part)
{
uint64_t ioclass_size, max_size, cache_size;
max_size = part->config->max_size;
cache_size = cache->conf_meta->cachelines;
ioclass_size = max_size * cache_size;
ioclass_size = OCF_DIV_ROUND_UP(ioclass_size, 100);
return (uint32_t)ioclass_size;
}
void ocf_part_move(struct ocf_request *req);
#define for_each_part(cache, part, id) \
for_each_lst_entry(&cache->lst_part, part, id, \
struct ocf_user_part, lst_valid)
static inline void ocf_part_sort(struct ocf_cache *cache)
{
ocf_lst_sort(&cache->lst_part);
}
static inline bool ocf_part_is_enabled(struct ocf_user_part *part)
{
return part->config->max_size != 0;
}
static inline uint32_t ocf_part_overflow_size(struct ocf_cache *cache,
struct ocf_user_part *part)
{
uint32_t part_occupancy = ocf_part_get_occupancy(part);
uint32_t part_occupancy_limit = ocf_part_get_max_size(cache, part);
if (part_occupancy > part_occupancy_limit)
return part_occupancy - part_occupancy_limit;
return 0;
}
static inline bool ocf_part_has_space(struct ocf_request *req)
{
struct ocf_user_part *target_part = &req->cache->user_parts[req->part_id];
uint64_t part_occupancy_limit =
ocf_part_get_max_size(req->cache, target_part);
uint64_t needed_cache_lines = ocf_engine_repart_count(req) +
ocf_engine_unmapped_count(req);
uint64_t part_occupancy = ocf_part_get_occupancy(target_part);
return (part_occupancy + needed_cache_lines <= part_occupancy_limit);
}
static inline ocf_cache_mode_t ocf_part_get_cache_mode(ocf_cache_t cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_IO_CLASS_MAX)
return cache->user_parts[part_id].config->cache_mode;
return ocf_cache_mode_none;
}
static inline bool ocf_part_is_prio_valid(int64_t prio)
{
switch (prio) {
case OCF_IO_CLASS_PRIO_HIGHEST ... OCF_IO_CLASS_PRIO_LOWEST:
case OCF_IO_CLASS_PRIO_PINNED:
return true;
default:
return false;
}
}
/**
 * Routine checks the validity of a partition name.
 *
 * The following conditions are checked:
 * - string too long
 * - string containing invalid characters (outside of low ASCII)
 * The following condition is NOT checked:
 * - empty string (an empty string is NOT a valid partition name, but
 * this function returns true for an empty string nevertheless).
 *
 * @return true if the partition name is valid
 */
static inline bool ocf_part_is_name_valid(const char *name)
{
uint32_t length = 0;
while (*name) {
if (*name < ' ' || *name > '~')
return false;
if (',' == *name || '"' == *name)
return false;
name++;
length++;
if (length >= OCF_IO_CLASS_NAME_MAX)
return false;
}
return true;
}
#endif /* __UTILS_PARTITION_H__ */


@@ -9,17 +9,17 @@
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../eviction/ops.h"
#include "utils_part.h"
#include "utils_user_part.h"
static struct ocf_lst_entry *ocf_part_lst_getter_valid(
static struct ocf_lst_entry *ocf_user_part_lst_getter_valid(
struct ocf_cache *cache, ocf_cache_line_t idx)
{
ENV_BUG_ON(idx > OCF_IO_CLASS_MAX);
ENV_BUG_ON(idx > OCF_USER_IO_CLASS_MAX);
return &cache->user_parts[idx].lst_valid;
}
static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
static int ocf_user_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_lst_entry *e1, struct ocf_lst_entry *e2)
{
struct ocf_user_part *p1 = container_of(e1, struct ocf_user_part,
@@ -27,10 +27,9 @@ static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_user_part *p2 = container_of(e2, struct ocf_user_part,
lst_valid);
size_t p1_size = ocf_cache_is_device_attached(cache) ?
p1->runtime->curr_size : 0;
p1->part.runtime->curr_size : 0;
size_t p2_size = ocf_cache_is_device_attached(cache) ?
p2->runtime->curr_size : 0;
p2->part.runtime->curr_size : 0;
int v1 = p1->config->priority;
int v2 = p2->config->priority;
@@ -79,13 +78,14 @@ static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
return v2 - v1;
}
void ocf_part_init(struct ocf_cache *cache)
void ocf_user_part_init(struct ocf_cache *cache)
{
ocf_lst_init(cache, &cache->lst_part, OCF_IO_CLASS_MAX,
ocf_part_lst_getter_valid, ocf_part_lst_cmp_valid);
ocf_lst_init(cache, &cache->user_part_list, OCF_USER_IO_CLASS_MAX,
ocf_user_part_lst_getter_valid,
ocf_user_part_lst_cmp_valid);
}
void ocf_part_move(struct ocf_request *req)
void ocf_user_part_move(struct ocf_request *req)
{
struct ocf_cache *cache = req->cache;
struct ocf_map_info *entry;
@@ -117,8 +117,8 @@ void ocf_part_move(struct ocf_request *req)
id_old = ocf_metadata_get_partition_id(cache, line);
id_new = req->part_id;
ENV_BUG_ON(id_old >= OCF_IO_CLASS_MAX ||
id_new >= OCF_IO_CLASS_MAX);
ENV_BUG_ON(id_old >= OCF_USER_IO_CLASS_MAX ||
id_new >= OCF_USER_IO_CLASS_MAX);
if (id_old == id_new) {
/* Partition of the request and cache line is the same,
@@ -175,22 +175,23 @@ void ocf_part_move(struct ocf_request *req)
}
}
void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
void ocf_user_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid)
{
struct ocf_user_part *part = &cache->user_parts[id];
struct ocf_user_part *user_part = &cache->user_parts[id];
if (valid ^ part->config->flags.valid) {
if (valid ^ user_part->config->flags.valid) {
if (valid) {
part->config->flags.valid = true;
user_part->config->flags.valid = true;
cache->conf_meta->valid_parts_no++;
} else {
part->config->flags.valid = false;
user_part->config->flags.valid = false;
cache->conf_meta->valid_parts_no--;
part->config->priority = OCF_IO_CLASS_PRIO_LOWEST;
part->config->min_size = 0;
part->config->max_size = PARTITION_SIZE_MAX;
ENV_BUG_ON(env_strncpy(part->config->name, sizeof(part->config->name),
user_part->config->priority = OCF_IO_CLASS_PRIO_LOWEST;
user_part->config->min_size = 0;
user_part->config->max_size = PARTITION_SIZE_MAX;
ENV_BUG_ON(env_strncpy(user_part->config->name,
sizeof(user_part->config->name),
"Inactive", 9));
}
}

src/utils/utils_user_part.h (new file, 181 lines)

@@ -0,0 +1,181 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_PARTITION_H__
#define __UTILS_PARTITION_H__
#include "../ocf_request.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../metadata/metadata_partition.h"
void ocf_user_part_init(struct ocf_cache *cache);
static inline bool ocf_user_part_is_valid(struct ocf_user_part *user_part)
{
return !!user_part->config->flags.valid;
}
static inline void ocf_user_part_set_prio(struct ocf_cache *cache,
struct ocf_user_part *user_part, int16_t prio)
{
if (user_part->config->priority != prio)
user_part->config->priority = prio;
}
static inline int16_t ocf_user_part_get_prio(struct ocf_cache *cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_USER_IO_CLASS_MAX)
return cache->user_parts[part_id].config->priority;
return OCF_IO_CLASS_PRIO_LOWEST;
}
void ocf_user_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid);
static inline bool ocf_user_part_is_added(struct ocf_user_part *user_part)
{
return !!user_part->config->flags.added;
}
static inline ocf_part_id_t ocf_user_part_class2id(ocf_cache_t cache, uint64_t class)
{
if (class < OCF_USER_IO_CLASS_MAX)
if (cache->user_parts[class].config->flags.valid)
return class;
return PARTITION_DEFAULT;
}
static inline uint32_t ocf_part_get_occupancy(struct ocf_part *part)
{
return part->runtime->curr_size;
}
static inline uint32_t ocf_user_part_get_min_size(ocf_cache_t cache,
struct ocf_user_part *user_part)
{
uint64_t ioclass_size;
ioclass_size = (uint64_t)user_part->config->min_size *
(uint64_t)cache->conf_meta->cachelines;
ioclass_size /= 100;
return (uint32_t)ioclass_size;
}
static inline uint32_t ocf_user_part_get_max_size(ocf_cache_t cache,
struct ocf_user_part *user_part)
{
uint64_t ioclass_size, max_size, cache_size;
max_size = user_part->config->max_size;
cache_size = cache->conf_meta->cachelines;
ioclass_size = max_size * cache_size;
ioclass_size = OCF_DIV_ROUND_UP(ioclass_size, 100);
return (uint32_t)ioclass_size;
}
void ocf_user_part_move(struct ocf_request *req);
#define for_each_user_part(cache, user_part, id) \
for_each_lst_entry(&cache->user_part_list, user_part, id, \
struct ocf_user_part, lst_valid)
static inline void ocf_user_part_sort(struct ocf_cache *cache)
{
ocf_lst_sort(&cache->user_part_list);
}
static inline bool ocf_user_part_is_enabled(struct ocf_user_part *user_part)
{
return user_part->config->max_size != 0;
}
static inline uint32_t ocf_user_part_overflow_size(struct ocf_cache *cache,
struct ocf_user_part *user_part)
{
uint32_t part_occupancy = ocf_part_get_occupancy(&user_part->part);
uint32_t part_occupancy_limit = ocf_user_part_get_max_size(cache,
user_part);
if (part_occupancy > part_occupancy_limit)
return part_occupancy - part_occupancy_limit;
return 0;
}
static inline bool ocf_user_part_has_space(struct ocf_request *req)
{
struct ocf_user_part *user_part = &req->cache->user_parts[req->part_id];
uint64_t part_occupancy_limit =
ocf_user_part_get_max_size(req->cache, user_part);
uint64_t needed_cache_lines = ocf_engine_repart_count(req) +
ocf_engine_unmapped_count(req);
uint64_t part_occupancy = ocf_part_get_occupancy(&user_part->part);
return (part_occupancy + needed_cache_lines <= part_occupancy_limit);
}
static inline ocf_cache_mode_t ocf_user_part_get_cache_mode(ocf_cache_t cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_USER_IO_CLASS_MAX)
return cache->user_parts[part_id].config->cache_mode;
return ocf_cache_mode_none;
}
static inline bool ocf_user_part_is_prio_valid(int64_t prio)
{
switch (prio) {
case OCF_IO_CLASS_PRIO_HIGHEST ... OCF_IO_CLASS_PRIO_LOWEST:
case OCF_IO_CLASS_PRIO_PINNED:
return true;
default:
return false;
}
}
/**
 * Routine checks the validity of a partition name.
 *
 * The following conditions are checked:
 * - string too long
 * - string containing invalid characters (outside of low ASCII)
 * The following condition is NOT checked:
 * - empty string (an empty string is NOT a valid partition name, but
 * this function returns true for an empty string nevertheless).
 *
 * @return true if the partition name is valid
 */
static inline bool ocf_user_part_is_name_valid(const char *name)
{
uint32_t length = 0;
while (*name) {
if (*name < ' ' || *name > '~')
return false;
if (',' == *name || '"' == *name)
return false;
name++;
length++;
if (length >= OCF_IO_CLASS_NAME_MAX)
return false;
}
return true;
}
#endif /* __UTILS_PARTITION_H__ */
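
For context, a hypothetical caller of the occupancy helpers declared above; example_map() and example_evict_then_map() are placeholders, not OCF functions:

/* Hypothetical sketch: gate request mapping on the io-class occupancy limit. */
static int engine_map_example(struct ocf_request *req)
{
        if (ocf_user_part_has_space(req)) {
                /* Current occupancy plus the lines this request still needs
                 * fits below the io-class max_size limit. */
                return example_map(req);                /* placeholder */
        }

        /* Otherwise lines would have to be evicted first, e.g. starting from
         * partitions reported as over quota by ocf_user_part_overflow_size(). */
        return example_evict_then_map(req);             /* placeholder */
}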