Merge pull request #413 from mmichal10/occ-per-ioclass

Occupancy per ioclass
This commit is contained in:
Robert Baldyga
2020-12-21 23:43:54 +01:00
committed by GitHub
25 changed files with 1725 additions and 166 deletions

View File

@@ -14,6 +14,7 @@
#include "../utils/utils_cache_line.h"
#include "../ocf_request.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../eviction/eviction.h"
#include "../promotion/promotion.h"
@@ -127,7 +128,8 @@ void ocf_engine_update_req_info(struct ocf_cache *cache,
/*
* Need to move this cache line into other partition
*/
_entry->re_part = req->info.re_part = true;
_entry->re_part = true;
req->info.re_part_no++;
}
break;
@@ -254,7 +256,7 @@ static void ocf_engine_map_cache_line(struct ocf_request *req,
ocf_cleaning_t clean_policy_type;
if (!ocf_freelist_get_cache_line(cache->freelist, cache_line)) {
req->info.mapping_error = 1;
ocf_req_set_mapping_error(req);
return;
}
@@ -334,7 +336,7 @@ static void ocf_engine_map(struct ocf_request *req)
if (ocf_engine_unmapped_count(req) >
ocf_freelist_num_free(cache->freelist)) {
req->info.mapping_error = 1;
ocf_req_set_mapping_error(req);
return;
}
@@ -353,7 +355,7 @@ static void ocf_engine_map(struct ocf_request *req)
ocf_engine_map_cache_line(req, entry->core_line,
entry->hash, &entry->coll_idx);
if (req->info.mapping_error) {
if (ocf_req_test_mapping_error(req)) {
/*
* Eviction error (mapping error), need to
* clean, return and do pass through
@@ -375,7 +377,7 @@ static void ocf_engine_map(struct ocf_request *req)
}
if (!req->info.mapping_error) {
if (!ocf_req_test_mapping_error(req)) {
/* request has been inserted into cache - purge it from promotion
* policy */
ocf_promotion_req_purge(cache->promotion_policy, req);
@@ -408,15 +410,6 @@ static void _ocf_engine_clean_end(void *private_data, int error)
}
}
static int ocf_engine_evict(struct ocf_request *req)
{
if (!ocf_engine_unmapped_count(req))
return 0;
return space_managment_evict_do(req->cache, req,
ocf_engine_unmapped_count(req));
}
static int lock_clines(struct ocf_request *req,
const struct ocf_engine_callbacks *engine_cbs)
{
@@ -432,13 +425,139 @@ static int lock_clines(struct ocf_request *req,
}
}
/* Handle a request whose cachelines are already mapped: acquire cacheline
 * locks and, when the target partition is disabled or over its occupancy
 * limit (or a repart is needed), trigger eviction. Entered with the
 * hash-bucket read lock held; the lock is released before returning.
 * Returns lock_clines() status or -OCF_ERR_NO_LOCK; sets the request's
 * mapping-error flag when the request must fall back to pass-through. */
static inline int ocf_prepare_clines_hit(struct ocf_request *req,
		const struct ocf_engine_callbacks *engine_cbs)
{
	int lock_status = -OCF_ERR_NO_LOCK;
	struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
	uint32_t clines_to_evict;
	int res;

	/* Cachelines are mapped in correct partition */
	if (ocf_part_is_enabled(&req->cache->user_parts[req->part_id]) &&
			!ocf_engine_needs_repart(req)) {
		lock_status = lock_clines(req, engine_cbs);
		ocf_req_hash_unlock_rd(req);
		return lock_status;
	}

	res = ocf_part_check_space(req, &clines_to_evict);
	if (res == OCF_PART_HAS_SPACE)
		lock_status = lock_clines(req, engine_cbs);

	/* Since target part is empty and disabled, request should be submitted
	 * in pass-through */
	if (res == OCF_PART_IS_DISABLED)
		ocf_req_set_mapping_error(req);

	ocf_req_hash_unlock_rd(req);

	if (res != OCF_PART_IS_FULL)
		return lock_status;

	/* Partition is over its occupancy limit - evict under exclusive
	 * metadata access */
	ocf_metadata_start_exclusive_access(metadata_lock);

	/* Recompute eviction size - occupancy may have changed since the
	 * hash-bucket lock was dropped */
	ocf_part_check_space(req, &clines_to_evict);

	if (space_managment_evict_do(req->cache, req, clines_to_evict) ==
			LOOKUP_MISS) {
		ocf_req_set_mapping_error(req);
		goto unlock;
	}

	if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
		/* Target part is disabled but had some cachelines assigned. Submit
		 * request in pass-through after eviction has been made */
		ocf_req_set_mapping_error(req);
		goto unlock;
	}

	lock_status = lock_clines(req, engine_cbs);

unlock:
	ocf_metadata_end_exclusive_access(metadata_lock);

	return lock_status;
}
/* Map (and, if needed, evict space for) a request whose cachelines are not
 * fully mapped. Entered with the hash-bucket read lock held; it is upgraded
 * to a write lock for mapping and always released before returning. Returns
 * lock_clines() status or -OCF_ERR_NO_LOCK; sets the request's
 * mapping-error flag when the request must fall back to pass-through. */
static inline int ocf_prepare_clines_miss(struct ocf_request *req,
		const struct ocf_engine_callbacks *engine_cbs)
{
	int lock_status = -OCF_ERR_NO_LOCK;
	struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
	uint32_t clines_to_evict = 0;
	int res;

	/* Mapping must be performed holding (at least) hash-bucket write lock */
	ocf_req_hash_lock_upgrade(req);

	/* Verify whether partition occupancy threshold is not reached yet or cache
	 * is not out of free cachelines */
	res = ocf_part_check_space(req, &clines_to_evict);
	if (res == OCF_PART_IS_DISABLED) {
		/* Disabled and empty target partition - serve in pass-through */
		ocf_req_set_mapping_error(req);
		ocf_req_hash_unlock_wr(req);
		return lock_status;
	}

	if (res == OCF_PART_HAS_SPACE) {
		ocf_engine_map(req);
		if (ocf_req_test_mapping_error(req)) {
			/* Mapping failed despite the space check above - retry
			 * with eviction under exclusive metadata access */
			goto eviction;
		}

		lock_status = lock_clines(req, engine_cbs);
		if (lock_status < 0) {
			/* Mapping succeeded, but we failed to acquire cacheline lock.
			 * Don't try to evict, just return error to caller */
			ocf_req_set_mapping_error(req);
		}

		ocf_req_hash_unlock_wr(req);
		return lock_status;
	}

	/* res == OCF_PART_IS_FULL falls through here */
eviction:
	/* Eviction requires exclusive metadata access - drop the hash-bucket
	 * lock first to respect lock ordering */
	ocf_req_hash_unlock_wr(req);
	ocf_metadata_start_exclusive_access(metadata_lock);

	/* Recompute eviction size - occupancy may have changed while the
	 * hash-bucket lock was released */
	ocf_part_check_space(req, &clines_to_evict);

	if (space_managment_evict_do(req->cache, req, clines_to_evict) ==
			LOOKUP_MISS) {
		ocf_req_set_mapping_error(req);
		goto unlock;
	}

	if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
		/* Partition is disabled but it had cachelines assigned. Now, that they
		 * are evicted, don't try to map cachelines - we don't want to insert
		 * new cachelines - the request should be submitted in pass through
		 * mode instead */
		ocf_req_set_mapping_error(req);
		goto unlock;
	}

	ocf_engine_map(req);
	if (ocf_req_test_mapping_error(req))
		goto unlock;

	lock_status = lock_clines(req, engine_cbs);
	if (lock_status < 0)
		ocf_req_set_mapping_error(req);

unlock:
	ocf_metadata_end_exclusive_access(metadata_lock);

	return lock_status;
}
int ocf_engine_prepare_clines(struct ocf_request *req,
const struct ocf_engine_callbacks *engine_cbs)
{
bool mapped;
bool promote = true;
int lock = -ENOENT;
struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
int lock = -OCF_ERR_NO_LOCK;
/* Calculate hashes for hash-bucket locking */
ocf_req_hash(req);
@@ -452,50 +571,19 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
/* Request cachelines are already mapped, acquire cacheline
* lock */
lock = lock_clines(req, engine_cbs);
} else {
/* check if request should promote cachelines */
promote = ocf_promotion_req_should_promote(
req->cache->promotion_policy, req);
if (!promote)
req->info.mapping_error = 1;
}
if (mapped)
return ocf_prepare_clines_hit(req, engine_cbs);
if (mapped || !promote) {
/* Will not attempt mapping - release hash bucket lock */
/* check if request should promote cachelines */
promote = ocf_promotion_req_should_promote(
req->cache->promotion_policy, req);
if (!promote) {
ocf_req_set_mapping_error(req);
ocf_req_hash_unlock_rd(req);
} else {
/* Need to map (potentially evict) cachelines. Mapping must be
* performed holding (at least) hash-bucket write lock */
ocf_req_hash_lock_upgrade(req);
ocf_engine_map(req);
if (!req->info.mapping_error)
lock = lock_clines(req, engine_cbs);
ocf_req_hash_unlock_wr(req);
if (req->info.mapping_error) {
/* Not mapped - evict cachelines under global exclusive
* lock*/
ocf_metadata_start_exclusive_access(metadata_lock);
/* Now there is exclusive access for metadata. May
* traverse once again and evict cachelines if needed.
*/
if (ocf_engine_evict(req) == LOOKUP_MAPPED)
ocf_engine_map(req);
if (!req->info.mapping_error)
lock = lock_clines(req, engine_cbs);
ocf_metadata_end_exclusive_access(metadata_lock);
}
return lock;
}
return lock;
return ocf_prepare_clines_miss(req, engine_cbs);
}
static int _ocf_engine_clean_getter(struct ocf_cache *cache,

View File

@@ -47,6 +47,20 @@ static inline bool ocf_engine_is_hit(struct ocf_request *req)
*/
#define ocf_engine_is_miss(req) (!ocf_engine_is_hit(req))
/**
* @brief Check if all the cache lines are assigned to a good partition
*
* @param req OCF request
*
* @retval true request's cache lines are assigned to a good partition
* @retval false some of the request's cache lines needs to be reassigned to
* a target partition
*/
static inline bool ocf_engine_needs_repart(struct ocf_request *req)
{
	/* Any non-zero repart count means at least one cacheline has to be
	 * moved to the request's target partition. */
	return req->info.re_part_no != 0;
}
/**
* @brief Check if all cache lines are mapped fully
*
@@ -98,6 +112,18 @@ static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *req)
return req->core_line_count - (req->info.hit_no + req->info.invalid_no);
}
/**
* @brief Get number of cache lines to repart
*
* @param req OCF request
*
* @retval Number of cache lines to repart
*/
static inline uint32_t ocf_engine_repart_count(struct ocf_request *req)
{
	/* Number of cachelines that must be moved to the target partition. */
	const unsigned int repart_no = req->info.re_part_no;

	return repart_no;
}
/**
* @brief Get number of IOs to perform cache read or write
*

View File

@@ -69,7 +69,7 @@ static int _ocf_read_fast_do(struct ocf_request *req)
/* Get OCF request - increase reference counter */
ocf_req_get(req);
if (req->info.re_part) {
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
@@ -108,6 +108,7 @@ int ocf_read_fast(struct ocf_request *req)
{
bool hit;
int lock = OCF_LOCK_NOT_ACQUIRED;
bool part_has_space = false;
/* Get OCF request - increase reference counter */
ocf_req_get(req);
@@ -124,14 +125,18 @@ int ocf_read_fast(struct ocf_request *req)
ocf_engine_traverse(req);
hit = ocf_engine_is_hit(req);
if (hit) {
if (ocf_part_check_space(req, NULL) == OCF_PART_HAS_SPACE)
part_has_space = true;
if (hit && part_has_space) {
ocf_io_start(&req->ioi.io);
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
}
ocf_req_hash_unlock_rd(req);
if (hit) {
if (hit && part_has_space) {
OCF_DEBUG_RQ(req, "Fast path success");
if (lock >= 0) {
@@ -154,10 +159,7 @@ int ocf_read_fast(struct ocf_request *req)
/* Put OCF request - decrease reference counter */
ocf_req_put(req);
if (hit)
return OCF_FAST_PATH_YES;
else
return OCF_FAST_PATH_NO;
return (hit && part_has_space) ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;
}
/* __ __ _ _ ______ _ _____ _ _
@@ -177,6 +179,7 @@ int ocf_write_fast(struct ocf_request *req)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
int part_has_space = false;
/* Get OCF request - increase reference counter */
ocf_req_get(req);
@@ -193,14 +196,18 @@ int ocf_write_fast(struct ocf_request *req)
ocf_engine_traverse(req);
mapped = ocf_engine_is_mapped(req);
if (mapped) {
if (ocf_part_check_space(req, NULL) == OCF_PART_HAS_SPACE)
part_has_space = true;
if (mapped && part_has_space) {
ocf_io_start(&req->ioi.io);
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
}
ocf_req_hash_unlock_rd(req);
if (mapped) {
if (mapped && part_has_space) {
if (lock >= 0) {
OCF_DEBUG_RQ(req, "Fast path success");
@@ -223,6 +230,5 @@ int ocf_write_fast(struct ocf_request *req)
/* Put OCF request - decrease reference counter */
ocf_req_put(req);
return mapped ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;
return (mapped && part_has_space) ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;
}

View File

@@ -67,7 +67,7 @@ int ocf_read_pt_do(struct ocf_request *req)
return 0;
}
if (req->info.re_part) {
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);

View File

@@ -172,7 +172,7 @@ static int _ocf_read_generic_do(struct ocf_request *req)
ocf_req_hash_unlock_rd(req);
}
if (req->info.re_part) {
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
@@ -243,7 +243,7 @@ int ocf_read_generic(struct ocf_request *req)
lock = ocf_engine_prepare_clines(req, &_rd_engine_callbacks);
if (!req->info.mapping_error) {
if (!ocf_req_test_mapping_error(req)) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* Lock was not acquired, need to wait for resume */

View File

@@ -121,7 +121,7 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req)
* 3. Then continue processing request (flush metadata)
*/
if (req->info.re_part) {
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
@@ -189,7 +189,7 @@ int ocf_write_wb(struct ocf_request *req)
lock = ocf_engine_prepare_clines(req, &_wb_engine_callbacks);
if (!req->info.mapping_error) {
if (!ocf_req_test_mapping_error(req)) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */

View File

@@ -118,7 +118,7 @@ static void _ocf_write_wt_update_bits(struct ocf_request *req)
ocf_req_hash_unlock_wr(req);
}
if (req->info.re_part) {
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
@@ -183,7 +183,7 @@ int ocf_write_wt(struct ocf_request *req)
lock = ocf_engine_prepare_clines(req, &_wt_engine_callbacks);
if (!req->info.mapping_error) {
if (!ocf_req_test_mapping_error(req)) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */

View File

@@ -20,10 +20,14 @@ struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
},
};
static uint32_t ocf_evict_calculate(struct ocf_user_part *part,
uint32_t to_evict)
static uint32_t ocf_evict_calculate(ocf_cache_t cache,
struct ocf_user_part *part, uint32_t to_evict, bool roundup)
{
if (part->runtime->curr_size <= part->config->min_size) {
uint32_t curr_part_size = ocf_part_get_occupancy(part);
uint32_t min_part_size = ocf_part_get_min_size(cache, part);
if (curr_part_size <= min_part_size) {
/*
* Cannot evict from this partition because current size
* is less than minimum size
@@ -31,15 +35,31 @@ static uint32_t ocf_evict_calculate(struct ocf_user_part *part,
return 0;
}
if (to_evict < OCF_TO_EVICTION_MIN)
if (roundup && to_evict < OCF_TO_EVICTION_MIN)
to_evict = OCF_TO_EVICTION_MIN;
if (to_evict > (part->runtime->curr_size - part->config->min_size))
to_evict = part->runtime->curr_size - part->config->min_size;
if (to_evict > (curr_part_size - min_part_size))
to_evict = curr_part_size - min_part_size;
return to_evict;
}
/* Evict up to @evict_cline_no cachelines from @target_part only (used when
 * the partition itself is over its occupancy limit). Note roundup=false:
 * unlike cross-partition eviction, the requested count is not rounded up to
 * OCF_TO_EVICTION_MIN. Returns the number of cachelines actually evicted
 * (0 when eviction is currently not possible). */
static inline uint32_t ocf_evict_part_do(ocf_cache_t cache,
		ocf_queue_t io_queue, const uint32_t evict_cline_no,
		struct ocf_user_part *target_part)
{
	uint32_t to_evict = 0;

	if (!evp_lru_can_evict(cache))
		return 0;

	/* Clamp the request to what the partition may give up (min_size) */
	to_evict = ocf_evict_calculate(cache, target_part, evict_cline_no,
			false);

	return ocf_eviction_need_space(cache, io_queue,
			target_part, to_evict);
}
static inline uint32_t ocf_evict_do(ocf_cache_t cache,
ocf_queue_t io_queue, const uint32_t evict_cline_no,
struct ocf_user_part *target_part)
@@ -67,16 +87,13 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
/* It seams that no more partition for eviction */
break;
}
if (part_id == target_part->id) {
/* Omit targeted, evict from different first */
continue;
}
if (evicted >= evict_cline_no) {
/* Evicted requested number of cache line, stop */
goto out;
}
to_evict = ocf_evict_calculate(part, evict_cline_no);
to_evict = ocf_evict_calculate(cache, part, evict_cline_no,
true);
if (to_evict == 0) {
/* No cache lines to evict for this partition */
continue;
@@ -86,18 +103,6 @@ static inline uint32_t ocf_evict_do(ocf_cache_t cache,
part, to_evict);
}
if (!ocf_eviction_can_evict(cache))
goto out;
if (evicted < evict_cline_no) {
/* Now we can evict form targeted partition */
to_evict = ocf_evict_calculate(target_part, evict_cline_no);
if (to_evict) {
evicted += ocf_eviction_need_space(cache, io_queue,
target_part, to_evict);
}
}
out:
return evicted;
}
@@ -109,16 +114,22 @@ int space_managment_evict_do(struct ocf_cache *cache,
uint32_t free;
struct ocf_user_part *req_part = &cache->user_parts[req->part_id];
free = ocf_freelist_num_free(cache->freelist);
if (evict_cline_no <= free)
return LOOKUP_MAPPED;
if (ocf_req_part_evict(req)) {
evicted = ocf_evict_part_do(cache, req->io_queue, evict_cline_no,
req_part);
} else {
free = ocf_freelist_num_free(cache->freelist);
if (evict_cline_no <= free)
return LOOKUP_MAPPED;
evict_cline_no -= free;
evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no, req_part);
evict_cline_no -= free;
evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no, req_part);
}
if (evict_cline_no <= evicted)
return LOOKUP_MAPPED;
req->info.mapping_error |= true;
ocf_req_set_mapping_error(req);
return LOOKUP_MISS;
}

View File

@@ -58,11 +58,11 @@ struct eviction_policy_ops {
extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];
/*
* Deallocates space from low priority partitions.
* Deallocates space according to eviction priorities.
*
* Returns -1 on error
* or the destination partition ID for the free buffers
* (it matches label and is part of the object (#core_id) IO group)
* @returns:
* 'LOOKUP_HIT' if evicted enough cachelines to serve @req
* 'LOOKUP_MISS' otherwise
*/
int space_managment_evict_do(ocf_cache_t cache,
struct ocf_request *req, uint32_t evict_cline_no);

View File

@@ -11,7 +11,7 @@
#define PARTITION_DEFAULT 0
#define PARTITION_INVALID ((ocf_part_id_t)-1)
#define PARTITION_SIZE_MAX ((ocf_cache_line_t)-1)
#define PARTITION_SIZE_MAX 100
void ocf_metadata_get_partition_info(
struct ocf_cache *cache, ocf_cache_line_t line,

View File

@@ -11,26 +11,26 @@
#include "../eviction/eviction.h"
struct ocf_user_part_config {
char name[OCF_IO_CLASS_NAME_MAX];
uint32_t min_size;
uint32_t max_size;
int16_t priority;
ocf_cache_mode_t cache_mode;
struct {
uint8_t valid : 1;
uint8_t added : 1;
uint8_t eviction : 1;
/*!< This bits is setting during partition sorting,
* and means that can evict from this partition
*/
} flags;
char name[OCF_IO_CLASS_NAME_MAX];
uint32_t min_size;
uint32_t max_size;
int16_t priority;
ocf_cache_mode_t cache_mode;
struct {
uint8_t valid : 1;
uint8_t added : 1;
uint8_t eviction : 1;
/*!< This bits is setting during partition sorting,
* and means that can evict from this partition
*/
} flags;
};
struct ocf_user_part_runtime {
uint32_t curr_size;
uint32_t head;
struct eviction_policy eviction[OCF_NUM_EVICTION_LISTS];
struct cleaning_policy cleaning;
uint32_t curr_size;
uint32_t head;
struct eviction_policy eviction[OCF_NUM_EVICTION_LISTS];
struct cleaning_policy cleaning;
};
/* Iterator state, visiting all eviction lists within a partition

View File

@@ -31,6 +31,8 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
uint32_t max_size, uint8_t priority, bool valid)
{
uint32_t size;
struct ocf_lst_entry *iter;
uint32_t iter_id;
if (!name)
return -OCF_ERR_INVAL;
@@ -41,6 +43,9 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
if (cache->user_parts[part_id].config->flags.valid)
return -OCF_ERR_INVAL;
if (min_size > max_size)
return -OCF_ERR_INVAL;
if (max_size > PARTITION_SIZE_MAX)
return -OCF_ERR_INVAL;
@@ -51,6 +56,14 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
return -OCF_ERR_INVAL;
}
for_each_lst(&cache->lst_part, iter, iter_id) {
if (iter_id == part_id) {
ocf_cache_log(cache, log_err,
"Part with id %hu already exists\n", part_id);
return -OCF_ERR_INVAL;
}
}
size = sizeof(cache->user_parts[part_id].config->name);
if (env_strncpy(cache->user_parts[part_id].config->name, size, name, size))
return -OCF_ERR_INVAL;
@@ -77,8 +90,7 @@ static int _ocf_mngt_set_partition_size(struct ocf_cache *cache,
if (min > max)
return -OCF_ERR_INVAL;
if (_ocf_mngt_count_parts_min_size(cache) + min
>= cache->device->collision_table_entries) {
if (_ocf_mngt_count_parts_min_size(cache) + min > PARTITION_SIZE_MAX) {
/* Illegal configuration in which sum of all min_sizes exceeds
* cache size.
*/
@@ -126,17 +138,17 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
/* Try set partition size */
if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) {
ocf_cache_log(cache, log_info,
"Setting IO class size, id: %u, name: '%s' "
"[ ERROR ]\n", part_id, dest_part->config->name);
"Setting IO class size, id: %u, name: '%s', max size: %u%%"
" [ ERROR ]\n", part_id, dest_part->config->name, max);
return -OCF_ERR_INVAL;
}
ocf_part_set_prio(cache, dest_part, prio);
dest_part->config->cache_mode = cache_mode;
ocf_cache_log(cache, log_info,
"Updating unclassified IO class, id: "
"%u [ OK ]\n", part_id);
"Updating unclassified IO class, id: %u, name :'%s',"
"max size: %u%% [ OK ]\n",
part_id, dest_part->config->name, max);
return 0;
}
@@ -150,23 +162,23 @@ static int _ocf_mngt_io_class_configure(ocf_cache_t cache,
/* Try set partition size */
if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) {
ocf_cache_log(cache, log_info,
"Setting IO class size, id: %u, name: '%s' "
"[ ERROR ]\n", part_id, dest_part->config->name);
"Setting IO class size, id: %u, name: '%s', max size %u%%"
"[ ERROR ]\n", part_id, dest_part->config->name, max);
return -OCF_ERR_INVAL;
}
if (ocf_part_is_valid(dest_part)) {
/* Updating existing */
ocf_cache_log(cache, log_info, "Updating existing IO "
"class, id: %u, name: '%s' [ OK ]\n",
part_id, dest_part->config->name);
"class, id: %u, name: '%s', max size %u%% [ OK ]\n",
part_id, dest_part->config->name, max);
} else {
/* Adding new */
ocf_part_set_valid(cache, part_id, true);
ocf_cache_log(cache, log_info, "Adding new IO class, "
"id: %u, name: '%s' [ OK ]\n", part_id,
dest_part->config->name);
"id: %u, name: '%s', max size %u%% [ OK ]\n", part_id,
dest_part->config->name, max);
}
ocf_part_set_prio(cache, dest_part, prio);

View File

@@ -13,9 +13,10 @@
struct ocf_req_allocator;
struct ocf_req_info {
/* Number of hits, invalid, misses. */
/* Number of hits, invalid, misses, reparts. */
unsigned int hit_no;
unsigned int invalid_no;
unsigned int re_part_no;
uint32_t dirty_all;
/*!< Number of dirty line in request*/
@@ -32,11 +33,6 @@ struct ocf_req_info {
uint32_t mapping_error : 1;
/*!< Core lines in this request were not mapped into cache */
uint32_t re_part : 1;
/*!< This bit indicate that in the request some cache lines
* has to be moved to another partition
*/
uint32_t core_error : 1;
/*!< Error occured during I/O on core device */
@@ -191,6 +187,9 @@ struct ocf_request {
uint8_t wi_second_pass : 1;
/*!< Set after first pass of WI write is completed */
uint8_t part_evict : 1;
/* !< Some cachelines from request's partition must be evicted */
log_sid_t sid;
/*!< Tracing sequence ID */
@@ -332,6 +331,40 @@ void ocf_req_clear_map(struct ocf_request *req);
*/
void ocf_req_hash(struct ocf_request *req);
/**
 * @brief Request should trigger eviction from its target partition
 *
 * @param req - OCF request
 */
static inline void ocf_req_set_part_evict(struct ocf_request *req)
{
	req->part_evict = true;
}
/**
 * @brief Request should not trigger eviction from its target partition
 *
 * @param req - OCF request
 */
static inline void ocf_req_clear_part_evict(struct ocf_request *req)
{
	req->part_evict = false;
}
/**
 * @brief Check whether eviction for this request should come from its
 * target partition rather than follow global eviction priority
 *
 * @param req - OCF request
 * @return true - eviction should be triggered from request's target partition
 * @return false - eviction should be triggered with respect to eviction
 *         priority
 */
static inline bool ocf_req_part_evict(struct ocf_request *req)
{
	return req->part_evict;
}
int ocf_req_set_dirty(struct ocf_request *req);
/**
@@ -348,6 +381,16 @@ static inline void ocf_req_clear(struct ocf_request *req)
env_atomic_set(&req->req_remaining, 0);
}
/**
 * @brief Mark that mapping cachelines for this request failed
 *
 * @param req - OCF request
 */
static inline void ocf_req_set_mapping_error(struct ocf_request *req)
{
	req->info.mapping_error = true;
}
/**
 * @brief Check whether mapping cachelines for this request failed
 *
 * @param req - OCF request
 * @return true if the mapping error flag is set
 */
static inline bool ocf_req_test_mapping_error(struct ocf_request *req)
{
	return req->info.mapping_error;
}
/**
* @brief Return OCF request reference count
*

View File

@@ -276,10 +276,7 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
struct ocf_stats_io_class *stats)
{
ocf_cache_t cache;
uint32_t cache_occupancy_total = 0;
struct ocf_counters_part *part_stat;
ocf_core_t i_core;
ocf_core_id_t i_core_id;
OCF_CHECK_NULL(core);
OCF_CHECK_NULL(stats);
@@ -292,11 +289,6 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
if (!ocf_part_is_valid(&cache->user_parts[part_id]))
return -OCF_ERR_IO_CLASS_NOT_EXIST;
for_each_core(cache, i_core, i_core_id) {
cache_occupancy_total += env_atomic_read(
&i_core->runtime_meta->cached_clines);
}
part_stat = &core->counters->part_counters[part_id];
stats->occupancy_clines = env_atomic_read(&core->runtime_meta->
@@ -304,8 +296,7 @@ int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
stats->dirty_clines = env_atomic_read(&core->runtime_meta->
part_counters[part_id].dirty_clines);
stats->free_clines = cache->conf_meta->cachelines -
cache_occupancy_total;
stats->free_clines = 0;
copy_req_stats(&stats->read_reqs, &part_stat->read_reqs);
copy_req_stats(&stats->write_reqs, &part_stat->write_reqs);

View File

@@ -229,15 +229,9 @@ static void _ocf_stats_part_fill(ocf_cache_t cache, ocf_part_id_t part_id,
_lines4k(stats->occupancy_clines, cache_line_size),
_lines4k(cache_size, cache_line_size));
if (part_id == PARTITION_DEFAULT) {
_set(&usage->free,
_lines4k(stats->free_clines, cache_line_size),
_lines4k(cache_size, cache_line_size));
} else {
_set(&usage->free,
_lines4k(0, cache_line_size),
_lines4k(0, cache_line_size));
}
_set(&usage->free,
_lines4k(stats->free_clines, cache_line_size),
_lines4k(cache_size, cache_line_size));
_set(&usage->clean,
_lines4k(stats->occupancy_clines - stats->dirty_clines,

View File

@@ -190,3 +190,82 @@ void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
}
}
}
/* Compute how many cachelines must be evicted so that @req can be mapped
 * into its target partition without exceeding the partition's occupancy
 * limit. Side effect: sets or clears the request's part_evict flag to
 * indicate whether eviction must come from the target partition itself
 * (limit reached) or may follow global eviction priority. */
static inline uint32_t ocf_part_evict_size(struct ocf_request *req)
{
	uint32_t needed_cache_lines, part_available, cache_lines_to_evict;
	uint32_t part_occupancy, part_occupancy_debt;
	struct ocf_user_part *target_part = &req->cache->user_parts[req->part_id];
	uint32_t part_occupancy_limit =
		ocf_part_get_max_size(req->cache, target_part);

	/* Lines the request will add to the partition: newly mapped ones plus
	 * ones reparted from other partitions */
	needed_cache_lines = ocf_engine_repart_count(req) +
			ocf_engine_unmapped_count(req);

	part_occupancy = ocf_part_get_occupancy(target_part);

	if (part_occupancy_limit >= part_occupancy) {
		part_available = part_occupancy_limit - part_occupancy;
		part_occupancy_debt = 0;
	} else {
		/* Occupancy is greater than occupancy limit. Evict missing number of
		 * cachelines, but no more than single eviction limit */
		part_occupancy_debt = min((uint32_t)OCF_PENDING_EVICTION_LIMIT,
				part_occupancy - part_occupancy_limit);
		part_available = 0;
	}

	if (ocf_freelist_num_free(req->cache->freelist) <
			ocf_engine_unmapped_count(req)) {
		/* Number of cachelines to insert greater than number of free
		 * cachelines */
		if (part_available >= needed_cache_lines) {
			/* Cache is full, but target part's occupancy limit is not
			 * reached */
			ocf_req_clear_part_evict(req);
			cache_lines_to_evict = needed_cache_lines;
		} else {
			/* Cache is full and target part reached its occupancy limit */
			ocf_req_set_part_evict(req);
			cache_lines_to_evict = needed_cache_lines - part_available;
		}
	} else if (part_available < needed_cache_lines) {
		/* Enough of free cache lines, but partition reached its occupancy
		 * limit */
		cache_lines_to_evict = needed_cache_lines - part_available;
		ocf_req_set_part_evict(req);
	} else {
		/* Enough free cachelines available and they can be assigned to target
		 * partition. Plain `else` (the branches above are exhaustive) so the
		 * compiler can prove cache_lines_to_evict is always initialized. */
		cache_lines_to_evict = 0;
	}

	return cache_lines_to_evict + part_occupancy_debt;
}
uint32_t ocf_part_check_space(struct ocf_request *req, uint32_t *to_evict)
{
	uint32_t ret = OCF_PART_IS_FULL;
	uint32_t _to_evict;
	struct ocf_user_part *target_part = &req->cache->user_parts[req->part_id];

	if (!ocf_part_is_enabled(target_part) &&
			ocf_part_get_occupancy(target_part) == 0) {
		/* Partition is disabled and has no cachelines assigned - nothing
		 * to evict, the request should be served in pass-through. (A
		 * disabled partition that still holds cachelines falls through so
		 * that the eviction size is computed.) */
		return OCF_PART_IS_DISABLED;
	}

	_to_evict = ocf_part_evict_size(req);

	if (_to_evict == 0)
		ret = OCF_PART_HAS_SPACE;

	/* The output parameter is optional */
	if (to_evict)
		*to_evict = _to_evict;

	return ret;
}

View File

@@ -8,6 +8,7 @@
#include "../ocf_request.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../metadata/metadata_partition.h"
void ocf_part_init(struct ocf_cache *cache);
@@ -50,6 +51,38 @@ static inline ocf_part_id_t ocf_part_class2id(ocf_cache_t cache, uint64_t class)
return PARTITION_DEFAULT;
}
/* Get the number of cachelines currently assigned to the partition */
static inline uint32_t ocf_part_get_occupancy(struct ocf_user_part *part)
{
	return part->runtime->curr_size;
}
/* Translate the partition's configured min_size (expressed as a percentage)
 * into an absolute number of cachelines. */
static inline uint32_t ocf_part_get_min_size(ocf_cache_t cache,
		struct ocf_user_part *part)
{
	uint64_t ioclass_size;

	/* Widen before multiplying: percentage * cachelines performed in
	 * 32-bit arithmetic could overflow for large caches (compare
	 * ocf_part_get_max_size, which multiplies in 64 bits). */
	ioclass_size = (uint64_t)part->config->min_size *
			cache->conf_meta->cachelines;
	ioclass_size /= 100;

	return (uint32_t)ioclass_size;
}
/* Translate the partition's configured max_size (expressed as a percentage)
 * into an absolute number of cachelines, rounding up so that any non-zero
 * percentage maps to at least one cacheline. */
static inline uint32_t ocf_part_get_max_size(ocf_cache_t cache,
		struct ocf_user_part *part)
{
	uint64_t ioclass_size = (uint64_t)part->config->max_size *
			cache->conf_meta->cachelines;

	return (uint32_t)OCF_DIV_ROUND_UP(ioclass_size, 100);
}
void ocf_part_move(struct ocf_request *req);
#define for_each_part(cache, part, id) \
@@ -61,7 +94,27 @@ static inline void ocf_part_sort(struct ocf_cache *cache)
ocf_lst_sort(&cache->lst_part);
}
static inline ocf_cache_mode_t ocf_part_get_cache_mode(struct ocf_cache *cache,
/* A partition whose max size is 0% may hold no cachelines - treat it as
 * disabled */
static inline bool ocf_part_is_enabled(struct ocf_user_part *part)
{
	return part->config->max_size != 0;
}
#define OCF_PART_HAS_SPACE 0
#define OCF_PART_IS_FULL 1
#define OCF_PART_IS_DISABLED 2
/**
* Check whether there is enough free cachelines to serve request. If partition
* occupancy limit is reached, `req->part_evict` is set to true. Otherwise
* flag is set to false and eviction from any partition should be triggered.
*
* @return
 * OCF_PART_HAS_SPACE when cachelines allotted successfully
* OCF_PART_IS_FULL when need to evict some cachelines to serve request
* OCF_PART_IS_DISABLED when caching for particular partition is disabled
*/
uint32_t ocf_part_check_space(struct ocf_request *req, uint32_t *to_evict);
static inline ocf_cache_mode_t ocf_part_get_cache_mode(ocf_cache_t cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_IO_CLASS_MAX)