Merge pull request #451 from arutk/exact_evict_count

only request evict size equal to request unmapped count
This commit is contained in:
Robert Baldyga
2021-02-11 10:47:12 +01:00
committed by GitHub
8 changed files with 409 additions and 676 deletions

View File

@@ -430,55 +430,50 @@ static inline int ocf_prepare_clines_miss(struct ocf_request *req,
{
int lock_status = -OCF_ERR_NO_LOCK;
struct ocf_metadata_lock *metadata_lock = &req->cache->metadata.lock;
uint32_t clines_to_evict = 0;
int res;
/* requests to disabled partitions go in pass-through */
if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
ocf_req_set_mapping_error(req);
ocf_req_hash_unlock_rd(req);
return lock_status;
}
if (!ocf_part_has_space(req)) {
ocf_req_hash_unlock_rd(req);
goto eviction;
}
/* Mapping must be performed holding (at least) hash-bucket write lock */
ocf_req_hash_lock_upgrade(req);
/* Verify whether partition occupancy threshold is not reached yet or cache
* is not out of free cachelines */
res = ocf_part_check_space(req, &clines_to_evict);
if (res == OCF_PART_IS_DISABLED) {
ocf_req_set_mapping_error(req);
ocf_req_hash_unlock_wr(req);
return lock_status;
}
if (res == OCF_PART_HAS_SPACE) {
ocf_engine_map(req);
if (ocf_req_test_mapping_error(req)) {
goto eviction;
}
ocf_engine_map(req);
if (!ocf_req_test_mapping_error(req)) {
lock_status = lock_clines(req, engine_cbs);
if (lock_status < 0) {
/* Mapping succeeded, but we failed to acquire cacheline lock.
* Don't try to evict, just return error to caller */
ocf_req_set_mapping_error(req);
}
ocf_req_hash_unlock_wr(req);
return lock_status;
}
eviction:
ocf_req_hash_unlock_wr(req);
eviction:
ocf_metadata_start_exclusive_access(metadata_lock);
ocf_part_check_space(req, &clines_to_evict);
/* repeat traversal to pick up latest metadata status */
ocf_engine_traverse(req);
if (space_managment_evict_do(req->cache, req, clines_to_evict) ==
LOOKUP_MISS) {
ocf_req_set_mapping_error(req);
goto unlock;
}
if (!ocf_part_has_space(req))
ocf_req_set_part_evict(req);
else
ocf_req_clear_part_evict(req);
if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
/* Partition is disabled but it had cachelines assigned. Now, that they
* are evicted, don't try to map cachelines - we don't want to insert
* new cachelines - the request should be submitted in pass-through mode
* instead */
if (space_managment_evict_do(req->cache, req,
ocf_engine_unmapped_count(req)) == LOOKUP_MISS) {
ocf_req_set_mapping_error(req);
goto unlock;
}

View File

@@ -108,7 +108,7 @@ int ocf_read_fast(struct ocf_request *req)
{
bool hit;
int lock = OCF_LOCK_NOT_ACQUIRED;
bool part_has_space = false;
bool part_has_space;
/* Get OCF request - increase reference counter */
ocf_req_get(req);
@@ -126,8 +126,7 @@ int ocf_read_fast(struct ocf_request *req)
hit = ocf_engine_is_hit(req);
if (ocf_part_check_space(req, NULL) == OCF_PART_HAS_SPACE)
part_has_space = true;
part_has_space = ocf_part_has_space(req);
if (hit && part_has_space) {
ocf_io_start(&req->ioi.io);
@@ -197,8 +196,7 @@ int ocf_write_fast(struct ocf_request *req)
mapped = ocf_engine_is_mapped(req);
if (ocf_part_check_space(req, NULL) == OCF_PART_HAS_SPACE)
part_has_space = true;
part_has_space = ocf_part_has_space(req);
if (mapped && part_has_space) {
ocf_io_start(&req->ioi.io);

View File

@@ -60,53 +60,92 @@ static inline uint32_t ocf_evict_part_do(ocf_cache_t cache,
target_part, to_evict);
}
static inline uint32_t ocf_evict_do(ocf_cache_t cache,
ocf_queue_t io_queue, const uint32_t evict_cline_no,
struct ocf_user_part *target_part)
static inline uint32_t ocf_evict_partitions(ocf_cache_t cache,
ocf_queue_t io_queue, uint32_t evict_cline_no,
bool overflown_only, uint32_t max_priority)
{
uint32_t to_evict = 0, evicted = 0;
struct ocf_user_part *part;
ocf_part_id_t part_id;
unsigned overflow_size;
/* For each partition from the lowest priority to highest one */
for_each_part(cache, part, part_id) {
if (!ocf_eviction_can_evict(cache))
goto out;
/*
* Check stop and continue conditions
*/
if (target_part->config->priority > part->config->priority) {
if (max_priority > part->config->priority) {
/*
* iterate partition have higher priority, do not evict
* iterated partition has higher priority,
* do not evict
*/
break;
}
if (!part->config->flags.eviction) {
/* It seams that no more partition for eviction */
/* no more partitions available for eviction
*/
break;
}
if (evicted >= evict_cline_no) {
/* Evicted requested number of cache line, stop */
goto out;
if (overflown_only) {
overflow_size = ocf_part_overflow_size(cache, part);
if (overflow_size == 0)
continue;
}
to_evict = ocf_evict_calculate(cache, part, evict_cline_no,
true);
to_evict = ocf_evict_calculate(cache, part,
evict_cline_no - evicted, true);
if (to_evict == 0) {
/* No cache lines to evict for this partition */
continue;
}
if (overflown_only)
to_evict = OCF_MIN(to_evict, overflow_size);
evicted += ocf_eviction_need_space(cache, io_queue,
part, to_evict);
if (evicted >= evict_cline_no) {
/* Evicted requested number of cache line, stop
*/
goto out;
}
}
out:
return evicted;
}
/*
 * Evict @evict_cline_no cachelines on behalf of a request targeting
 * @target_part. Returns the number of cachelines actually evicted.
 */
static inline uint32_t ocf_evict_do(ocf_cache_t cache,
		ocf_queue_t io_queue, uint32_t evict_cline_no,
		struct ocf_user_part *target_part)
{
	uint32_t freed;
	uint32_t remaining;

	/* Pass 1: reclaim from partitions exceeding their configured maximum
	 * size. Priority is ignored here - an overflown partition must give
	 * cachelines back regardless of the destination partition's
	 * priority. */
	freed = ocf_evict_partitions(cache, io_queue, evict_cline_no,
			true, OCF_IO_CLASS_PRIO_HIGHEST);

	if (freed >= evict_cline_no)
		return freed;

	/* Pass 2: overflown partitions did not cover the demand - evict the
	 * remainder from partitions whose priority does not exceed the
	 * target partition's. */
	remaining = evict_cline_no - freed;

	return freed + ocf_evict_partitions(cache, io_queue, remaining,
			false, target_part->config->priority);
}
int space_managment_evict_do(struct ocf_cache *cache,
struct ocf_request *req, uint32_t evict_cline_no)
{

View File

@@ -190,82 +190,3 @@ void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
}
}
}
/*
 * Compute how many cachelines must be evicted before @req can be mapped
 * into its target partition.
 *
 * Side effect: sets or clears the request's part_evict flag to tell the
 * caller whether eviction should be scoped to the target partition
 * (occupancy limit reached) or may come from any partition (cache-wide
 * free-list shortage).
 *
 * Returns 0 when no eviction is needed.
 */
static inline uint32_t ocf_part_evict_size(struct ocf_request *req)
{
	/* Initialize to 0 so the "enough space" path needs no assignment and
	 * no branch chain leaves the value undefined. */
	uint32_t needed_cache_lines, part_available, cache_lines_to_evict = 0;
	uint32_t part_occupancy, part_occupancy_debt;
	struct ocf_user_part *target_part = &req->cache->user_parts[req->part_id];
	uint32_t part_occupancy_limit =
			ocf_part_get_max_size(req->cache, target_part);

	/* Cachelines this request would add to the target partition: fresh
	 * mappings plus lines migrating in from other partitions (repart). */
	needed_cache_lines = ocf_engine_repart_count(req) +
			ocf_engine_unmapped_count(req);

	part_occupancy = ocf_part_get_occupancy(target_part);

	if (part_occupancy_limit >= part_occupancy) {
		part_available = part_occupancy_limit - part_occupancy;
		part_occupancy_debt = 0;
	} else {
		/* Occupancy is greater than occupancy limit. Evict missing
		 * number of cachelines, but no more than single eviction
		 * limit */
		part_occupancy_debt = min((uint32_t)OCF_PENDING_EVICTION_LIMIT,
				part_occupancy - part_occupancy_limit);
		part_available = 0;
	}

	if (ocf_freelist_num_free(req->cache->freelist) <
			ocf_engine_unmapped_count(req)) {
		/* Number of cachelines to insert greater than number of free
		 * cachelines */
		if (part_available >= needed_cache_lines) {
			/* Cache is full, but target part's occupancy limit is
			 * not reached - evict from any partition */
			ocf_req_clear_part_evict(req);
			cache_lines_to_evict = needed_cache_lines;
		} else {
			/* Cache is full and target part reached its occupancy
			 * limit - evict from the target partition */
			ocf_req_set_part_evict(req);
			cache_lines_to_evict = needed_cache_lines -
					part_available;
		}
	} else if (part_available < needed_cache_lines) {
		/* Enough free cache lines, but partition reached its
		 * occupancy limit */
		cache_lines_to_evict = needed_cache_lines - part_available;
		ocf_req_set_part_evict(req);
	}
	/* else: enough free cachelines and the target partition can absorb
	 * them all - cache_lines_to_evict stays 0 (was a redundant
	 * "else if (part_available >= needed_cache_lines)" that left the
	 * result formally uninitialized on the compiler's view) */

	return cache_lines_to_evict + part_occupancy_debt;
}
/*
 * Classify the target partition's capacity for @req. Optionally reports
 * the number of cachelines to evict through @to_evict (may be NULL).
 */
uint32_t ocf_part_check_space(struct ocf_request *req, uint32_t *to_evict)
{
	struct ocf_user_part *target_part =
			&req->cache->user_parts[req->part_id];
	uint32_t evict_size;

	/* A disabled partition with no cachelines assigned is served in
	 * pass-through. If a disabled partition still owns cachelines, fall
	 * through so the caller triggers eviction of the leftovers. */
	if (!ocf_part_is_enabled(target_part) &&
			ocf_part_get_occupancy(target_part) == 0)
		return OCF_PART_IS_DISABLED;

	evict_size = ocf_part_evict_size(req);

	if (to_evict != NULL)
		*to_evict = evict_size;

	return (evict_size == 0) ? OCF_PART_HAS_SPACE : OCF_PART_IS_FULL;
}

View File

@@ -100,20 +100,29 @@ static inline bool ocf_part_is_enabled(struct ocf_user_part *part)
return part->config->max_size != 0;
}
#define OCF_PART_HAS_SPACE 0
#define OCF_PART_IS_FULL 1
#define OCF_PART_IS_DISABLED 2
/**
* Check whether there is enough free cachelines to serve request. If partition
* occupancy limit is reached, `req->part_evict` is set to true. Otherwise
* flag is set to false and eviction from any partition should be triggered.
*
* @return
* OCF_PART_HAS_SPACE when cachelines alloted successfully
* OCF_PART_IS_FULL when need to evict some cachelines to serve request
* OCF_PART_IS_DISABLED when caching for particular partition is disabled
*/
uint32_t ocf_part_check_space(struct ocf_request *req, uint32_t *to_evict);
/*
 * Number of cachelines the partition holds above its configured maximum
 * size; zero when the partition is within its limit.
 */
static inline uint32_t ocf_part_overflow_size(struct ocf_cache *cache,
		struct ocf_user_part *part)
{
	uint32_t occupancy = ocf_part_get_occupancy(part);
	uint32_t limit = ocf_part_get_max_size(cache, part);

	return (occupancy > limit) ? (occupancy - limit) : 0;
}
/*
 * True when the target partition can absorb every cacheline this request
 * would add (new mappings plus reparted lines) without crossing its
 * occupancy limit.
 */
static inline bool ocf_part_has_space(struct ocf_request *req)
{
	struct ocf_user_part *target_part =
			&req->cache->user_parts[req->part_id];
	uint64_t limit = ocf_part_get_max_size(req->cache, target_part);
	uint64_t needed = ocf_engine_repart_count(req) +
			ocf_engine_unmapped_count(req);
	uint64_t occupancy = ocf_part_get_occupancy(target_part);

	return occupancy + needed <= limit;
}
static inline ocf_cache_mode_t ocf_part_get_cache_mode(ocf_cache_t cache,
ocf_part_id_t part_id)