Per-execution-context freelists

The global free cacheline list is divided into a set of freelists, one
per execution context. When attempting to map an address to the cache,
the freelist for the current execution context is considered first
(fast path). If the current execution context's freelist is empty
(fast path failure), the mapping function attempts to take a free
cacheline from another execution context's list (slow path).

The purpose of this change is to improve concurrency of freelist access.
It is part of the fine-granularity metadata lock implementation.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
This commit is contained in:
Adam Rutkowski
2019-08-26 14:54:19 -04:00
parent f892bb962d
commit 13cf871a13
21 changed files with 545 additions and 204 deletions

View File

@@ -107,11 +107,13 @@ int space_managment_evict_do(struct ocf_cache *cache,
struct ocf_request *req, uint32_t evict_cline_no)
{
uint32_t evicted;
uint32_t free;
if (evict_cline_no <= cache->device->freelist_part->curr_size)
free = ocf_freelist_num_free(cache->freelist);
if (evict_cline_no <= free)
return LOOKUP_MAPPED;
evict_cline_no -= cache->device->freelist_part->curr_size;
evict_cline_no -= free;
evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no,
req->part_id);

View File

@@ -32,8 +32,7 @@ union eviction_policy_meta {
* set core_id to -2 to purge the whole cache partition
*/
struct eviction_policy_ops {
void (*init_cline)(ocf_cache_t cache,
ocf_cache_line_t cline);
void (*init_cline)(ocf_cache_t cache, ocf_cache_line_t cline);
void (*rm_cline)(ocf_cache_t cache,
ocf_cache_line_t cline);
bool (*can_evict)(ocf_cache_t cache);

View File

@@ -8,8 +8,7 @@
#include "eviction.h"
#include "lru_structs.h"
void evp_lru_init_cline(struct ocf_cache *cache,
ocf_cache_line_t cline);
void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
bool evp_lru_can_evict(struct ocf_cache *cache);
uint32_t evp_lru_req_clines(struct ocf_cache *cache, ocf_queue_t io_queue,