Per-execution-context freelists

The global free cacheline list is divided into a set of freelists, one
per execution context. When attempting to map an address to the cache,
the freelist for the current execution context is considered first
(fast path). If the current execution context's freelist is empty
(fast path failure), the mapping function attempts to take a free
cacheline from another execution context's freelist (slow path).

The purpose of this change is to improve concurrency of freelist access.
It is part of the fine-granularity metadata lock implementation.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
This commit is contained in:
Adam Rutkowski
2019-08-26 14:54:19 -04:00
parent f892bb962d
commit 13cf871a13
21 changed files with 545 additions and 204 deletions

View File

@@ -21,6 +21,7 @@
#include "ocf_logger_priv.h"
#include "ocf/ocf_trace.h"
#include "promotion/promotion.h"
#include "ocf_freelist.h"
#define DIRTY_FLUSHED 1
#define DIRTY_NOT_FLUSHED 0
@@ -82,8 +83,6 @@ struct ocf_cache_device {
uint64_t metadata_offset;
struct ocf_part *freelist_part;
struct {
struct ocf_cache_line_concurrency *cache_line;
} concurrency;
@@ -110,6 +109,8 @@ struct ocf_cache {
struct ocf_metadata metadata;
ocf_freelist_t freelist;
ocf_eviction_t eviction_policy_init;
struct {
@@ -199,4 +200,16 @@ static inline ocf_core_t ocf_cache_get_core(ocf_cache_t cache,
/* Rate-limited log helper for a cache instance: resolves the cache's owning
 * context and forwards to the context-level rate-limited logger.
 * NOTE(review): rate-limiting semantics live in ocf_log_rl() (declared
 * elsewhere, presumably ocf_logger_priv.h) — confirm there. */
#define ocf_cache_log_rl(cache) \
ocf_log_rl(ocf_cache_get_ctx(cache))
/*
 * Return the total cache occupancy, i.e. the sum of cached cache lines
 * reported by every core attached to @cache.
 *
 * Each per-core counter is read atomically, but the cores are iterated
 * one by one, so the aggregate is only a snapshot — individual counters
 * may change while the sum is being computed.
 */
static inline uint64_t ocf_get_cache_occupancy(ocf_cache_t cache)
{
	ocf_core_id_t id;
	ocf_core_t core;
	uint64_t occupancy = 0;

	for_each_core(cache, core, id)
		occupancy += env_atomic_read(&core->runtime_meta->cached_clines);

	return occupancy;
}
#endif /* __OCF_CACHE_PRIV_H__ */