
Eviction changes allowing eviction (remapping) of cachelines while holding the hash bucket write lock instead of the global metadata write lock. As eviction (replacement) is now tightly coupled with the request, each request uses an eviction size equal to the number of its unmapped cachelines. Evicting without the global metadata write lock is possible thanks to the fact that remapping is always performed while exclusively holding the cacheline (read or write) lock. So for a cacheline on the LRU list we acquire the cacheline lock, safely resolve the hash and consequently write-lock the hash bucket. Since the cacheline lock is acquired under the hash bucket lock (everywhere except for the new eviction implementation), we are certain that no one acquires the cacheline lock behind our back. Concurrent eviction threads are eliminated by holding the eviction list lock for the duration of the critical locking operations. Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
71 lines
1.9 KiB
C
71 lines
1.9 KiB
C
/*
|
|
* Copyright(c) 2012-2021 Intel Corporation
|
|
* SPDX-License-Identifier: BSD-3-Clause-Clear
|
|
*/
|
|
|
|
#ifndef __LAYER_EVICTION_POLICY_H__
|
|
#define __LAYER_EVICTION_POLICY_H__
|
|
|
|
#include "ocf/ocf.h"
|
|
#include "lru.h"
|
|
#include "lru_structs.h"
|
|
|
|
#define OCF_PENDING_EVICTION_LIMIT 512UL
|
|
|
|
#define OCF_NUM_EVICTION_LISTS 32
|
|
|
|
struct ocf_user_part;
|
|
struct ocf_request;
|
|
|
|
/*
 * Per-partition eviction policy state. Kept as a union so only the
 * state of the configured policy is stored (LRU is the only policy
 * visible here).
 */
struct eviction_policy {
	union {
		struct lru_eviction_policy lru;
	} policy;
};
|
|
|
|
/* Eviction policy metadata per cache line */
union eviction_policy_meta {
	struct lru_eviction_policy_meta lru;
	/* NOTE(review): packed presumably because this union is embedded in
	 * the per-cacheline on-disk/in-memory metadata layout where padding
	 * would change the serialized format -- confirm against metadata
	 * layout code */
} __attribute__((packed));
|
|
|
|
/* The caller must hold the metadata lock for all operations
 *
 * For range operations the caller can:
 * set core_id to -1 to purge the whole cache device
 * set core_id to -2 to purge the whole cache partition
 */
struct eviction_policy_ops {
	/* Initialize eviction policy metadata for a single cache line */
	void (*init_cline)(ocf_cache_t cache, ocf_cache_line_t cline);

	/* Remove a cache line from the eviction policy structures */
	void (*rm_cline)(ocf_cache_t cache,
			ocf_cache_line_t cline);

	/* Query whether eviction may currently proceed on this cache */
	bool (*can_evict)(ocf_cache_t cache);

	/* Evict cache lines from @part on behalf of @req.
	 * NOTE(review): presumably @cline_no is the requested number of
	 * lines and the return value is the number actually evicted --
	 * confirm against the LRU implementation */
	uint32_t (*req_clines)(struct ocf_request *req, struct ocf_user_part *part,
			uint32_t cline_no);

	/* Mark a cache line as recently accessed ("hot") */
	void (*hot_cline)(ocf_cache_t cache, ocf_cache_line_t cline);

	/* Initialize per-partition eviction policy state */
	void (*init_evp)(ocf_cache_t cache, struct ocf_user_part *part);

	/* Notify the policy that a cache line in @part became dirty */
	void (*dirty_cline)(ocf_cache_t cache,
			struct ocf_user_part *part,
			uint32_t cline_no);

	/* Notify the policy that a cache line in @part became clean */
	void (*clean_cline)(ocf_cache_t cache,
			struct ocf_user_part *part,
			uint32_t cline_no);

	/* Trigger flushing of up to @count dirty lines from @part on
	 * @io_queue */
	void (*flush_dirty)(ocf_cache_t cache, struct ocf_user_part *part,
			ocf_queue_t io_queue, uint32_t count);

	/* Human-readable policy name */
	const char *name;
};
|
|
|
|
/* Table of available eviction policy implementations, indexed by the
 * eviction policy type (up to ocf_eviction_max entries) */
extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];

/*
 * Deallocates space according to eviction priorities.
 *
 * @returns:
 * 'LOOKUP_HIT' if evicted enough cachelines to serve @req
 * 'LOOKUP_MISS' otherwise
 *
 * NOTE(review): "managment" is a misspelling of "management", kept as-is
 * because renaming this public symbol would break existing callers.
 */
int space_managment_evict_do(struct ocf_request *req);

/* Free @count cache lines on @cache.
 * NOTE(review): return value semantics not visible in this header --
 * confirm against the implementation */
int space_management_free(ocf_cache_t cache, uint32_t count);
|
|
|
|
#endif
|