Parallel eviction
Eviction changes that allow evicting (remapping) cachelines while holding the hash bucket write lock instead of the global metadata write lock. Since eviction (replacement) is now tightly coupled with the request, each request uses an eviction size equal to the number of its unmapped cachelines.

Evicting without the global metadata write lock is possible because remapping is always performed while exclusively holding the cacheline (read or write) lock. So for a cacheline on the LRU list we acquire the cacheline lock, safely resolve its hash and then write-lock the hash bucket. Since the cacheline lock is normally acquired under the hash bucket lock (everywhere except the new eviction implementation), we are certain that no one acquires the cacheline lock behind our back. Races between concurrent eviction threads are eliminated by holding the eviction list lock for the duration of the critical locking operations.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
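The lock ordering described above (eviction list lock, then cacheline lock, then hash bucket lock instead of the global metadata lock) can be sketched roughly as follows. This is a minimal illustration only: the types, the pthread mutexes, the bucket count and the helper names (lru_tail(), resolve_hash(), remap()) are assumptions made for the sketch, not the OCF API.

/*
 * Sketch of the remap locking order from the commit message.
 * All names below are hypothetical stand-ins, not OCF functions.
 */
#include <pthread.h>
#include <stdbool.h>

#define HASH_BUCKETS 512	/* arbitrary bucket count for the sketch */

struct cacheline {
	pthread_mutex_t lock;			/* per-cacheline lock */
};

struct cache_sketch {
	pthread_mutex_t evict_list_lock;	/* serializes evictors */
	pthread_mutex_t bucket_lock[HASH_BUCKETS];	/* per-bucket, not global */
};

/* Hypothetical helpers standing in for the real LRU/metadata code. */
struct cacheline *lru_tail(struct cache_sketch *c);
unsigned resolve_hash(struct cache_sketch *c, struct cacheline *cl);
void remap(struct cache_sketch *c, struct cacheline *cl);

static bool evict_one(struct cache_sketch *c)
{
	struct cacheline *cl;
	unsigned hash;

	/* Concurrent evictors are held off for the critical locking ops. */
	pthread_mutex_lock(&c->evict_list_lock);
	cl = lru_tail(c);

	/* 1. Cacheline lock first: once held, nobody can remap the line
	 *    behind our back, so its hash can be resolved safely. */
	if (pthread_mutex_trylock(&cl->lock) != 0) {
		pthread_mutex_unlock(&c->evict_list_lock);
		return false;	/* line busy - caller picks another victim */
	}

	/* 2. Resolve the hash and write-lock only that bucket, instead of
	 *    taking the global metadata write lock. */
	hash = resolve_hash(c, cl);
	pthread_mutex_lock(&c->bucket_lock[hash % HASH_BUCKETS]);
	pthread_mutex_unlock(&c->evict_list_lock);

	/* 3. Remap under the cacheline and hash-bucket locks only. */
	remap(c, cl);

	pthread_mutex_unlock(&c->bucket_lock[hash % HASH_BUCKETS]);
	pthread_mutex_unlock(&cl->lock);
	return true;
}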
@@ -12,6 +12,7 @@
#include "utils_part.h"
#include "utils_io.h"
#include "utils_cache_line.h"
#include "../ocf_queue_priv.h"

#define OCF_UTILS_CLEANER_DEBUG 0

@@ -847,6 +848,7 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
int err;
ocf_core_id_t core_id;
uint64_t core_sector;
bool skip;

/* Allocate master request */
master = _ocf_cleaner_alloc_master_req(cache, max, attribs);

@@ -869,7 +871,6 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
env_atomic_inc(&master->master_remaining);

for (i = 0; i < count; i++) {

/* when request hasn't yet been allocated or is just issued */
if (!req) {
if (max > count - i) {

@@ -900,12 +901,23 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
continue;
}

/* Get mapping info */
ocf_metadata_get_core_info(cache, cache_line, &core_id,
&core_sector);

if (attribs->lock_metadata) {
ocf_hb_cline_prot_lock_rd(&cache->metadata.lock,
req->lock_idx, core_id, core_sector);
}

skip = false;

/* when line already cleaned - rare condition under heavy
* I/O workload.
*/
if (!metadata_test_dirty(cache, cache_line)) {
OCF_DEBUG_MSG(cache, "Not dirty");
continue;
skip = true;
}

if (!metadata_test_valid_any(cache, cache_line)) {

@@ -916,12 +928,16 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
* Cache line (sector) cannot be dirty and not valid
*/
ENV_BUG();
continue;
skip = true;
}

/* Get mapping info */
ocf_metadata_get_core_info(cache, cache_line, &core_id,
&core_sector);
if (attribs->lock_metadata) {
ocf_hb_cline_prot_unlock_rd(&cache->metadata.lock,
req->lock_idx, core_id, core_sector);
}

if (skip)
continue;

if (unlikely(!cache->core[core_id].opened)) {
OCF_DEBUG_MSG(cache, "Core object inactive");

@@ -945,6 +961,7 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
i_out = 0;
req = NULL;
}

}

if (req) {

@@ -1036,7 +1053,7 @@ void ocf_cleaner_refcnt_freeze(ocf_cache_t cache)
ocf_part_id_t part_id;

for_each_part(cache, curr_part, part_id)
ocf_refcnt_freeze(&curr_part->cleaning);
ocf_refcnt_freeze(&curr_part->cleaning.counter);
}

void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)

@@ -1045,7 +1062,7 @@ void ocf_cleaner_refcnt_unfreeze(ocf_cache_t cache)
ocf_part_id_t part_id;

for_each_part(cache, curr_part, part_id)
ocf_refcnt_unfreeze(&curr_part->cleaning);
ocf_refcnt_unfreeze(&curr_part->cleaning.counter);
}

static void ocf_cleaner_refcnt_register_zero_cb_finish(void *priv)

@@ -1069,7 +1086,7 @@ void ocf_cleaner_refcnt_register_zero_cb(ocf_cache_t cache,

for_each_part(cache, curr_part, part_id) {
env_atomic_inc(&ctx->waiting);
ocf_refcnt_register_zero_cb(&curr_part->cleaning,
ocf_refcnt_register_zero_cb(&curr_part->cleaning.counter,
ocf_cleaner_refcnt_register_zero_cb_finish, ctx);
}

@@ -27,6 +27,7 @@ typedef int (*ocf_cleaner_get_item)(struct ocf_cache *cache,
*/
struct ocf_cleaner_attribs {
uint8_t lock_cacheline : 1; /*!< Cleaner to lock cachelines on its own */
uint8_t lock_metadata : 1; /*!< Cleaner to lock metadata on its own */

uint8_t do_sort : 1; /*!< Sort cache lines which will be cleaned */

@@ -40,7 +41,7 @@ struct ocf_cleaner_attribs {
void *getter_context;
/*!< Context for getting cache lines */
uint32_t getter_item;
/*!< Additional variable that can be used by cleaner caller
/*!< Additional variable that can be used by cleaner call
* to iterate over items
*/

@@ -103,7 +103,12 @@ void ocf_part_move(struct ocf_request *req)
continue;
}

if (entry->status != LOOKUP_HIT) {
/* Moving cachelines to another partition is needed only
* for those already mapped before this request, which
* indicates either HIT or REMAPPED.
*/
if (entry->status != LOOKUP_HIT &&
entry->status != LOOKUP_REMAPPED) {
/* No HIT */
continue;
}