
Eviction changes allowing cachelines to be evicted (remapped) while holding the hash bucket write lock instead of the global metadata write lock.

As eviction (replacement) is now tightly coupled with the request, each request uses an eviction size equal to the number of its unmapped cachelines.

Evicting without the global metadata write lock is possible because remapping is always performed while exclusively holding the cacheline (read or write) lock. So for a cacheline on the LRU list we acquire the cacheline lock, safely resolve its hash and then write-lock the hash bucket. Since the cacheline lock is acquired under the hash bucket lock (everywhere except in the new eviction implementation), we are certain that no one acquires the cacheline lock behind our back. Concurrent eviction threads are eliminated by holding the eviction list lock for the duration of the critical locking operations.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
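The lock ordering described above can be modeled with a short, self-contained sketch. This is not OCF code: the pthread locks, the struct and function names, and the use of a trylock on the cacheline are illustrative assumptions that only mirror the ordering (eviction list lock, then cacheline lock, then hash resolution, then hash bucket write lock).

/*
 * Simplified model of the eviction lock ordering - NOT OCF code.
 * pthread locks stand in for the eviction list lock, the per-cacheline
 * lock and the per-bucket hash lock; all names are illustrative only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define HASH_BUCKETS 4

struct model_cacheline {
	pthread_rwlock_t lock;	/* stands in for the cacheline lock */
	unsigned core_line;	/* input for hash bucket resolution */
};

static pthread_mutex_t evict_list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t hash_bucket_lock[HASH_BUCKETS] = {
	PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
	PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER
};

static unsigned resolve_hash(struct model_cacheline *cl)
{
	return cl->core_line % HASH_BUCKETS;
}

/*
 * Remap one cacheline taken from the LRU list without a global
 * metadata write lock:
 *  1. hold the eviction list lock so concurrent eviction threads
 *     cannot race over the same cacheline,
 *  2. take the cacheline lock exclusively (the trylock here is an
 *     assumption - a busy cacheline is simply skipped),
 *  3. with the cacheline lock held, resolving the hash is safe,
 *  4. write-lock only the matching hash bucket and remap.
 */
static bool evict_one(struct model_cacheline *cl)
{
	unsigned hash;

	pthread_mutex_lock(&evict_list_lock);
	if (pthread_rwlock_trywrlock(&cl->lock) != 0) {
		pthread_mutex_unlock(&evict_list_lock);
		return false;	/* cacheline held by someone else */
	}
	pthread_mutex_unlock(&evict_list_lock);

	hash = resolve_hash(cl);
	pthread_rwlock_wrlock(&hash_bucket_lock[hash]);

	/* ... remap the cacheline to its new mapping here ... */

	pthread_rwlock_unlock(&hash_bucket_lock[hash]);
	pthread_rwlock_unlock(&cl->lock);
	return true;
}

int main(void)
{
	struct model_cacheline cl = { .core_line = 42 };

	pthread_rwlock_init(&cl.lock, NULL);
	printf("evicted: %s\n", evict_one(&cl) ? "yes" : "no");
	return 0;
}

Because every other path takes the cacheline lock only under the hash bucket lock, an eviction thread that already holds the cacheline lock keeps exclusive ownership even though it locks the bucket afterwards.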
/*
 * Copyright(c) 2012-2021 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef OCF_QUEUE_PRIV_H_
#define OCF_QUEUE_PRIV_H_

#include "ocf_env.h"

struct ocf_queue {
	ocf_cache_t cache;

	env_atomic io_no;

	env_atomic ref_count;

	struct list_head io_list;
	env_spinlock io_list_lock;

	/* per-queue free running global metadata lock index */
	unsigned lock_idx;

	/* per-queue free running eviction list index */
	unsigned eviction_idx;

	/* Tracing reference counter */
	env_atomic64 trace_ref_cntr;

	/* Tracing stop request */
	env_atomic trace_stop;

	struct list_head list;

	const struct ocf_queue_ops *ops;

	void *priv;
};

static inline void ocf_queue_kick(ocf_queue_t queue, bool allow_sync)
{
	if (allow_sync && queue->ops->kick_sync)
		queue->ops->kick_sync(queue);
	else
		queue->ops->kick(queue);
}

#endif
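The comments on lock_idx and eviction_idx suggest they are free-running, per-queue counters used to spread a queue's accesses across multiple global-metadata lock stripes and eviction lists. A minimal sketch of that pattern is below; NUM_STRIPES and next_stripe() are illustrative assumptions, not part of the OCF API.

/* Hypothetical illustration of consuming a free-running per-queue index.
 * NUM_STRIPES and next_stripe() are made-up names, not OCF API. */
#define NUM_STRIPES 32

static inline unsigned next_stripe(unsigned *free_running_idx)
{
	/* bump the counter; only its value modulo NUM_STRIPES is used */
	return (*free_running_idx)++ % NUM_STRIPES;
}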