Merge pull request #452 from arutk/split_gml_master

Split global metadata lock
Robert Baldyga 2021-02-15 18:10:36 +01:00 committed by GitHub
commit fe206a86ec
25 changed files with 321 additions and 181 deletions

View File

@@ -13,6 +13,7 @@
#include "../cleaning/acp.h"
#include "../engine/engine_common.h"
#include "../concurrency/ocf_cache_line_concurrency.h"
#include "../concurrency/ocf_metadata_concurrency.h"
#include "cleaning_priv.h"
#define OCF_ACP_DEBUG 0
@@ -385,8 +386,11 @@ static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
{
struct ocf_map_info info;
bool locked = false;
unsigned lock_idx = ocf_metadata_concurrency_next_idx(
cache->cleaner.io_queue);
ocf_metadata_hash_lock_rd(&cache->metadata.lock, core_id, core_line);
ocf_hb_cline_prot_lock_rd(&cache->metadata.lock, lock_idx, core_id,
core_line);
ocf_engine_lookup_map_entry(cache, &info, core_id,
core_line);
@@ -397,7 +401,8 @@ static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
locked = true;
}
ocf_metadata_hash_unlock_rd(&cache->metadata.lock, core_id, core_line);
ocf_hb_cline_prot_unlock_rd(&cache->metadata.lock, lock_idx, core_id,
core_line);
return locked ? info.coll_idx : cache->device->collision_table_entries;
}
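A minimal sketch of the calling pattern introduced above, pulled out of the diff for clarity (lookup_locked is a hypothetical helper, not part of this commit): sample a shard index from the queue's free-running counter, then bracket the lookup with the protected hash-bucket read lock, passing the same index to lock and unlock.

    static void lookup_locked(struct ocf_cache *cache, uint32_t core_id,
            uint64_t core_line, struct ocf_map_info *info)
    {
        /* any index is correct; the queue counter just spreads readers */
        unsigned lock_idx = ocf_metadata_concurrency_next_idx(
                cache->cleaner.io_queue);

        ocf_hb_cline_prot_lock_rd(&cache->metadata.lock, lock_idx, core_id,
                core_line);
        ocf_engine_lookup_map_entry(cache, info, core_id, core_line);
        ocf_hb_cline_prot_unlock_rd(&cache->metadata.lock, lock_idx, core_id,
                core_line);
    }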

View File

@@ -5,12 +5,14 @@
#include "ocf_metadata_concurrency.h"
#include "../metadata/metadata_misc.h"
#include "../ocf_queue_priv.h"
int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
{
int err = 0;
unsigned evp_iter;
unsigned part_iter;
unsigned global_iter;
for (evp_iter = 0; evp_iter < OCF_NUM_EVICTION_LISTS; evp_iter++) {
err = env_spinlock_init(&metadata_lock->eviction[evp_iter]);
@@ -20,22 +22,29 @@ int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
env_rwlock_init(&metadata_lock->status);
err = env_rwsem_init(&metadata_lock->global);
if (err)
goto rwsem_err;
for (global_iter = 0; global_iter < OCF_NUM_GLOBAL_META_LOCKS;
global_iter++) {
err = env_rwsem_init(&metadata_lock->global[global_iter].sem);
if (err)
goto global_err;
}
for (part_iter = 0; part_iter < OCF_IO_CLASS_MAX; part_iter++) {
err = env_spinlock_init(&metadata_lock->partition[part_iter]);
if (err)
goto spinlocks_err;
goto partition_err;
}
return err;
spinlocks_err:
partition_err:
while (part_iter--)
env_spinlock_destroy(&metadata_lock->partition[part_iter]);
rwsem_err:
global_err:
while (global_iter--)
env_rwsem_destroy(&metadata_lock->global[global_iter].sem);
env_rwlock_destroy(&metadata_lock->status);
eviction_err:
@@ -55,8 +64,10 @@ void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock)
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
env_spinlock_destroy(&metadata_lock->eviction[i]);
for (i = 0; i < OCF_NUM_GLOBAL_META_LOCKS; i++)
env_rwsem_destroy(&metadata_lock->global[i].sem);
env_rwlock_destroy(&metadata_lock->status);
env_rwsem_destroy(&metadata_lock->global);
}
int ocf_metadata_concurrency_attached_init(
@@ -140,39 +151,79 @@ void ocf_metadata_concurrency_attached_deinit(
void ocf_metadata_start_exclusive_access(
struct ocf_metadata_lock *metadata_lock)
{
env_rwsem_down_write(&metadata_lock->global);
unsigned i;
for (i = 0; i < OCF_NUM_GLOBAL_META_LOCKS; i++) {
env_rwsem_down_write(&metadata_lock->global[i].sem);
}
}
int ocf_metadata_try_start_exclusive_access(
struct ocf_metadata_lock *metadata_lock)
{
return env_rwsem_down_write_trylock(&metadata_lock->global);
unsigned i;
int error;
for (i = 0; i < OCF_NUM_GLOBAL_META_LOCKS; i++) {
error = env_rwsem_down_write_trylock(&metadata_lock->global[i].sem);
if (error)
break;
}
if (error) {
while (i--) {
env_rwsem_up_write(&metadata_lock->global[i].sem);
}
}
return error;
}
void ocf_metadata_end_exclusive_access(
struct ocf_metadata_lock *metadata_lock)
{
env_rwsem_up_write(&metadata_lock->global);
unsigned i;
for (i = OCF_NUM_GLOBAL_META_LOCKS; i > 0; i--)
env_rwsem_up_write(&metadata_lock->global[i - 1].sem);
}
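Taken together: the exclusive path write-locks every shard in ascending index order and releases them in reverse, while readers take exactly one shard. A sketch of the resulting invariant, assuming the four-shard configuration defined later in this commit:

    /* A writer must own all shards, so it conflicts with a reader holding
     * any single one; readers on different shards never block each other. */
    void writer_side(struct ocf_metadata_lock *ml)
    {
        ocf_metadata_start_exclusive_access(ml); /* down_write, shards 0..3 */
        /* ... mutate cache-global metadata ... */
        ocf_metadata_end_exclusive_access(ml);   /* up_write, shards 3..0 */
    }

    void reader_side(struct ocf_metadata_lock *ml, unsigned lock_idx)
    {
        ocf_metadata_start_shared_access(ml, lock_idx); /* one shard only */
        /* ... read-side metadata access ... */
        ocf_metadata_end_shared_access(ml, lock_idx);
    }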
/* lock_idx determines which of the underlying R/W locks is acquired for
   read. The goal is to spread calls across all available underlying locks
   to reduce contention on a single RW semaphore primitive. Technically any
   value is correct, but picking wisely would allow for higher read
   throughput:
   * free running per-cpu counter sounds good,
   * for rarely exercised code paths (e.g. management) any value would do.
*/
void ocf_metadata_start_shared_access(
struct ocf_metadata_lock *metadata_lock)
struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx)
{
env_rwsem_down_read(&metadata_lock->global);
env_rwsem_down_read(&metadata_lock->global[lock_idx].sem);
}
int ocf_metadata_try_start_shared_access(
struct ocf_metadata_lock *metadata_lock)
struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx)
{
return env_rwsem_down_read_trylock(&metadata_lock->global);
return env_rwsem_down_read_trylock(&metadata_lock->global[lock_idx].sem);
}
void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock)
void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx)
{
env_rwsem_up_read(&metadata_lock->global);
env_rwsem_up_read(&metadata_lock->global[lock_idx].sem);
}
void ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock,
/* NOTE: Calling 'naked' lock/unlock requires the caller to hold the global
   metadata shared (aka read) lock.
   NOTE: Using 'naked' variants to lock multiple hash buckets is prone to
   deadlocks if not locking in the order of increasing hash bucket
   number. The preferred way to lock multiple hash buckets is to use the
   request lock routines ocf_hb_req_prot_(un)lock_(rd/wr).
*/
static inline void ocf_hb_id_naked_lock(
struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
ENV_BUG_ON(hash >= metadata_lock->num_hash_entries);
@@ -185,7 +236,8 @@ void ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock,
ENV_BUG();
}
void ocf_metadata_hash_unlock(struct ocf_metadata_lock *metadata_lock,
static inline void ocf_hb_id_naked_unlock(
struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
ENV_BUG_ON(hash >= metadata_lock->num_hash_entries);
@@ -198,7 +250,7 @@ void ocf_metadata_hash_unlock(struct ocf_metadata_lock *metadata_lock,
ENV_BUG();
}
int ocf_metadata_hash_try_lock(struct ocf_metadata_lock *metadata_lock,
static int ocf_hb_id_naked_trylock(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
int result = -1;
@@ -215,81 +267,120 @@ int ocf_metadata_hash_try_lock(struct ocf_metadata_lock *metadata_lock,
ENV_BUG();
}
if (!result)
return -1;
return 0;
return result;
}
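To make the deadlock note above concrete, a hedged sketch of ordered acquisition for two buckets (lock_two_buckets is illustrative only; real multi-bucket locking should go through the ocf_hb_req_prot_* routines):

    /* Two threads locking buckets {2,7} and {7,2} can each grab their first
     * bucket and then wait forever for the second; taking buckets in
     * increasing order makes that interleaving impossible. */
    static void lock_two_buckets(struct ocf_metadata_lock *ml,
            ocf_cache_line_t a, ocf_cache_line_t b, int rw)
    {
        ocf_cache_line_t first = (a < b) ? a : b;
        ocf_cache_line_t second = (a < b) ? b : a;

        ocf_hb_id_naked_lock(ml, first, rw);
        if (second != first)
            ocf_hb_id_naked_lock(ml, second, rw);
    }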
void ocf_metadata_lock_hash_rd(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash)
{
ocf_metadata_start_shared_access(metadata_lock);
ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_RD);
}
void ocf_metadata_unlock_hash_rd(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash)
{
ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_RD);
ocf_metadata_end_shared_access(metadata_lock);
}
void ocf_metadata_lock_hash_wr(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash)
{
ocf_metadata_start_shared_access(metadata_lock);
ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_WR);
}
void ocf_metadata_unlock_hash_wr(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash)
{
ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_WR);
ocf_metadata_end_shared_access(metadata_lock);
}
/* NOTE: attempt to acquire hash lock for multiple core lines may end up
* in deadlock. In order to hash lock multiple core lines safely, use
* ocf_req_hash_lock_* functions */
void ocf_metadata_hash_lock_rd(struct ocf_metadata_lock *metadata_lock,
bool ocf_hb_cline_naked_trylock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_metadata_start_shared_access(metadata_lock);
ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_RD);
return (0 == ocf_hb_id_naked_trylock(metadata_lock, hash,
OCF_METADATA_WR));
}
void ocf_metadata_hash_unlock_rd(struct ocf_metadata_lock *metadata_lock,
bool ocf_hb_cline_naked_trylock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_RD);
ocf_metadata_end_shared_access(metadata_lock);
return (0 == ocf_hb_id_naked_trylock(metadata_lock, hash,
OCF_METADATA_RD));
}
void ocf_metadata_hash_lock_wr(struct ocf_metadata_lock *metadata_lock,
void ocf_hb_cline_naked_unlock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_metadata_start_shared_access(metadata_lock);
ocf_metadata_hash_lock(metadata_lock, hash, OCF_METADATA_WR);
ocf_hb_id_naked_unlock(metadata_lock, hash, OCF_METADATA_RD);
}
void ocf_metadata_hash_unlock_wr(struct ocf_metadata_lock *metadata_lock,
void ocf_hb_cline_naked_unlock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_metadata_hash_unlock(metadata_lock, hash, OCF_METADATA_WR);
ocf_metadata_end_shared_access(metadata_lock);
ocf_hb_id_naked_unlock(metadata_lock, hash, OCF_METADATA_WR);
}
/* common part of protected hash bucket lock routines */
static inline void ocf_hb_id_prot_lock_common(
struct ocf_metadata_lock *metadata_lock,
uint32_t lock_idx, ocf_cache_line_t hash, int rw)
{
ocf_metadata_start_shared_access(metadata_lock, lock_idx);
ocf_hb_id_naked_lock(metadata_lock, hash, rw);
}
/* common part of protected hash bucket unlock routines */
static inline void ocf_hb_id_prot_unlock_common(
struct ocf_metadata_lock *metadata_lock,
uint32_t lock_idx, ocf_cache_line_t hash, int rw)
{
ocf_hb_id_naked_unlock(metadata_lock, hash, rw);
ocf_metadata_end_shared_access(metadata_lock, lock_idx);
}
/* NOTE: caller can lock at most one hash bucket at a time using protected
variants of lock routines. */
void ocf_hb_cline_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t lock_idx, uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_hb_id_prot_lock_common(metadata_lock, lock_idx,
hash, OCF_METADATA_WR);
}
void ocf_hb_cline_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t lock_idx, uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_hb_id_prot_unlock_common(metadata_lock, lock_idx,
hash, OCF_METADATA_WR);
}
void ocf_hb_cline_prot_lock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t lock_idx, uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_hb_id_prot_lock_common(metadata_lock, lock_idx,
hash, OCF_METADATA_RD);
}
void ocf_hb_cline_prot_unlock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t lock_idx, uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_hb_id_prot_unlock_common(metadata_lock, lock_idx,
hash, OCF_METADATA_RD);
}
void ocf_hb_id_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx, ocf_cache_line_t hash)
{
ocf_hb_id_prot_lock_common(metadata_lock, lock_idx, hash,
OCF_METADATA_WR);
}
void ocf_hb_id_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx, ocf_cache_line_t hash)
{
ocf_hb_id_prot_unlock_common(metadata_lock, lock_idx, hash,
OCF_METADATA_WR);
}
/* number of hash entries */
@@ -342,62 +433,66 @@ void ocf_metadata_hash_unlock_wr(struct ocf_metadata_lock *metadata_lock,
for (hash = _MIN_HASH(req); hash <= _MAX_HASH(req); \
hash = _HASH_NEXT(req, hash))
void ocf_req_hash_lock_rd(struct ocf_request *req)
void ocf_hb_req_prot_lock_rd(struct ocf_request *req)
{
ocf_cache_line_t hash;
ocf_metadata_start_shared_access(&req->cache->metadata.lock);
ocf_metadata_start_shared_access(&req->cache->metadata.lock,
req->lock_idx);
for_each_req_hash_asc(req, hash) {
ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
ocf_hb_id_naked_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
}
}
void ocf_req_hash_unlock_rd(struct ocf_request *req)
void ocf_hb_req_prot_unlock_rd(struct ocf_request *req)
{
ocf_cache_line_t hash;
for_each_req_hash_asc(req, hash) {
ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
ocf_hb_id_naked_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
}
ocf_metadata_end_shared_access(&req->cache->metadata.lock);
ocf_metadata_end_shared_access(&req->cache->metadata.lock,
req->lock_idx);
}
void ocf_req_hash_lock_wr(struct ocf_request *req)
void ocf_hb_req_prot_lock_wr(struct ocf_request *req)
{
ocf_cache_line_t hash;
ocf_metadata_start_shared_access(&req->cache->metadata.lock);
ocf_metadata_start_shared_access(&req->cache->metadata.lock,
req->lock_idx);
for_each_req_hash_asc(req, hash) {
ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
ocf_hb_id_naked_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
}
}
void ocf_req_hash_lock_upgrade(struct ocf_request *req)
void ocf_hb_req_prot_lock_upgrade(struct ocf_request *req)
{
ocf_cache_line_t hash;
for_each_req_hash_asc(req, hash) {
ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
ocf_hb_id_naked_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
}
for_each_req_hash_asc(req, hash) {
ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
ocf_hb_id_naked_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
}
}
void ocf_req_hash_unlock_wr(struct ocf_request *req)
void ocf_hb_req_prot_unlock_wr(struct ocf_request *req)
{
ocf_cache_line_t hash;
for_each_req_hash_asc(req, hash) {
ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
ocf_hb_id_naked_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
}
ocf_metadata_end_shared_access(&req->cache->metadata.lock);
ocf_metadata_end_shared_access(&req->cache->metadata.lock,
req->lock_idx);
}
void ocf_collision_start_shared_access(struct ocf_metadata_lock *metadata_lock,

View File

@@ -4,6 +4,7 @@
*/
#include "../ocf_cache_priv.h"
#include "../eviction/eviction.h"
#include "../ocf_queue_priv.h"
#ifndef __OCF_METADATA_CONCURRENCY_H__
#define __OCF_METADATA_CONCURRENCY_H__
@@ -11,6 +12,11 @@
#define OCF_METADATA_RD 0
#define OCF_METADATA_WR 1
static inline unsigned ocf_metadata_concurrency_next_idx(ocf_queue_t q)
{
return q->lock_idx++ % OCF_NUM_GLOBAL_META_LOCKS;
}
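The counter is free-running and deliberately unsynchronized: any value is a correct shard index, so a racy increment costs a little fairness but never correctness. A usage sketch, assuming OCF_NUM_GLOBAL_META_LOCKS == 4 and a queue counter starting at zero:

    unsigned i0 = ocf_metadata_concurrency_next_idx(q); /* 0 */
    unsigned i1 = ocf_metadata_concurrency_next_idx(q); /* 1 */
    unsigned i2 = ocf_metadata_concurrency_next_idx(q); /* 2 */
    unsigned i3 = ocf_metadata_concurrency_next_idx(q); /* 3 */
    unsigned i4 = ocf_metadata_concurrency_next_idx(q); /* 0 again */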
int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock);
void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock);
@@ -90,13 +96,16 @@ void ocf_metadata_end_exclusive_access(
struct ocf_metadata_lock *metadata_lock);
int ocf_metadata_try_start_shared_access(
struct ocf_metadata_lock *metadata_lock);
struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx);
void ocf_metadata_start_shared_access(
struct ocf_metadata_lock *metadata_lock);
struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx);
void ocf_metadata_end_shared_access(
struct ocf_metadata_lock *metadata_lock);
struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx);
static inline void ocf_metadata_status_bits_lock(
struct ocf_metadata_lock *metadata_lock, int rw)
@@ -136,32 +145,38 @@ static inline void ocf_metadata_status_bits_unlock(
ocf_metadata_status_bits_unlock(&cache->metadata.lock, \
OCF_METADATA_WR)
/* lock/unlock single hash */
void ocf_metadata_lock_hash_rd(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash);
void ocf_metadata_unlock_hash_rd(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash);
void ocf_metadata_lock_hash_wr(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash);
void ocf_metadata_unlock_hash_wr(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash);
void ocf_hb_cline_prot_lock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t lock_idx, uint32_t core_id, uint64_t core_line);
void ocf_hb_cline_prot_unlock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t lock_idx, uint32_t core_id, uint64_t core_line);
/* lock/unlock single hash provided core id and core line */
void ocf_metadata_hash_lock_rd(struct ocf_metadata_lock *metadata_lock,
void ocf_hb_cline_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t lock_idx, uint32_t core_id, uint64_t core_line);
void ocf_hb_cline_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t lock_idx, uint32_t core_id, uint64_t core_line);
void ocf_hb_id_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx, ocf_cache_line_t hash);
void ocf_hb_id_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx, ocf_cache_line_t hash);
/* caller must hold global metadata read lock */
bool ocf_hb_cline_naked_trylock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line);
void ocf_metadata_hash_unlock_rd(struct ocf_metadata_lock *metadata_lock,
void ocf_hb_cline_naked_unlock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line);
void ocf_metadata_hash_lock_wr(struct ocf_metadata_lock *metadata_lock,
bool ocf_hb_cline_naked_trylock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line);
void ocf_metadata_hash_unlock_wr(struct ocf_metadata_lock *metadata_lock,
void ocf_hb_cline_naked_unlock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line);
/* lock entire request in deadlock-free manner */
void ocf_req_hash_lock_rd(struct ocf_request *req);
void ocf_req_hash_unlock_rd(struct ocf_request *req);
void ocf_req_hash_lock_wr(struct ocf_request *req);
void ocf_req_hash_unlock_wr(struct ocf_request *req);
void ocf_req_hash_lock_upgrade(struct ocf_request *req);
void ocf_hb_req_prot_lock_rd(struct ocf_request *req);
void ocf_hb_req_prot_unlock_rd(struct ocf_request *req);
void ocf_hb_req_prot_lock_wr(struct ocf_request *req);
void ocf_hb_req_prot_unlock_wr(struct ocf_request *req);
void ocf_hb_req_prot_lock_upgrade(struct ocf_request *req);
/* collision table page lock interface */
void ocf_collision_start_shared_access(struct ocf_metadata_lock *metadata_lock,

View File

@@ -434,17 +434,17 @@ static inline int ocf_prepare_clines_miss(struct ocf_request *req,
/* requests to disabled partitions go in pass-through */
if (!ocf_part_is_enabled(&req->cache->user_parts[req->part_id])) {
ocf_req_set_mapping_error(req);
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
return lock_status;
}
if (!ocf_part_has_space(req)) {
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
goto eviction;
}
/* Mapping must be performed holding (at least) hash-bucket write lock */
ocf_req_hash_lock_upgrade(req);
ocf_hb_req_prot_lock_upgrade(req);
ocf_engine_map(req);
@@ -455,11 +455,11 @@ static inline int ocf_prepare_clines_miss(struct ocf_request *req,
* Don't try to evict, just return error to caller */
ocf_req_set_mapping_error(req);
}
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
return lock_status;
}
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
eviction:
ocf_metadata_start_exclusive_access(metadata_lock);
@@ -505,7 +505,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
/* Read-lock hash buckets associated with request target core & LBAs
* (core lines) to ensure that the cache mapping for these core lines does
* not change during traversal */
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Traverse to check if request is mapped fully */
ocf_engine_traverse(req);
@@ -513,7 +513,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
mapped = ocf_engine_is_mapped(req);
if (mapped) {
lock = lock_clines(req, engine_cbs);
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
return lock;
}
@@ -522,7 +522,7 @@ int ocf_engine_prepare_clines(struct ocf_request *req,
req->cache->promotion_policy, req);
if (!promote) {
ocf_req_set_mapping_error(req);
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
return lock;
}
@@ -676,11 +676,11 @@ static int _ocf_engine_refresh(struct ocf_request *req)
int result;
/* Check under metadata RD lock */
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
result = ocf_engine_check(req);
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
if (result == 0) {

View File

@@ -170,7 +170,7 @@ int _ocf_discard_step_do(struct ocf_request *req)
if (ocf_engine_mapped_count(req)) {
/* There are mapped cache lines, need to remove them */
ocf_req_hash_lock_wr(req);
ocf_hb_req_prot_lock_wr(req);
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(req);
@@ -181,16 +181,16 @@ int _ocf_discard_step_do(struct ocf_request *req)
_ocf_discard_step_complete);
}
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
}
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Even if no cachelines are mapped they could be tracked in promotion
* policy. RD lock suffices. */
ocf_promotion_req_purge(req->cache->promotion_policy, req);
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
OCF_DEBUG_RQ(req, "Discard");
_ocf_discard_step_complete(req, 0);
@@ -228,7 +228,7 @@ static int _ocf_discard_step(struct ocf_request *req)
0));
ocf_req_hash(req);
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Traverse to check if request is mapped fully */
ocf_engine_traverse(req);
@@ -240,7 +240,7 @@ static int _ocf_discard_step(struct ocf_request *req)
lock = OCF_LOCK_ACQUIRED;
}
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
if (lock >= 0) {
if (OCF_LOCK_ACQUIRED == lock) {

View File

@@ -72,14 +72,14 @@ static int _ocf_read_fast_do(struct ocf_request *req)
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
ocf_hb_req_prot_lock_wr(req);
/* Probably some cache lines are assigned to the wrong
* partition. Need to move them to a new one
*/
ocf_part_move(req);
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
}
/* Submit IO */
@@ -119,7 +119,7 @@ int ocf_read_fast(struct ocf_request *req)
/*- Metadata RD access -----------------------------------------------*/
ocf_req_hash(req);
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Traverse request to check if there is a hit */
ocf_engine_traverse(req);
@@ -133,7 +133,7 @@ int ocf_read_fast(struct ocf_request *req)
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
}
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
if (hit && part_has_space) {
OCF_DEBUG_RQ(req, "Fast path success");
@@ -189,7 +189,7 @@ int ocf_write_fast(struct ocf_request *req)
/*- Metadata RD access -----------------------------------------------*/
ocf_req_hash(req);
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Traverse request to check if there is a hit */
ocf_engine_traverse(req);
@@ -203,7 +203,7 @@ int ocf_write_fast(struct ocf_request *req)
lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
}
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
if (mapped && part_has_space) {
if (lock >= 0) {

View File

@@ -43,9 +43,9 @@ static int _ocf_invalidate_do(struct ocf_request *req)
ENV_BUG_ON(env_atomic_read(&req->req_remaining));
ocf_req_hash_lock_wr(req);
ocf_hb_req_prot_lock_wr(req);
ocf_purge_map_info(req);
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
env_atomic_inc(&req->req_remaining);

View File

@@ -56,10 +56,10 @@ int ocf_read_pt_do(struct ocf_request *req)
ocf_req_get(req);
if (req->info.dirty_any) {
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Need to clean, start it */
ocf_engine_clean(req);
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
/* Do not process, because first we need to clean the request */
ocf_req_put(req);
@@ -70,14 +70,14 @@ int ocf_read_pt_do(struct ocf_request *req)
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
ocf_hb_req_prot_lock_wr(req);
/* Probably some cache lines are assigned to the wrong
* partition. Need to move them to a new one
*/
ocf_part_move(req);
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
}
/* Submit read IO to the core */
@@ -115,7 +115,7 @@ int ocf_read_pt(struct ocf_request *req)
req->io_if = &_io_if_pt_resume;
ocf_req_hash(req);
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(req);
@@ -134,7 +134,7 @@ int ocf_read_pt(struct ocf_request *req)
}
}
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
if (use_cache) {
/*

View File

@@ -151,12 +151,12 @@ static int _ocf_read_generic_do(struct ocf_request *req)
if (ocf_engine_is_miss(req)) {
if (req->info.dirty_any) {
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Request is dirty, need to clean request */
ocf_engine_clean(req);
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
/* We need to clean request before processing, return */
ocf_req_put(req);
@@ -164,25 +164,25 @@ static int _ocf_read_generic_do(struct ocf_request *req)
return 0;
}
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Set valid status bits map */
ocf_set_valid_map_info(req);
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
}
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
ocf_hb_req_prot_lock_wr(req);
/* Probably some cache lines are assigned to the wrong
* partition. Need to move them to a new one
*/
ocf_part_move(req);
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
}
OCF_DEBUG_RQ(req, "Submit");

View File

@@ -23,12 +23,12 @@ int ocf_write_wa(struct ocf_request *req)
ocf_req_hash(req);
ocf_req_hash_lock_rd(req); /*- Metadata RD access -----------------------*/
ocf_hb_req_prot_lock_rd(req); /*- Metadata RD access -----------------------*/
/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(req);
ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/
ocf_hb_req_prot_unlock_rd(req); /*- END Metadata RD access -----------------*/
if (ocf_engine_is_hit(req)) {
ocf_req_clear(req);

View File

@@ -28,20 +28,20 @@ static const struct ocf_io_if _io_if_wb_resume = {
static void _ocf_write_wb_update_bits(struct ocf_request *req)
{
if (ocf_engine_is_miss(req)) {
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Update valid status bits */
ocf_set_valid_map_info(req);
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
}
if (!ocf_engine_is_dirty_all(req)) {
ocf_req_hash_lock_wr(req);
ocf_hb_req_prot_lock_wr(req);
/* set dirty bits, and mark if metadata flushing is required */
ocf_set_dirty_map_info(req);
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
}
ocf_req_set_cleaning_hot(req);
@@ -127,14 +127,14 @@ static inline void _ocf_write_wb_submit(struct ocf_request *req)
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
ocf_hb_req_prot_lock_wr(req);
/* Probably some cache lines are assigned to the wrong
* partition. Need to move them to a new one
*/
ocf_part_move(req);
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
}
OCF_DEBUG_RQ(req, "Submit Data");

View File

@@ -96,12 +96,12 @@ static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req)
env_atomic_set(&req->req_remaining, 1); /* One core IO */
ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/
ocf_hb_req_prot_lock_wr(req); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(req);
ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
if (req->info.flush_metadata) {
/* Request was dirty and needs to flush metadata */
@@ -191,7 +191,7 @@ int ocf_write_wi(struct ocf_request *req)
&_io_if_wi_core_write;
ocf_req_hash(req);
ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
ocf_hb_req_prot_lock_rd(req); /*- Metadata READ access, No eviction --------*/
/* Traverse to check if request is mapped fully */
ocf_engine_traverse(req);
@@ -203,7 +203,7 @@ int ocf_write_wi(struct ocf_request *req)
lock = OCF_LOCK_ACQUIRED;
}
ocf_req_hash_unlock_rd(req); /*- END Metadata READ access----------------*/
ocf_hb_req_prot_unlock_rd(req); /*- END Metadata READ access----------------*/
if (lock >= 0) {
if (lock == OCF_LOCK_ACQUIRED) {

View File

@@ -213,7 +213,7 @@ int ocf_read_wo(struct ocf_request *req)
req->io_if = &_io_if_wo_resume;
ocf_req_hash(req);
ocf_req_hash_lock_rd(req); /*- Metadata RD access -----------------------*/
ocf_hb_req_prot_lock_rd(req); /*- Metadata RD access -----------------------*/
/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(req);
@@ -225,7 +225,7 @@ int ocf_read_wo(struct ocf_request *req)
lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
}
ocf_req_hash_unlock_rd(req); /*- END Metadata RD access -----------------*/
ocf_hb_req_prot_unlock_rd(req); /*- END Metadata RD access -----------------*/
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {

View File

@@ -98,16 +98,16 @@ static inline void _ocf_write_wt_submit(struct ocf_request *req)
static void _ocf_write_wt_update_bits(struct ocf_request *req)
{
if (ocf_engine_is_miss(req)) {
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
/* Update valid status bits */
ocf_set_valid_map_info(req);
ocf_req_hash_unlock_rd(req);
ocf_hb_req_prot_unlock_rd(req);
}
if (req->info.dirty_any) {
ocf_req_hash_lock_wr(req);
ocf_hb_req_prot_lock_wr(req);
/* Writes go to SSD and HDD, need to update status bits from
* dirty to clean
@@ -115,20 +115,20 @@ static void _ocf_write_wt_update_bits(struct ocf_request *req)
ocf_set_clean_map_info(req);
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
}
if (ocf_engine_needs_repart(req)) {
OCF_DEBUG_RQ(req, "Re-Part");
ocf_req_hash_lock_wr(req);
ocf_hb_req_prot_lock_wr(req);
/* Probably some cache lines are assigned to the wrong
* partition. Need to move them to a new one
*/
ocf_part_move(req);
ocf_req_hash_unlock_wr(req);
ocf_hb_req_prot_unlock_wr(req);
}
}

View File

@@ -23,12 +23,12 @@ static int ocf_zero_purge(struct ocf_request *req)
} else {
/* There are mapped cache lines, need to remove them */
ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/
ocf_hb_req_prot_lock_wr(req); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(req);
ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
}
ocf_req_unlock_wr(req);

View File

@@ -9,7 +9,6 @@
#include "ocf/ocf.h"
#include "lru.h"
#include "lru_structs.h"
#include "../ocf_request.h"
#define OCF_TO_EVICTION_MIN 128UL
#define OCF_PENDING_EVICTION_LIMIT 512UL
@@ -17,6 +16,7 @@
#define OCF_NUM_EVICTION_LISTS 32
struct ocf_user_part;
struct ocf_request;
struct eviction_policy {
union {

View File

@@ -304,6 +304,7 @@ void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
OCF_CORE_MAX, ULLONG_MAX);
}
/* must be called under global metadata read (shared) lock */
void ocf_metadata_start_collision_shared_access(struct ocf_cache *cache,
ocf_cache_line_t line)
{
@@ -316,6 +317,7 @@ void ocf_metadata_start_collision_shared_access(struct ocf_cache *cache,
ocf_collision_start_shared_access(&cache->metadata.lock, page);
}
/* must be called under global metadata read (shared) lock */
void ocf_metadata_end_collision_shared_access(struct ocf_cache *cache,
ocf_cache_line_t line)
{

View File

@@ -215,9 +215,11 @@ static int metadata_io_restart_req(struct ocf_request *req)
/* Fill with the latest metadata. */
if (m_req->req.rw == OCF_WRITE) {
ocf_metadata_start_shared_access(&cache->metadata.lock);
ocf_metadata_start_shared_access(&cache->metadata.lock,
m_req->page % OCF_NUM_GLOBAL_META_LOCKS);
metadata_io_req_fill(m_req);
ocf_metadata_end_shared_access(&cache->metadata.lock);
ocf_metadata_end_shared_access(&cache->metadata.lock,
m_req->page % OCF_NUM_GLOBAL_META_LOCKS);
}
io = ocf_new_cache_io(cache, req->io_queue,
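Unlike the request path, the metadata I/O path above derives its shard index deterministically from the page number. A short sketch of the implication (illustration only):

    /* A given metadata page always maps to the same shard, so concurrent
     * metadata I/O for different pages spreads across shards. Any index
     * would be correct for shared access; the modulo is just a cheap,
     * deterministic way to pick one. */
    unsigned lock_idx = m_req->page % OCF_NUM_GLOBAL_META_LOCKS;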

View File

@@ -43,9 +43,18 @@ struct ocf_cache_line_settings {
uint64_t sector_end;
};
#define OCF_METADATA_GLOBAL_LOCK_IDX_BITS 2
#define OCF_NUM_GLOBAL_META_LOCKS (1 << (OCF_METADATA_GLOBAL_LOCK_IDX_BITS))
struct ocf_metadata_global_lock {
env_rwsem sem;
} __attribute__((aligned(64)));
struct ocf_metadata_lock
{
env_rwsem global; /*!< global metadata lock (GML) */
struct ocf_metadata_global_lock global[OCF_NUM_GLOBAL_META_LOCKS];
/*!< global metadata lock (GML) */
env_rwlock status; /*!< Fast lock for status bits */
env_spinlock eviction[OCF_NUM_EVICTION_LISTS]; /*!< Fast lock for eviction policy */
env_rwsem *hash; /*!< Hash bucket locks */
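With two index bits the array holds 1 << 2 = 4 shards, and aligned(64) pads each shard to its own cache line so read-side acquisitions on different shards do not false-share. A compile-time sketch of those properties (C11 static_assert, illustration only):

    #include <assert.h>

    static_assert((1 << OCF_METADATA_GLOBAL_LOCK_IDX_BITS) ==
            OCF_NUM_GLOBAL_META_LOCKS,
            "shard count matches the lock_idx bit width");
    static_assert(sizeof(struct ocf_metadata_global_lock) % 64 == 0,
            "each shard occupies whole cache lines, avoiding false sharing");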

View File

@@ -2344,11 +2344,11 @@ ocf_promotion_t ocf_mngt_cache_promotion_get_policy(ocf_cache_t cache)
{
ocf_promotion_t result;
ocf_metadata_start_shared_access(&cache->metadata.lock);
ocf_metadata_start_shared_access(&cache->metadata.lock, 0);
result = cache->conf_meta->promotion_policy_type;
ocf_metadata_end_shared_access(&cache->metadata.lock);
ocf_metadata_end_shared_access(&cache->metadata.lock, 0);
return result;
}
@@ -2358,11 +2358,11 @@ int ocf_mngt_cache_promotion_get_param(ocf_cache_t cache, ocf_promotion_t type,
{
int result;
ocf_metadata_start_shared_access(&cache->metadata.lock);
ocf_metadata_start_shared_access(&cache->metadata.lock, 0);
result = ocf_promotion_get_param(cache, type, param_id, param_value);
ocf_metadata_end_shared_access(&cache->metadata.lock);
ocf_metadata_end_shared_access(&cache->metadata.lock, 0);
return result;
}

View File

@@ -59,10 +59,13 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
ocf_core_id_t iter_core_id;
ocf_cache_line_t curr_cline, prev_cline;
uint32_t hash, num_hash = cache->device->hash_table_entries;
unsigned lock_idx;
for (hash = 0; hash < num_hash;) {
prev_cline = cache->device->collision_table_entries;
ocf_metadata_lock_hash_wr(&cache->metadata.lock, hash);
lock_idx = ocf_metadata_concurrency_next_idx(cache->mngt_queue);
ocf_hb_id_prot_lock_wr(&cache->metadata.lock, lock_idx, hash);
curr_cline = ocf_metadata_get_hash(cache, hash);
while (curr_cline != cache->device->collision_table_entries) {
@@ -91,7 +94,7 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
else
curr_cline = ocf_metadata_get_hash(cache, hash);
}
ocf_metadata_unlock_hash_wr(&cache->metadata.lock, hash);
ocf_hb_id_prot_unlock_wr(&cache->metadata.lock, lock_idx, hash);
/* Check whether all the cachelines from the hash bucket were sparsed */
if (curr_cline == cache->device->collision_table_entries)

View File

@@ -18,6 +18,9 @@ struct ocf_queue {
struct list_head io_list;
env_spinlock io_list_lock;
/* per-queue free running global metadata lock index */
unsigned lock_idx;
/* Tracing reference counter */
env_atomic64 trace_ref_cntr;

View File

@@ -6,7 +6,7 @@
#include "ocf/ocf.h"
#include "ocf_request.h"
#include "ocf_cache_priv.h"
#include "ocf_queue_priv.h"
#include "concurrency/ocf_metadata_concurrency.h"
#include "utils/utils_cache_line.h"
#define OCF_UTILS_RQ_DEBUG 0
@@ -205,6 +205,8 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
req->discard.nr_sects = BYTES_TO_SECTORS(bytes);
req->discard.handled = 0;
req->lock_idx = ocf_metadata_concurrency_next_idx(queue);
return req;
}
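Because the index is sampled once at allocation and stored in the request, every protected hash-bucket operation on that request hits the same shard, even when lock and unlock happen on different code paths (e.g. across engine resume). A usage sketch with the interfaces from this commit:

    struct ocf_request *req = ocf_req_new(queue, core, addr, bytes, rw);

    ocf_hb_req_prot_lock_rd(req);   /* down_read on global[req->lock_idx] */
    /* ... traverse / check mapping ... */
    ocf_hb_req_prot_unlock_rd(req); /* up_read on the same shard */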

View File

@@ -9,6 +9,7 @@
#include "ocf_env.h"
#include "ocf_io_priv.h"
#include "engine/cache_engine.h"
#include "metadata/metadata_structs.h"
struct ocf_req_allocator;
@@ -190,6 +191,9 @@ struct ocf_request {
uint8_t part_evict : 1;
/* !< Some cachelines from request's partition must be evicted */
uint8_t lock_idx : OCF_METADATA_GLOBAL_LOCK_IDX_BITS;
/* !< Selected global metadata read lock */
log_sid_t sid;
/*!< Tracing sequence ID */

View File

@@ -1,6 +1,6 @@
/*
* <tested_file_path>src/concurrency/ocf_metadata_concurrency.c</tested_file_path>
* <tested_function>ocf_req_hash_lock_rd</tested_function>
* <tested_function>ocf_hb_req_prot_lock_rd</tested_function>
* <functions_to_leave>
* INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
* ONE FUNCTION PER LINE
@@ -23,7 +23,7 @@
#include "concurrency/ocf_metadata_concurrency.c/ocf_metadata_concurrency_generated_wraps.c"
void __wrap_ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock,
void __wrap_ocf_hb_id_naked_lock(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
check_expected(hash);
@@ -62,15 +62,15 @@ static void _test_lock_order(struct ocf_request* req,
req->map[i].hash = hash[i];
for (i = 0; i < expected_call_count; i++) {
expect_function_call(__wrap_ocf_metadata_hash_lock);
expect_value(__wrap_ocf_metadata_hash_lock, hash, expected_call[i]);
expect_function_call(__wrap_ocf_hb_id_naked_lock);
expect_value(__wrap_ocf_hb_id_naked_lock, hash, expected_call[i]);
}
ocf_req_hash_lock_rd(req);
ocf_hb_req_prot_lock_rd(req);
}
static void ocf_req_hash_lock_rd_test01(void **state)
static void ocf_hb_req_prot_lock_rd_test01(void **state)
{
struct ocf_request *req = alloc_req();
struct {
@@ -126,10 +126,10 @@ static void ocf_req_hash_lock_rd_test01(void **state)
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(ocf_req_hash_lock_rd_test01)
cmocka_unit_test(ocf_hb_req_prot_lock_rd_test01)
};
print_message("Unit test for ocf_req_hash_lock_rd\n");
print_message("Unit test for ocf_hb_req_prot_lock_rd\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}