Split global metadata lock

Divide the single global lock instance into 4 instances to reduce contention
in scenarios with multiple concurrent read locks.
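
As a rough illustration (a minimal standalone sketch of the scheme, not OCF
code: pthread_rwlock_t stands in for env_rwsem and the lock count is
hard-coded to 4), readers take just one of the semaphores, selected by an
index, while exclusive access takes all of them, so concurrent readers stop
contending on a single primitive:

#include <pthread.h>

#define NUM_LOCKS 4	/* stands in for OCF_NUM_GLOBAL_META_LOCKS */

static pthread_rwlock_t global[NUM_LOCKS] = {
	PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
	PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
};

/* Shared (read) access: take only the lock selected by lock_idx, so
 * readers using different indices touch different semaphores. */
static void start_shared(unsigned lock_idx)
{
	pthread_rwlock_rdlock(&global[lock_idx % NUM_LOCKS]);
}

static void end_shared(unsigned lock_idx)
{
	pthread_rwlock_unlock(&global[lock_idx % NUM_LOCKS]);
}

/* Exclusive (write) access: take every lock, excluding all readers
 * no matter which index they picked. */
static void start_exclusive(void)
{
	unsigned i;

	for (i = 0; i < NUM_LOCKS; i++)
		pthread_rwlock_wrlock(&global[i]);
}

static void end_exclusive(void)
{
	unsigned i;

	for (i = NUM_LOCKS; i > 0; i--)
		pthread_rwlock_unlock(&global[i - 1]);
}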

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski
Date:   2020-12-28 10:34:16 +01:00
Commit: 05780c98ed
Parent: 10c3c3de36
11 changed files with 148 additions and 68 deletions


@@ -5,12 +5,14 @@
#include "ocf_metadata_concurrency.h"
#include "../metadata/metadata_misc.h"
#include "../ocf_queue_priv.h"
int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
{
int err = 0;
unsigned evp_iter;
unsigned part_iter;
unsigned global_iter;
for (evp_iter = 0; evp_iter < OCF_NUM_EVICTION_LISTS; evp_iter++) {
err = env_spinlock_init(&metadata_lock->eviction[evp_iter]);
@@ -20,22 +22,29 @@ int ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
env_rwlock_init(&metadata_lock->status);
err = env_rwsem_init(&metadata_lock->global);
if (err)
goto rwsem_err;
for (global_iter = 0; global_iter < OCF_NUM_GLOBAL_META_LOCKS;
global_iter++) {
err = env_rwsem_init(&metadata_lock->global[global_iter]);
if (err)
goto global_err;
}
for (part_iter = 0; part_iter < OCF_IO_CLASS_MAX; part_iter++) {
err = env_spinlock_init(&metadata_lock->partition[part_iter]);
if (err)
goto spinlocks_err;
goto partition_err;
}
return err;
spinlocks_err:
partition_err:
while (part_iter--)
env_spinlock_destroy(&metadata_lock->partition[part_iter]);
rwsem_err:
global_err:
while (global_iter--)
env_rwsem_destroy(&metadata_lock->global[global_iter]);
env_rwlock_destroy(&metadata_lock->status);
eviction_err:
@@ -55,8 +64,10 @@ void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock)
for (i = 0; i < OCF_NUM_EVICTION_LISTS; i++)
env_spinlock_destroy(&metadata_lock->eviction[i]);
for (i = 0; i < OCF_NUM_GLOBAL_META_LOCKS; i++)
env_rwsem_destroy(&metadata_lock->global[i]);
env_rwlock_destroy(&metadata_lock->status);
env_rwsem_destroy(&metadata_lock->global);
}
int ocf_metadata_concurrency_attached_init(
@@ -140,34 +151,68 @@ void ocf_metadata_concurrency_attached_deinit(
void ocf_metadata_start_exclusive_access(
struct ocf_metadata_lock *metadata_lock)
{
env_rwsem_down_write(&metadata_lock->global);
unsigned i;
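/* Exclusive (write) access takes every one of the underlying read-write
   semaphores, so it excludes readers regardless of which lock_idx they used. */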
for (i = 0; i < OCF_NUM_GLOBAL_META_LOCKS; i++) {
env_rwsem_down_write(&metadata_lock->global[i]);
}
}
int ocf_metadata_try_start_exclusive_access(
struct ocf_metadata_lock *metadata_lock)
{
return env_rwsem_down_write_trylock(&metadata_lock->global);
unsigned i;
int error;
for (i = 0; i < OCF_NUM_GLOBAL_META_LOCKS; i++) {
error = env_rwsem_down_write_trylock(&metadata_lock->global[i]);
if (error)
break;
}
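/* Trylock failed part-way: release the write locks acquired so far. */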
if (error) {
while (i--) {
env_rwsem_up_write(&metadata_lock->global[i]);
}
}
return error;
}
void ocf_metadata_end_exclusive_access(
struct ocf_metadata_lock *metadata_lock)
{
env_rwsem_up_write(&metadata_lock->global);
unsigned i;
for (i = OCF_NUM_GLOBAL_META_LOCKS; i > 0; i--)
env_rwsem_up_write(&metadata_lock->global[i - 1]);
}
void ocf_metadata_start_shared_access(struct ocf_metadata_lock *metadata_lock)
/* lock_idx determines which of the underlying R/W locks is acquired for read.
The goal is to spread calls across all available underlying locks to reduce
contention on a single RW semaphore primitive. Technically any value is
correct, but picking wisely allows for higher read throughput:
* a free-running per-cpu counter sounds good,
* for rarely exercised code paths (e.g. management) any value will do.
*/
void ocf_metadata_start_shared_access(
struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx)
{
env_rwsem_down_read(&metadata_lock->global);
env_rwsem_down_read(&metadata_lock->global[lock_idx]);
}
int ocf_metadata_try_start_shared_access(struct ocf_metadata_lock *metadata_lock)
int ocf_metadata_try_start_shared_access(
struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx)
{
return env_rwsem_down_read_trylock(&metadata_lock->global);
return env_rwsem_down_read_trylock(&metadata_lock->global[lock_idx]);
}
void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock)
void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx)
{
env_rwsem_up_read(&metadata_lock->global);
env_rwsem_up_read(&metadata_lock->global[lock_idx]);
}
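/* To illustrate the comment above (a hedged sketch; this helper is not part
   of the diff -- in the commit itself the chosen index travels with the
   request as req->lock_idx): a free-running counter reduced modulo the
   number of global locks yields a valid lock_idx, for example: */

/* Sketch only (assumed helper, GCC/Clang __thread): a free-running
 * per-thread counter used to pick which global semaphore a reader takes.
 * Any value in range is correct; the counter merely spreads readers
 * across the OCF_NUM_GLOBAL_META_LOCKS underlying locks. */
static unsigned pick_lock_idx(void)
{
	static __thread unsigned counter;

	return counter++ % OCF_NUM_GLOBAL_META_LOCKS;
}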
/* NOTE: Calling 'naked' lock/unlock requires caller to hold global metadata
@@ -267,74 +312,74 @@ void ocf_hb_cline_naked_unlock_wr(struct ocf_metadata_lock *metadata_lock,
/* common part of protected hash bucket lock routines */
static inline void ocf_hb_id_prot_lock_common(
struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
uint32_t lock_idx, ocf_cache_line_t hash, int rw)
{
ocf_metadata_start_shared_access(metadata_lock);
ocf_metadata_start_shared_access(metadata_lock, lock_idx);
ocf_hb_id_naked_lock(metadata_lock, hash, rw);
}
/* common part of protected hash bucket unlock routines */
static inline void ocf_hb_id_prot_unlock_common(
struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
uint32_t lock_idx, ocf_cache_line_t hash, int rw)
{
ocf_hb_id_naked_unlock(metadata_lock, hash, rw);
ocf_metadata_end_shared_access(metadata_lock);
ocf_metadata_end_shared_access(metadata_lock, lock_idx);
}
/* NOTE: caller can lock at most one hash bucket at a time using protected
variants of lock routines. */
void ocf_hb_cline_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
uint32_t lock_idx, uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_hb_id_prot_lock_common(metadata_lock, hash,
OCF_METADATA_WR);
ocf_hb_id_prot_lock_common(metadata_lock, lock_idx,
hash, OCF_METADATA_WR);
}
void ocf_hb_cline_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
uint32_t lock_idx, uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_hb_id_prot_unlock_common(metadata_lock, hash,
OCF_METADATA_WR);
ocf_hb_id_prot_unlock_common(metadata_lock, lock_idx,
hash, OCF_METADATA_WR);
}
void ocf_hb_cline_prot_lock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
uint32_t lock_idx, uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_hb_id_prot_lock_common(metadata_lock, hash,
OCF_METADATA_RD);
ocf_hb_id_prot_lock_common(metadata_lock, lock_idx,
hash, OCF_METADATA_RD);
}
void ocf_hb_cline_prot_unlock_rd(struct ocf_metadata_lock *metadata_lock,
uint32_t core_id, uint64_t core_line)
uint32_t lock_idx, uint32_t core_id, uint64_t core_line)
{
ocf_cache_line_t hash = ocf_metadata_hash_func(metadata_lock->cache,
core_line, core_id);
ocf_hb_id_prot_unlock_common(metadata_lock, hash,
OCF_METADATA_RD);
ocf_hb_id_prot_unlock_common(metadata_lock, lock_idx,
hash, OCF_METADATA_RD);
}
void ocf_hb_hash_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash)
void ocf_hb_id_prot_lock_wr(struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx, ocf_cache_line_t hash)
{
ocf_hb_id_prot_lock_common(metadata_lock, hash,
ocf_hb_id_prot_lock_common(metadata_lock, lock_idx, hash,
OCF_METADATA_WR);
}
void ocf_hb_hash_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash)
void ocf_hb_id_prot_unlock_wr(struct ocf_metadata_lock *metadata_lock,
unsigned lock_idx, ocf_cache_line_t hash)
{
ocf_hb_id_prot_unlock_common(metadata_lock, hash,
ocf_hb_id_prot_unlock_common(metadata_lock, lock_idx, hash,
OCF_METADATA_WR);
}
@@ -392,7 +437,8 @@ void ocf_hb_req_prot_lock_rd(struct ocf_request *req)
{
ocf_cache_line_t hash;
ocf_metadata_start_shared_access(&req->cache->metadata.lock);
ocf_metadata_start_shared_access(&req->cache->metadata.lock,
req->lock_idx);
for_each_req_hash_asc(req, hash) {
ocf_hb_id_naked_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
@@ -407,14 +453,16 @@ void ocf_hb_req_prot_unlock_rd(struct ocf_request *req)
ocf_hb_id_naked_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
}
ocf_metadata_end_shared_access(&req->cache->metadata.lock);
ocf_metadata_end_shared_access(&req->cache->metadata.lock,
req->lock_idx);
}
void ocf_hb_req_prot_lock_wr(struct ocf_request *req)
{
ocf_cache_line_t hash;
ocf_metadata_start_shared_access(&req->cache->metadata.lock);
ocf_metadata_start_shared_access(&req->cache->metadata.lock,
req->lock_idx);
for_each_req_hash_asc(req, hash) {
ocf_hb_id_naked_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
@@ -443,7 +491,8 @@ void ocf_hb_req_prot_unlock_wr(struct ocf_request *req)
ocf_hb_id_naked_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
}
ocf_metadata_end_shared_access(&req->cache->metadata.lock);
ocf_metadata_end_shared_access(&req->cache->metadata.lock,
req->lock_idx);
}
void ocf_collision_start_shared_access(struct ocf_metadata_lock *metadata_lock,