Introduce hash bucket locks

There is one RW lock per hash bucket. A write lock is required
to map a cache line; a read lock is sufficient for traversal.
Hash bucket locks are always acquired under the global metadata
read lock. This ensures mutual exclusion with the eviction and
management paths, where the global metadata write lock is held.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski <adam.j.rutkowski@intel.com>
Date:   2019-08-01 16:57:14 -04:00
parent 42f65c3fbb
commit d91012f4b4
7 changed files with 298 additions and 87 deletions
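
For orientation, here is a minimal sketch of how the request-path API added by this commit is meant to be used. The function my_engine_step and the surrounding flow are hypothetical and assume the OCF request and metadata concurrency headers are in scope; only the ocf_req_hash* calls come from this commit.

/* Hypothetical I/O-path sketch; error handling and engine logic omitted. */
static void my_engine_step(struct ocf_request *req)
{
        /* compute req->map[i].hash for every core line in the request */
        ocf_req_hash(req);

        /* global metadata shared access + per-bucket read locks:
         * sufficient for traversing hash buckets */
        ocf_req_hash_lock_rd(req);

        /* ... traverse metadata; suppose a cache line must be mapped ... */

        /* mapping requires bucket write locks; the upgrade drops the
         * per-bucket read locks and re-acquires them in write mode */
        ocf_req_hash_lock_upgrade(req);

        /* ... map cache lines ... */

        /* drops the bucket write locks and the global shared access */
        ocf_req_hash_unlock_wr(req);
}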

@@ -5,21 +5,219 @@
#include "ocf_metadata_concurrency.h"
void ocf_metadata_concurrency_init(struct ocf_cache *cache)
void ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock)
{
env_spinlock_init(&cache->metadata.lock.eviction);
env_rwlock_init(&cache->metadata.lock.status);
env_rwsem_init(&cache->metadata.lock.collision);
env_spinlock_init(&metadata_lock->eviction);
env_rwlock_init(&metadata_lock->status);
env_rwsem_init(&metadata_lock->global);
}
void ocf_metadata_concurrency_deinit(struct ocf_cache *cache)
void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock)
{
env_spinlock_destroy(&cache->metadata.lock.eviction);
env_rwlock_destroy(&cache->metadata.lock.status);
env_rwsem_destroy(&cache->metadata.lock.collision);
env_spinlock_destroy(&metadata_lock->eviction);
env_rwlock_destroy(&metadata_lock->status);
env_rwsem_destroy(&metadata_lock->global);
}
int ocf_metadata_concurrency_attached_init(struct ocf_cache *cache)
int ocf_metadata_concurrency_attached_init(
struct ocf_metadata_lock *metadata_lock,
uint64_t hash_table_entries)
{
uint64_t i;
metadata_lock->num_hash_entries = hash_table_entries;
metadata_lock->hash = env_vzalloc(sizeof(env_rwsem) *
hash_table_entries);
if (!metadata_lock->hash)
return -OCF_ERR_NO_MEM;
for (i = 0; i < hash_table_entries; i++)
env_rwsem_init(&metadata_lock->hash[i]);
return 0;
}
void ocf_metadata_concurrency_attached_deinit(
struct ocf_metadata_lock *metadata_lock)
{
uint64_t i;
for (i = 0; i < metadata_lock->num_hash_entries; i++)
env_rwsem_destroy(&metadata_lock->hash[i]);
env_vfree(metadata_lock->hash);
}
void ocf_metadata_start_exclusive_access(
struct ocf_metadata_lock *metadata_lock)
{
env_rwsem_down_write(&metadata_lock->global);
}
int ocf_metadata_try_start_exclusive_access(
struct ocf_metadata_lock *metadata_lock)
{
return env_rwsem_down_write_trylock(&metadata_lock->global);
}
void ocf_metadata_end_exclusive_access(
struct ocf_metadata_lock *metadata_lock)
{
env_rwsem_up_write(&metadata_lock->global);
}
void ocf_metadata_start_shared_access(
struct ocf_metadata_lock *metadata_lock)
{
env_rwsem_down_read(&metadata_lock->global);
}
int ocf_metadata_try_start_shared_access(
struct ocf_metadata_lock *metadata_lock)
{
return env_rwsem_down_read_trylock(&metadata_lock->global);
}
void ocf_metadata_end_shared_access(struct ocf_metadata_lock *metadata_lock)
{
env_rwsem_up_read(&metadata_lock->global);
}
void ocf_metadata_hash_lock(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
ENV_BUG_ON(hash >= metadata_lock->num_hash_entries);
if (rw == OCF_METADATA_WR)
env_rwsem_down_write(&metadata_lock->hash[hash]);
else if (rw == OCF_METADATA_RD)
env_rwsem_down_read(&metadata_lock->hash[hash]);
else
ENV_BUG();
}
void ocf_metadata_hash_unlock(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
ENV_BUG_ON(hash >= metadata_lock->num_hash_entries);
if (rw == OCF_METADATA_WR)
env_rwsem_up_write(&metadata_lock->hash[hash]);
else if (rw == OCF_METADATA_RD)
env_rwsem_up_read(&metadata_lock->hash[hash]);
else
ENV_BUG();
}
int ocf_metadata_hash_try_lock(struct ocf_metadata_lock *metadata_lock,
ocf_cache_line_t hash, int rw)
{
int result = -1;
ENV_BUG_ON(hash >= metadata_lock->num_hash_entries);
if (rw == OCF_METADATA_WR) {
result = env_rwsem_down_write_trylock(
&metadata_lock->hash[hash]);
} else if (rw == OCF_METADATA_RD) {
result = env_rwsem_down_read_trylock(
&metadata_lock->hash[hash]);
} else {
ENV_BUG();
}
if (!result)
return -1;
return 0;
}
#define _NUM_HASH_ENTRIES req->cache->metadata.lock.num_hash_entries
/*
* Iterate over hash buckets for all core lines in the request in ascending hash
* bucket value order. Each hash bucket is visited only once.
*
* @i is used as the iteration counter, starting from 0
* @hash stores the hash value for each iteration
* @start is an internal helper variable. It is set to the index of the first
* occurrence of the hash with the minimal value within the request.
*
* Example hash iteration order for _NUM_HASH_ENTRIES == 5:
*   Request hashes                     Iteration order     start
*   [2, 3, 4]                          [2, 3, 4]           0
*   [2, 3, 4, 0]                       [0, 2, 3, 4]        3
*   [2, 3, 4, 0, 1, 2, 3, 4, 0, 1]     [0, 1, 2, 3, 4]     3
*   [4, 0]                             [0, 4]              1
*   [0, 1, 2, 3, 4, 0, 1]              [0, 1, 2, 3, 4]     0
*
*/
#define for_each_req_hash_asc(req, i, hash, start) \
for (i = 0, start = (req->map[0].hash + req->core_line_count <= \
_NUM_HASH_ENTRIES) ? 0 : (_NUM_HASH_ENTRIES - req->map[0].hash)\
% _NUM_HASH_ENTRIES, hash = req->map[start].hash; \
i < OCF_MIN(req->core_line_count, _NUM_HASH_ENTRIES); \
i++, hash = req->map[(start + i) % req->core_line_count].hash)
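
The start/wrap arithmetic in this macro can be reproduced in isolation. Below is a standalone illustration (print_hash_order and its main() driver are hypothetical, not part of the patch); it assumes, as the macro does, that consecutive core lines hash to consecutive buckets modulo the table size, and it reproduces the example table from the comment above.

#include <stdio.h>

/* Mirrors the for_each_req_hash_asc ordering for a request whose first
 * core line hashes to first_hash and which spans core_line_count
 * consecutive core lines. */
static void print_hash_order(unsigned num_hash_entries, unsigned first_hash,
                unsigned core_line_count)
{
        unsigned count = core_line_count < num_hash_entries ?
                        core_line_count : num_hash_entries;
        /* index of the first occurrence of the smallest hash value */
        unsigned start = (first_hash + core_line_count <= num_hash_entries) ?
                        0 : (num_hash_entries - first_hash) % num_hash_entries;
        unsigned i, idx;

        for (i = 0; i < count; i++) {
                idx = (start + i) % core_line_count;
                printf("%u ", (first_hash + idx) % num_hash_entries);
        }
        printf("(start = %u)\n", start);
}

int main(void)
{
        /* the rows of the example table, _NUM_HASH_ENTRIES == 5 */
        print_hash_order(5, 2, 3);   /* 2 3 4         (start = 0) */
        print_hash_order(5, 2, 4);   /* 0 2 3 4       (start = 3) */
        print_hash_order(5, 2, 10);  /* 0 1 2 3 4     (start = 3) */
        print_hash_order(5, 4, 2);   /* 0 4           (start = 1) */
        print_hash_order(5, 0, 7);   /* 0 1 2 3 4     (start = 0) */
        return 0;
}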
void ocf_req_hash_lock_rd(struct ocf_request *req)
{
unsigned i, start;
ocf_cache_line_t hash;
ocf_metadata_start_shared_access(&req->cache->metadata.lock);
for_each_req_hash_asc(req, i, hash, start) {
ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
}
}
void ocf_req_hash_unlock_rd(struct ocf_request *req)
{
unsigned i, start;
ocf_cache_line_t hash;
for_each_req_hash_asc(req, i, hash, start) {
ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
}
ocf_metadata_end_shared_access(&req->cache->metadata.lock);
}
void ocf_req_hash_lock_wr(struct ocf_request *req)
{
unsigned i, start;
ocf_cache_line_t hash;
ocf_metadata_start_shared_access(&req->cache->metadata.lock);
for_each_req_hash_asc(req, i, hash, start) {
ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
}
}
void ocf_req_hash_lock_upgrade(struct ocf_request *req)
{
unsigned i, start;
ocf_cache_line_t hash;
for_each_req_hash_asc(req, i, hash, start) {
ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_RD);
}
for_each_req_hash_asc(req, i, hash, start) {
ocf_metadata_hash_lock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
}
}
void ocf_req_hash_unlock_wr(struct ocf_request *req)
{
unsigned i, start;
ocf_cache_line_t hash;
for_each_req_hash_asc(req, i, hash, start) {
ocf_metadata_hash_unlock(&req->cache->metadata.lock, hash,
OCF_METADATA_WR);
}
ocf_metadata_end_shared_access(&req->cache->metadata.lock);
}

@@ -10,125 +10,113 @@
#define OCF_METADATA_RD 0
#define OCF_METADATA_WR 1
void ocf_metadata_concurrency_init(struct ocf_cache *cache);
void ocf_metadata_concurrency_init(struct ocf_metadata_lock *metadata_lock);
void ocf_metadata_concurrency_deinit(struct ocf_cache *cache);
void ocf_metadata_concurrency_deinit(struct ocf_metadata_lock *metadata_lock);
int ocf_metadata_concurrency_attached_init(struct ocf_cache *cache);
int ocf_metadata_concurrency_attached_init(
struct ocf_metadata_lock *metadata_lock,
uint64_t hash_table_entries);
static inline void ocf_metadata_eviction_lock(struct ocf_cache *cache)
void ocf_metadata_concurrency_attached_deinit(
struct ocf_metadata_lock *metadata_lock);
static inline void ocf_metadata_eviction_lock(
struct ocf_metadata_lock *metadata_lock)
{
env_spinlock_lock(&cache->metadata.lock.eviction);
env_spinlock_lock(&metadata_lock->eviction);
}
static inline void ocf_metadata_eviction_unlock(struct ocf_cache *cache)
static inline void ocf_metadata_eviction_unlock(
struct ocf_metadata_lock *metadata_lock)
{
env_spinlock_unlock(&cache->metadata.lock.eviction);
env_spinlock_unlock(&metadata_lock->eviction);
}
#define OCF_METADATA_EVICTION_LOCK() \
ocf_metadata_eviction_lock(cache)
ocf_metadata_eviction_lock(&cache->metadata.lock)
#define OCF_METADATA_EVICTION_UNLOCK() \
ocf_metadata_eviction_unlock(cache)
ocf_metadata_eviction_unlock(&cache->metadata.lock)
static inline void ocf_metadata_lock(struct ocf_cache *cache, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwsem_down_write(&cache->metadata.lock.collision);
else if (rw == OCF_METADATA_RD)
env_rwsem_down_read(&cache->metadata.lock.collision);
else
ENV_BUG();
}
void ocf_metadata_start_exclusive_access(
struct ocf_metadata_lock *metadata_lock);
int ocf_metadata_try_start_exclusive_access(
struct ocf_metadata_lock *metadata_lock);
static inline void ocf_metadata_unlock(struct ocf_cache *cache, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwsem_up_write(&cache->metadata.lock.collision);
else if (rw == OCF_METADATA_RD)
env_rwsem_up_read(&cache->metadata.lock.collision);
else
ENV_BUG();
}
void ocf_metadata_end_exclusive_access(
struct ocf_metadata_lock *metadata_lock);
static inline int ocf_metadata_try_lock(struct ocf_cache *cache, int rw)
{
int result = 0;
int ocf_metadata_try_start_shared_access(
struct ocf_metadata_lock *metadata_lock);
if (rw == OCF_METADATA_WR) {
result = env_rwsem_down_write_trylock(
&cache->metadata.lock.collision);
} else if (rw == OCF_METADATA_RD) {
result = env_rwsem_down_read_trylock(
&cache->metadata.lock.collision);
} else {
ENV_BUG();
}
void ocf_metadata_start_shared_access(
struct ocf_metadata_lock *metadata_lock);
if (result)
return -1;
return 0;
}
void ocf_metadata_end_shared_access(
struct ocf_metadata_lock *metadata_lock);
static inline void ocf_metadata_status_bits_lock(
struct ocf_cache *cache, int rw)
struct ocf_metadata_lock *metadata_lock, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwlock_write_lock(&cache->metadata.lock.status);
env_rwlock_write_lock(&metadata_lock->status);
else if (rw == OCF_METADATA_RD)
env_rwlock_read_lock(&cache->metadata.lock.status);
env_rwlock_read_lock(&metadata_lock->status);
else
ENV_BUG();
}
static inline void ocf_metadata_status_bits_unlock(
struct ocf_cache *cache, int rw)
struct ocf_metadata_lock *metadata_lock, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwlock_write_unlock(&cache->metadata.lock.status);
env_rwlock_write_unlock(&metadata_lock->status);
else if (rw == OCF_METADATA_RD)
env_rwlock_read_unlock(&cache->metadata.lock.status);
env_rwlock_read_unlock(&metadata_lock->status);
else
ENV_BUG();
}
#define OCF_METADATA_LOCK_RD() \
ocf_metadata_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_LOCK_RD() ocf_metadata_start_shared_access( \
&cache->metadata.lock)
#define OCF_METADATA_UNLOCK_RD() \
ocf_metadata_unlock(cache, OCF_METADATA_RD)
#define OCF_METADATA_UNLOCK_RD() ocf_metadata_end_shared_access( \
&cache->metadata.lock)
#define OCF_METADATA_LOCK_RD_TRY() \
ocf_metadata_try_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_LOCK_RD_TRY() ocf_metadata_try_start_shared_access( \
&cache->metadata.lock)
#define OCF_METADATA_LOCK_WR() \
ocf_metadata_lock(cache, OCF_METADATA_WR)
#define OCF_METADATA_LOCK_WR() ocf_metadata_start_exclusive_access( \
&cache->metadata.lock)
#define OCF_METADATA_LOCK_WR_TRY() \
ocf_metadata_try_lock(cache, OCF_METADATA_WR)
ocf_metadata_try_start_exclusive_access(&cache->metadata.lock)
#define OCF_METADATA_UNLOCK_WR() \
ocf_metadata_unlock(cache, OCF_METADATA_WR)
#define OCF_METADATA_UNLOCK_WR() ocf_metadata_end_exclusive_access( \
&cache->metadata.lock)
#define OCF_METADATA_BITS_LOCK_RD() \
ocf_metadata_status_bits_lock(cache, OCF_METADATA_RD)
ocf_metadata_status_bits_lock(&cache->metadata.lock, \
OCF_METADATA_RD)
#define OCF_METADATA_BITS_UNLOCK_RD() \
ocf_metadata_status_bits_unlock(cache, OCF_METADATA_RD)
ocf_metadata_status_bits_unlock(&cache->metadata.lock, \
OCF_METADATA_RD)
#define OCF_METADATA_BITS_LOCK_WR() \
ocf_metadata_status_bits_lock(cache, OCF_METADATA_WR)
ocf_metadata_status_bits_lock(&cache->metadata.lock, \
OCF_METADATA_WR)
#define OCF_METADATA_BITS_UNLOCK_WR() \
ocf_metadata_status_bits_unlock(cache, OCF_METADATA_WR)
ocf_metadata_status_bits_unlock(&cache->metadata.lock, \
OCF_METADATA_WR)
#define OCF_METADATA_FLUSH_LOCK() \
ocf_metadata_flush_lock(cache)
#define OCF_METADATA_FLUSH_UNLOCK() \
ocf_metadata_flush_unlock(cache)
void ocf_req_hash_lock_rd(struct ocf_request *req);
void ocf_req_hash_unlock_rd(struct ocf_request *req);
void ocf_req_hash_lock_wr(struct ocf_request *req);
void ocf_req_hash_unlock_wr(struct ocf_request *req);
void ocf_req_hash_lock_upgrade(struct ocf_request *req);
#endif
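
To make the locking hierarchy from the commit message concrete, here is a minimal sketch of the two sides of the exclusion (both caller functions are hypothetical; only the locking calls are from this patch). A management or eviction operation holds the global lock exclusively, so it cannot overlap with an I/O path that holds the same lock in shared mode plus a hash bucket lock.

/* Hypothetical management-path caller: exclusive global metadata lock,
 * so no bucket lock can be held concurrently by any I/O path. */
static void my_management_op(struct ocf_cache *cache)
{
        ocf_metadata_start_exclusive_access(&cache->metadata.lock);
        /* ... modify metadata freely, no bucket locks needed ... */
        ocf_metadata_end_exclusive_access(&cache->metadata.lock);
}

/* Hypothetical I/O-path caller: shared global lock first, then the
 * bucket lock for the hash it touches (write mode to map a cache line). */
static void my_io_path_op(struct ocf_cache *cache, ocf_cache_line_t hash)
{
        ocf_metadata_start_shared_access(&cache->metadata.lock);
        ocf_metadata_hash_lock(&cache->metadata.lock, hash, OCF_METADATA_WR);
        /* ... map a cache line within this hash bucket ... */
        ocf_metadata_hash_unlock(&cache->metadata.lock, hash, OCF_METADATA_WR);
        ocf_metadata_end_shared_access(&cache->metadata.lock);
}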

@@ -39,7 +39,7 @@ int ocf_metadata_init(struct ocf_cache *cache,
return ret;
}
ocf_metadata_concurrency_init(cache);
ocf_metadata_concurrency_init(&cache->metadata.lock);
return 0;
}
@@ -73,7 +73,7 @@ void ocf_metadata_deinit(struct ocf_cache *cache)
cache->metadata.iface.deinit(cache);
}
ocf_metadata_concurrency_deinit(cache);
ocf_metadata_concurrency_deinit(&cache->metadata.lock);
ocf_metadata_io_deinit(cache);
}

@@ -428,6 +428,15 @@ struct ocf_cache_line_settings {
uint64_t sector_end;
};
struct ocf_metadata_lock
{
env_rwsem global; /*!< global metadata lock (GML) */
env_rwlock status; /*!< Fast lock for status bits */
env_spinlock eviction; /*!< Fast lock for eviction policy */
env_rwsem *hash; /*!< Hash bucket locks */
uint32_t num_hash_entries; /*!< Hash bucket count */
};
/**
* @brief Metadata control structure
*/
@@ -444,11 +453,7 @@ struct ocf_metadata {
bool is_volatile;
/*!< true if metadata used in volatile mode (RAM only) */
struct {
env_rwsem collision; /*!< lock for collision table */
env_rwlock status; /*!< Fast lock for status bits */
env_spinlock eviction; /*!< Fast lock for eviction policy */
} lock;
struct ocf_metadata_lock lock;
};
#endif /* __METADATA_STRUCTS_H__ */

@@ -986,7 +986,8 @@ static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline,
context->flags.attached_metadata_inited = true;
if (ocf_metadata_concurrency_attached_init(cache)) {
if (ocf_metadata_concurrency_attached_init(&cache->metadata.lock,
cache->device->hash_table_entries)) {
ocf_cache_log(cache, log_err, "Failed to initialize attached "
"metadata concurrency\n");
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
@@ -1734,6 +1735,7 @@ static void _ocf_mngt_cache_unplug_complete(void *priv, int error)
ocf_volume_close(&cache->device->volume);
ocf_metadata_concurrency_attached_deinit(&cache->metadata.lock);
ocf_metadata_deinit_variable_size(cache);
ocf_concurrency_deinit(cache);
ocf_freelist_deinit(cache->freelist);

@@ -311,3 +311,14 @@ void ocf_req_clear_map(struct ocf_request *req)
ENV_BUG_ON(env_memset(req->map,
sizeof(req->map[0]) * req->core_line_count, 0));
}
void ocf_req_hash(struct ocf_request *req)
{
int i;
for (i = 0; i < req->core_line_count; i++) {
req->map[i].hash = ocf_metadata_hash_func(req->cache,
req->core_line_first + i,
ocf_core_get_id(req->core));
}
}

@@ -319,6 +319,13 @@ void ocf_req_clear_info(struct ocf_request *req);
*/
void ocf_req_clear_map(struct ocf_request *req);
/**
* @brief Calculate hashes for all core lines within the request
*
* @param req - OCF request
*/
void ocf_req_hash(struct ocf_request *req);
/**
* @brief Clear OCF request
*