Merge pull request #221 from arutk/metadata_rename_concurrency
Restructure concurrency code
commit fa14d6a4b5
@@ -12,7 +12,7 @@
 #include "../ocf_request.h"
 #include "../cleaning/acp.h"
 #include "../engine/engine_common.h"
-#include "../concurrency/ocf_cache_concurrency.h"
+#include "../concurrency/ocf_cache_line_concurrency.h"
 #include "cleaning_priv.h"

 #define OCF_ACP_DEBUG 0
@@ -11,7 +11,7 @@
 #include "../utils/utils_cleaner.h"
 #include "../utils/utils_part.h"
 #include "../utils/utils_realloc.h"
-#include "../concurrency/ocf_cache_concurrency.h"
+#include "../concurrency/ocf_cache_line_concurrency.h"
 #include "../ocf_def_priv.h"
 #include "cleaning_priv.h"

@@ -52,7 +52,7 @@ struct __waiters_list {
 	env_spinlock lock;
 };

-struct ocf_cache_concurrency {
+struct ocf_cache_line_concurrency {
 	env_rwlock lock;
 	env_atomic *access;
 	env_atomic waiting;
@@ -69,24 +69,24 @@ struct ocf_cache_concurrency {
 #define ALLOCATOR_NAME_FMT "ocf_%s_cache_concurrency"
 #define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + OCF_CACHE_NAME_SIZE)

-int ocf_cache_concurrency_init(struct ocf_cache *cache)
+int ocf_cache_line_concurrency_init(struct ocf_cache *cache)
 {
 	uint32_t i;
 	int error = 0;
-	struct ocf_cache_concurrency *c;
+	struct ocf_cache_line_concurrency *c;
 	char name[ALLOCATOR_NAME_MAX];

-	ENV_BUG_ON(cache->device->concurrency.cache);
+	ENV_BUG_ON(cache->device->concurrency.cache_line);

 	OCF_DEBUG_TRACE(cache);

 	c = env_vmalloc(sizeof(*c));
 	if (!c) {
 		error = __LINE__;
-		goto ocf_cache_concurrency_init;
+		goto ocf_cache_line_concurrency_init;
 	}

-	cache->device->concurrency.cache = c;
+	cache->device->concurrency.cache_line = c;

 	OCF_REALLOC_INIT(&c->access, &c->access_limit);
 	OCF_REALLOC_CP(&c->access, sizeof(c->access[0]),
@@ -94,19 +94,19 @@ int ocf_cache_concurrency_init(struct ocf_cache *cache)

 	if (!c->access) {
 		error = __LINE__;
-		goto ocf_cache_concurrency_init;
+		goto ocf_cache_line_concurrency_init;
 	}

 	if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
 			ocf_cache_get_name(cache)) < 0) {
 		error = __LINE__;
-		goto ocf_cache_concurrency_init;
+		goto ocf_cache_line_concurrency_init;
 	}

 	c->allocator = env_allocator_create(sizeof(struct __waiter), name);
 	if (!c->allocator) {
 		error = __LINE__;
-		goto ocf_cache_concurrency_init;
+		goto ocf_cache_line_concurrency_init;
 	}

 	/* Init concurrency control table */
@@ -119,12 +119,12 @@ int ocf_cache_concurrency_init(struct ocf_cache *cache)

 	return 0;

-ocf_cache_concurrency_init:
+ocf_cache_line_concurrency_init:

 	ocf_cache_log(cache, log_err, "Cannot initialize cache concurrency, "
 			"ERROR %d", error);

-	ocf_cache_concurrency_deinit(cache);
+	ocf_cache_line_concurrency_deinit(cache);

 	return -1;
 }
@@ -132,16 +132,16 @@ ocf_cache_concurrency_init:
 /*
  *
  */
-void ocf_cache_concurrency_deinit(struct ocf_cache *cache)
+void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache)
 {
-	struct ocf_cache_concurrency *concurrency;
+	struct ocf_cache_line_concurrency *concurrency;

-	if (!cache->device->concurrency.cache)
+	if (!cache->device->concurrency.cache_line)
 		return;

 	OCF_DEBUG_TRACE(cache);

-	concurrency = cache->device->concurrency.cache;
+	concurrency = cache->device->concurrency.cache_line;

 	if (concurrency->access)
 		OCF_REALLOC_DEINIT(&concurrency->access,
@@ -151,17 +151,17 @@ void ocf_cache_concurrency_deinit(struct ocf_cache *cache)
 	env_allocator_destroy(concurrency->allocator);

 	env_vfree(concurrency);
-	cache->device->concurrency.cache = NULL;
+	cache->device->concurrency.cache_line = NULL;
 }

-size_t ocf_cache_concurrency_size_of(struct ocf_cache *cache)
+size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache)
 {
 	size_t size;

 	size = sizeof(env_atomic);
 	size *= cache->device->collision_table_entries;

-	size += sizeof(struct ocf_cache_concurrency);
+	size += sizeof(struct ocf_cache_line_concurrency);

 	return size;
 }
@@ -169,7 +169,7 @@ size_t ocf_cache_concurrency_size_of(struct ocf_cache *cache)
 /*
  *
  */
-static inline bool __are_waiters(struct ocf_cache_concurrency *c,
+static inline bool __are_waiters(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line)
 {
 	bool are = false;
@@ -197,7 +197,7 @@ static inline bool __are_waiters(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline void __add_waiter(struct ocf_cache_concurrency *c,
+static inline void __add_waiter(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line, struct __waiter *waiter)
 {
 	uint32_t idx = _WAITERS_LIST_ITEM(line);
@@ -225,7 +225,7 @@ static inline void __add_waiter(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline bool __try_lock_wr(struct ocf_cache_concurrency *c,
+static inline bool __try_lock_wr(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line)
 {
 	env_atomic *access = &c->access[line];
@@ -241,7 +241,7 @@ static inline bool __try_lock_wr(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline bool __try_lock_rd_idle(struct ocf_cache_concurrency *c,
+static inline bool __try_lock_rd_idle(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line)
 {
 	env_atomic *access = &c->access[line];
@@ -254,7 +254,7 @@ static inline bool __try_lock_rd_idle(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline bool __try_lock_rd(struct ocf_cache_concurrency *c,
+static inline bool __try_lock_rd(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line)
 {
 	env_atomic *access = &c->access[line];
@@ -265,7 +265,7 @@ static inline bool __try_lock_rd(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline void __unlock_wr(struct ocf_cache_concurrency *c,
+static inline void __unlock_wr(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line)
 {
 	env_atomic *access = &c->access[line];
@@ -277,7 +277,7 @@ static inline void __unlock_wr(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline void __unlock_rd(struct ocf_cache_concurrency *c,
+static inline void __unlock_rd(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line)
 {
 	env_atomic *access = &c->access[line];
@@ -290,7 +290,7 @@ static inline void __unlock_rd(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline bool __try_lock_wr2wr(struct ocf_cache_concurrency *c,
+static inline bool __try_lock_wr2wr(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line)
 {
 	env_atomic *access = &c->access[line];
@@ -302,7 +302,7 @@ static inline bool __try_lock_wr2wr(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline bool __try_lock_wr2rd(struct ocf_cache_concurrency *c,
+static inline bool __try_lock_wr2rd(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line)
 {
 	env_atomic *access = &c->access[line];
@@ -315,7 +315,7 @@ static inline bool __try_lock_wr2rd(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline bool __try_lock_rd2wr(struct ocf_cache_concurrency *c,
+static inline bool __try_lock_rd2wr(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line)
 {
 	env_atomic *access = &c->access[line];
@@ -334,7 +334,7 @@ static inline bool __try_lock_rd2wr(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline bool __try_lock_rd2rd(struct ocf_cache_concurrency *c,
+static inline bool __try_lock_rd2rd(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line)
 {
 	env_atomic *access = &c->access[line];
@@ -350,7 +350,7 @@ static inline bool __try_lock_rd2rd(struct ocf_cache_concurrency *c,
 /*
  *
  */
-static inline bool __lock_cache_line_wr(struct ocf_cache_concurrency *c,
+static inline bool __lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
 		const ocf_cache_line_t line, __on_lock on_lock,
 		void *ctx, uint32_t ctx_id)
 {
@@ -407,7 +407,7 @@ static inline bool __lock_cache_line_wr(struct ocf_cache_concurrency *c,
  * Attempt to lock cache line for read.
  * In case cache line is locked, attempt to add caller on wait list.
  */
-static inline bool __lock_cache_line_rd(struct ocf_cache_concurrency *c,
+static inline bool __lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
 		const ocf_cache_line_t line, __on_lock on_lock,
 		void *ctx, uint32_t ctx_id)
 {
@@ -465,7 +465,7 @@ static inline bool __lock_cache_line_rd(struct ocf_cache_concurrency *c,
 	return locked || waiting;
 }

-static inline void __unlock_cache_line_rd_common(struct ocf_cache_concurrency *c,
+static inline void __unlock_cache_line_rd_common(struct ocf_cache_line_concurrency *c,
 		const ocf_cache_line_t line)
 {
 	bool locked = false;
@@ -534,7 +534,7 @@ static inline void __unlock_cache_line_rd_common(struct ocf_cache_concurrency *c
 /*
  *
  */
-static inline void __unlock_cache_line_rd(struct ocf_cache_concurrency *c,
+static inline void __unlock_cache_line_rd(struct ocf_cache_line_concurrency *c,
 		const ocf_cache_line_t line)
 {
 	unsigned long flags = 0;
@@ -546,7 +546,7 @@ static inline void __unlock_cache_line_rd(struct ocf_cache_concurrency *c,
 }


-static inline void __unlock_cache_line_wr_common(struct ocf_cache_concurrency *c,
+static inline void __unlock_cache_line_wr_common(struct ocf_cache_line_concurrency *c,
 		const ocf_cache_line_t line)
 {
 	uint32_t i = 0;
@@ -615,7 +615,7 @@ static inline void __unlock_cache_line_wr_common(struct ocf_cache_concurrency *c
 /*
  *
  */
-static inline void __unlock_cache_line_wr(struct ocf_cache_concurrency *c,
+static inline void __unlock_cache_line_wr(struct ocf_cache_line_concurrency *c,
 		const ocf_cache_line_t line)
 {
 	unsigned long flags = 0;
@@ -631,7 +631,7 @@ static inline void __unlock_cache_line_wr(struct ocf_cache_concurrency *c,
  * Request can be assigned with lock asynchronously at any point of time,
  * so need to check lock state under a common lock.
  */
-static inline void __remove_line_from_waiters_list(struct ocf_cache_concurrency *c,
+static inline void __remove_line_from_waiters_list(struct ocf_cache_line_concurrency *c,
 		struct ocf_request *req, int i, void *ctx, int rw)
 {
 	ocf_cache_line_t line = req->map[i].coll_idx;
@@ -670,7 +670,7 @@ static int _ocf_req_lock_rd_common(struct ocf_request *req, void *context,
 {
 	bool locked, waiting;
 	int32_t i;
-	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
 	ocf_cache_line_t line;

 	OCF_DEBUG_RQ(req, "Lock");
@@ -783,7 +783,7 @@ static void _req_on_lock(void *ctx, uint32_t ctx_id,
 		ocf_cache_line_t line, int rw)
 {
 	struct ocf_request *req = ctx;
-	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;

 	if (rw == OCF_READ)
 		req->map[ctx_id].rd_locked = true;
@@ -818,7 +818,7 @@ static int _ocf_req_lock_wr_common(struct ocf_request *req, void *context,
 {
 	bool locked, waiting;
 	int32_t i;
-	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
 	ocf_cache_line_t line;

 	OCF_DEBUG_RQ(req, "Lock");
@@ -937,7 +937,7 @@ int ocf_req_trylock_wr(struct ocf_request *req)
  */
 void ocf_req_unlock_rd(struct ocf_request *req)
 {
-	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
 	int32_t i;
 	ocf_cache_line_t line;

@@ -965,7 +965,7 @@ void ocf_req_unlock_rd(struct ocf_request *req)
  */
 void ocf_req_unlock_wr(struct ocf_request *req)
 {
-	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
 	int32_t i;
 	ocf_cache_line_t line;

@@ -993,7 +993,7 @@ void ocf_req_unlock_wr(struct ocf_request *req)
  */
 void ocf_req_unlock(struct ocf_request *req)
 {
-	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;
 	int32_t i;
 	ocf_cache_line_t line;

@@ -1029,7 +1029,7 @@ void ocf_req_unlock(struct ocf_request *req)
 void ocf_req_unlock_entry(struct ocf_cache *cache,
 		struct ocf_request *req, uint32_t entry)
 {
-	struct ocf_cache_concurrency *c = req->cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = req->cache->device->concurrency.cache_line;

 	ENV_BUG_ON(req->map[entry].status == LOOKUP_MISS);

@@ -1052,7 +1052,7 @@ void ocf_req_unlock_entry(struct ocf_cache *cache,
 bool ocf_cache_line_is_used(struct ocf_cache *cache,
 		ocf_cache_line_t line)
 {
-	struct ocf_cache_concurrency *c = cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;

 	ENV_BUG_ON(line >= cache->device->collision_table_entries);

@@ -1071,7 +1071,7 @@ bool ocf_cache_line_is_used(struct ocf_cache *cache,
 bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
 		ocf_cache_line_t line)
 {
-	struct ocf_cache_concurrency *c = cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
 	bool are;
 	unsigned long flags = 0;

@@ -1090,16 +1090,16 @@ bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
 /*
  *
  */
-uint32_t ocf_cache_concurrency_suspended_no(struct ocf_cache *cache)
+uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache)
 {
-	struct ocf_cache_concurrency *c = cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;

 	return env_atomic_read(&c->waiting);
 }

 bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line)
 {
-	struct ocf_cache_concurrency *c = cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
 	return __lock_cache_line_rd(c, line, NULL, NULL, 0);
 }

@@ -1108,7 +1108,7 @@ bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line)
  */
 void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line)
 {
-	struct ocf_cache_concurrency *c = cache->device->concurrency.cache;
+	struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;

 	OCF_DEBUG_RQ(cache, "Cache line = %u", line);

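Reviewer note: a minimal usage sketch of the renamed per-cache-line API (not part of this commit; the helper function below is hypothetical, only ocf_cache_line_try_lock_rd() and ocf_cache_line_unlock_rd() come from the hunks above).

/* Hypothetical caller; `cache` and `line` are assumed to come from context. */
static void example_peek_line(struct ocf_cache *cache, ocf_cache_line_t line)
{
	/* Try to take the per-line read lock without waiting. */
	if (!ocf_cache_line_try_lock_rd(cache, line))
		return; /* lock not obtained - skip this line */

	/* ... read-only access to per-line state ... */

	ocf_cache_line_unlock_rd(cache, line);
}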
@@ -14,7 +14,7 @@
 /**
  * @brief OCF cache concurrency module handle
  */
-struct ocf_cache_concurrency;
+struct ocf_cache_line_concurrency;

 /**
  * @brief Initialize OCF cache concurrency module
@@ -22,14 +22,14 @@ struct ocf_cache_concurrency;
  * @param cache - OCF cache instance
  * @return 0 - Initialization successful, otherwise ERROR
  */
-int ocf_cache_concurrency_init(struct ocf_cache *cache);
+int ocf_cache_line_concurrency_init(struct ocf_cache *cache);

 /**
  * @biref De-Initialize OCF cache concurrency module
  *
  * @param cache - OCF cache instance
  */
-void ocf_cache_concurrency_deinit(struct ocf_cache *cache);
+void ocf_cache_line_concurrency_deinit(struct ocf_cache *cache);

 /**
  * @brief Get number of waiting (suspended) OCF requests in due to cache
@@ -39,7 +39,7 @@ void ocf_cache_concurrency_deinit(struct ocf_cache *cache);
  *
  * @return Number of suspended OCF requests
  */
-uint32_t ocf_cache_concurrency_suspended_no(struct ocf_cache *cache);
+uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache *cache);

 /**
  * @brief Return memory footprint conusmed by cache concurrency module
@@ -48,7 +48,7 @@ uint32_t ocf_cache_concurrency_suspended_no(struct ocf_cache *cache);
  *
  * @return Memory footprint of cache concurrency module
  */
-size_t ocf_cache_concurrency_size_of(struct ocf_cache *cache);
+size_t ocf_cache_line_concurrency_size_of(struct ocf_cache *cache);

 /**
  * @brief Lock OCF request for WRITE access (Lock all cache lines in map info)
@@ -9,7 +9,7 @@ int ocf_concurrency_init(struct ocf_cache *cache)
 {
 	int result = 0;

-	result = ocf_cache_concurrency_init(cache);
+	result = ocf_cache_line_concurrency_init(cache);

 	if (result)
 		ocf_concurrency_deinit(cache);
@@ -19,6 +19,6 @@ int ocf_concurrency_init(struct ocf_cache *cache)

 void ocf_concurrency_deinit(struct ocf_cache *cache)
 {
-	ocf_cache_concurrency_deinit(cache);
+	ocf_cache_line_concurrency_deinit(cache);
 }

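Reviewer note: a hedged sketch of how the renamed init path is driven (hypothetical caller; only ocf_concurrency_init(), ocf_concurrency_deinit() and their failure behaviour come from the hunks above).

/* Hypothetical startup helper. ocf_concurrency_init() now forwards to
 * ocf_cache_line_concurrency_init() and, on failure, has already called
 * ocf_concurrency_deinit() internally, so the caller only propagates
 * the error code. */
static int example_setup_concurrency(struct ocf_cache *cache)
{
	int result = ocf_concurrency_init(cache);

	return result; /* 0 on success, nothing to roll back on error */
}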
@@ -38,6 +38,6 @@ int ocf_concurrency_init(struct ocf_cache *cache);
  */
 void ocf_concurrency_deinit(struct ocf_cache *cache);

-#include "ocf_cache_concurrency.h"
+#include "ocf_cache_line_concurrency.h"

 #endif /* OCF_CONCURRENCY_H_ */
src/concurrency/ocf_metadata_concurrency.c (new file, 18 lines)
@@ -0,0 +1,18 @@
+/*
+ * Copyright(c) 2019-2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf_metadata_concurrency.h"
+
+void ocf_metadata_concurrency_init(struct ocf_cache *cache)
+{
+	env_spinlock_init(&cache->metadata.lock.eviction);
+	env_rwlock_init(&cache->metadata.lock.status);
+	env_rwsem_init(&cache->metadata.lock.collision);
+}
+
+int ocf_metadata_concurrency_attached_init(struct ocf_cache *cache)
+{
+	return 0;
+}
src/concurrency/ocf_metadata_concurrency.h (new file, 132 lines)
@@ -0,0 +1,132 @@
+/*
+ * Copyright(c) 2019-2019 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+#include "../ocf_cache_priv.h"
+
+#ifndef __OCF_METADATA_CONCURRENCY_H__
+#define __OCF_METADATA_CONCURRENCY_H__
+
+#define OCF_METADATA_RD 0
+#define OCF_METADATA_WR 1
+
+void ocf_metadata_concurrency_init(struct ocf_cache *cache);
+
+int ocf_metadata_concurrency_attached_init(struct ocf_cache *cache);
+
+static inline void ocf_metadata_eviction_lock(struct ocf_cache *cache)
+{
+	env_spinlock_lock(&cache->metadata.lock.eviction);
+}
+
+static inline void ocf_metadata_eviction_unlock(struct ocf_cache *cache)
+{
+	env_spinlock_unlock(&cache->metadata.lock.eviction);
+}
+
+#define OCF_METADATA_EVICTION_LOCK() \
+		ocf_metadata_eviction_lock(cache)
+
+#define OCF_METADATA_EVICTION_UNLOCK() \
+		ocf_metadata_eviction_unlock(cache)
+
+static inline void ocf_metadata_lock(struct ocf_cache *cache, int rw)
+{
+	if (rw == OCF_METADATA_WR)
+		env_rwsem_down_write(&cache->metadata.lock.collision);
+	else if (rw == OCF_METADATA_RD)
+		env_rwsem_down_read(&cache->metadata.lock.collision);
+	else
+		ENV_BUG();
+}
+
+
+static inline void ocf_metadata_unlock(struct ocf_cache *cache, int rw)
+{
+	if (rw == OCF_METADATA_WR)
+		env_rwsem_up_write(&cache->metadata.lock.collision);
+	else if (rw == OCF_METADATA_RD)
+		env_rwsem_up_read(&cache->metadata.lock.collision);
+	else
+		ENV_BUG();
+}
+
+static inline int ocf_metadata_try_lock(struct ocf_cache *cache, int rw)
+{
+	int result = 0;
+
+	if (rw == OCF_METADATA_WR) {
+		result = env_rwsem_down_write_trylock(
+				&cache->metadata.lock.collision);
+	} else if (rw == OCF_METADATA_RD) {
+		result = env_rwsem_down_read_trylock(
+				&cache->metadata.lock.collision);
+	} else {
+		ENV_BUG();
+	}
+
+	if (result)
+		return -1;
+
+	return 0;
+}
+
+static inline void ocf_metadata_status_bits_lock(
+		struct ocf_cache *cache, int rw)
+{
+	if (rw == OCF_METADATA_WR)
+		env_rwlock_write_lock(&cache->metadata.lock.status);
+	else if (rw == OCF_METADATA_RD)
+		env_rwlock_read_lock(&cache->metadata.lock.status);
+	else
+		ENV_BUG();
+}
+
+static inline void ocf_metadata_status_bits_unlock(
+		struct ocf_cache *cache, int rw)
+{
+	if (rw == OCF_METADATA_WR)
+		env_rwlock_write_unlock(&cache->metadata.lock.status);
+	else if (rw == OCF_METADATA_RD)
+		env_rwlock_read_unlock(&cache->metadata.lock.status);
+	else
+		ENV_BUG();
+}
+
+#define OCF_METADATA_LOCK_RD() \
+		ocf_metadata_lock(cache, OCF_METADATA_RD)
+
+#define OCF_METADATA_UNLOCK_RD() \
+		ocf_metadata_unlock(cache, OCF_METADATA_RD)
+
+#define OCF_METADATA_LOCK_RD_TRY() \
+		ocf_metadata_try_lock(cache, OCF_METADATA_RD)
+
+#define OCF_METADATA_LOCK_WR() \
+		ocf_metadata_lock(cache, OCF_METADATA_WR)
+
+#define OCF_METADATA_LOCK_WR_TRY() \
+		ocf_metadata_try_lock(cache, OCF_METADATA_WR)
+
+#define OCF_METADATA_UNLOCK_WR() \
+		ocf_metadata_unlock(cache, OCF_METADATA_WR)
+
+#define OCF_METADATA_BITS_LOCK_RD() \
+		ocf_metadata_status_bits_lock(cache, OCF_METADATA_RD)
+
+#define OCF_METADATA_BITS_UNLOCK_RD() \
+		ocf_metadata_status_bits_unlock(cache, OCF_METADATA_RD)
+
+#define OCF_METADATA_BITS_LOCK_WR() \
+		ocf_metadata_status_bits_lock(cache, OCF_METADATA_WR)
+
+#define OCF_METADATA_BITS_UNLOCK_WR() \
+		ocf_metadata_status_bits_unlock(cache, OCF_METADATA_WR)
+
+#define OCF_METADATA_FLUSH_LOCK() \
+		ocf_metadata_flush_lock(cache)
+
+#define OCF_METADATA_FLUSH_UNLOCK() \
+		ocf_metadata_flush_unlock(cache)
+
+#endif
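Reviewer note: a minimal usage sketch of the relocated metadata locking helpers (not part of this commit; the function below is hypothetical, the macros and ocf_metadata_try_lock() are taken from the new header, and the macro forms assume a local `cache` variable is in scope).

/* Hypothetical caller; `cache` is the ocf_cache instance. */
static void example_metadata_access(struct ocf_cache *cache)
{
	/* Blocking read lock on the metadata collision semaphore. */
	OCF_METADATA_LOCK_RD();
	/* ... read-only metadata access ... */
	OCF_METADATA_UNLOCK_RD();

	/* Non-blocking variant: per the helper above, 0 means the lock
	 * was taken and -1 means it was not available. */
	if (ocf_metadata_try_lock(cache, OCF_METADATA_WR) == 0) {
		/* ... metadata update ... */
		ocf_metadata_unlock(cache, OCF_METADATA_WR);
	}
}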
@@ -8,6 +8,7 @@

 #include "eviction.h"
 #include "../metadata/metadata.h"
+#include "../concurrency/ocf_metadata_concurrency.h"

 /**
  * @brief Initialize cache line before adding it into eviction
@@ -41,6 +41,8 @@ int ocf_metadata_init(struct ocf_cache *cache,
 	if (ret)
 		ocf_metadata_io_deinit(cache);

+	ocf_metadata_concurrency_init(cache);
+
 	return ret;
 }

@@ -9,122 +9,6 @@
 #include "metadata_common.h"
 #include "../ocf_cache_priv.h"
 #include "../ocf_ctx_priv.h"
-
-static inline void ocf_metadata_eviction_lock(struct ocf_cache *cache)
-{
-	env_spinlock_lock(&cache->metadata.lock.eviction);
-}
-
-static inline void ocf_metadata_eviction_unlock(struct ocf_cache *cache)
-{
-	env_spinlock_unlock(&cache->metadata.lock.eviction);
-}
-
-#define OCF_METADATA_EVICTION_LOCK() \
-		ocf_metadata_eviction_lock(cache)
-
-#define OCF_METADATA_EVICTION_UNLOCK() \
-		ocf_metadata_eviction_unlock(cache)
-
-static inline void ocf_metadata_lock(struct ocf_cache *cache, int rw)
-{
-	if (rw == OCF_METADATA_WR)
-		env_rwsem_down_write(&cache->metadata.lock.collision);
-	else if (rw == OCF_METADATA_RD)
-		env_rwsem_down_read(&cache->metadata.lock.collision);
-	else
-		ENV_BUG();
-}
-
-
-static inline void ocf_metadata_unlock(struct ocf_cache *cache, int rw)
-{
-	if (rw == OCF_METADATA_WR)
-		env_rwsem_up_write(&cache->metadata.lock.collision);
-	else if (rw == OCF_METADATA_RD)
-		env_rwsem_up_read(&cache->metadata.lock.collision);
-	else
-		ENV_BUG();
-}
-
-static inline int ocf_metadata_try_lock(struct ocf_cache *cache, int rw)
-{
-	int result = 0;
-
-	if (rw == OCF_METADATA_WR) {
-		result = env_rwsem_down_write_trylock(
-				&cache->metadata.lock.collision);
-	} else if (rw == OCF_METADATA_RD) {
-		result = env_rwsem_down_read_trylock(
-				&cache->metadata.lock.collision);
-	} else {
-		ENV_BUG();
-	}
-
-	if (result)
-		return -1;
-
-	return 0;
-}
-
-static inline void ocf_metadata_status_bits_lock(
-		struct ocf_cache *cache, int rw)
-{
-	if (rw == OCF_METADATA_WR)
-		env_rwlock_write_lock(&cache->metadata.lock.status);
-	else if (rw == OCF_METADATA_RD)
-		env_rwlock_read_lock(&cache->metadata.lock.status);
-	else
-		ENV_BUG();
-}
-
-static inline void ocf_metadata_status_bits_unlock(
-		struct ocf_cache *cache, int rw)
-{
-	if (rw == OCF_METADATA_WR)
-		env_rwlock_write_unlock(&cache->metadata.lock.status);
-	else if (rw == OCF_METADATA_RD)
-		env_rwlock_read_unlock(&cache->metadata.lock.status);
-	else
-		ENV_BUG();
-}
-
-#define OCF_METADATA_LOCK_RD() \
-		ocf_metadata_lock(cache, OCF_METADATA_RD)
-
-#define OCF_METADATA_UNLOCK_RD() \
-		ocf_metadata_unlock(cache, OCF_METADATA_RD)
-
-#define OCF_METADATA_LOCK_RD_TRY() \
-		ocf_metadata_try_lock(cache, OCF_METADATA_RD)
-
-#define OCF_METADATA_LOCK_WR() \
-		ocf_metadata_lock(cache, OCF_METADATA_WR)
-
-#define OCF_METADATA_LOCK_WR_TRY() \
-		ocf_metadata_try_lock(cache, OCF_METADATA_WR)
-
-#define OCF_METADATA_UNLOCK_WR() \
-		ocf_metadata_unlock(cache, OCF_METADATA_WR)
-
-#define OCF_METADATA_BITS_LOCK_RD() \
-		ocf_metadata_status_bits_lock(cache, OCF_METADATA_RD)
-
-#define OCF_METADATA_BITS_UNLOCK_RD() \
-		ocf_metadata_status_bits_unlock(cache, OCF_METADATA_RD)
-
-#define OCF_METADATA_BITS_LOCK_WR() \
-		ocf_metadata_status_bits_lock(cache, OCF_METADATA_WR)
-
-#define OCF_METADATA_BITS_UNLOCK_WR() \
-		ocf_metadata_status_bits_unlock(cache, OCF_METADATA_WR)
-
-#define OCF_METADATA_FLUSH_LOCK() \
-		ocf_metadata_flush_lock(cache)
-
-#define OCF_METADATA_FLUSH_UNLOCK() \
-		ocf_metadata_flush_unlock(cache)
-
 #include "metadata_cleaning_policy.h"
 #include "metadata_eviction_policy.h"
 #include "metadata_partition.h"
@@ -546,10 +546,6 @@ int ocf_metadata_hash_init(struct ocf_cache *cache,
 		core->runtime_meta = &core_meta_runtime[core_id];
 	}

-	env_spinlock_init(&cache->metadata.lock.eviction);
-	env_rwlock_init(&cache->metadata.lock.status);
-	env_rwsem_init(&cache->metadata.lock.collision);
-
 	return 0;
 }

@@ -1183,7 +1179,7 @@ static size_t ocf_metadata_hash_size_of(struct ocf_cache *cache)
 	/* Get additional part of memory footprint */

 	/* Cache concurrency mechnism */
-	size += ocf_cache_concurrency_size_of(cache);
+	size += ocf_cache_line_concurrency_size_of(cache);

 	return size;
 }
@@ -6,7 +6,8 @@
 #ifndef __METADATA_STATUS_H__
 #define __METADATA_STATUS_H__

 #include "../ocf_request.h"
+#include "../concurrency/ocf_metadata_concurrency.h"

 /*******************************************************************************
  * Dirty
  ******************************************************************************/
@@ -470,8 +470,4 @@ struct ocf_metadata {
 	} lock;
 };

-
-#define OCF_METADATA_RD 0
-#define OCF_METADATA_WR 1
-
 #endif /* __METADATA_STRUCTS_H__ */
@@ -1053,9 +1053,15 @@ static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline,
 		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
 	}

-	ocf_cache_log(cache, log_debug, "Cache attached\n");
 	context->flags.attached_metadata_inited = true;

+	if (ocf_metadata_concurrency_attached_init(cache)) {
+		ocf_cache_log(cache, log_err, "Failed to initialize attached "
+				"metadata concurrency\n");
+		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
+	}
+
+
 	for (i = 0; i < OCF_IO_CLASS_MAX + 1; ++i) {
 		cache->user_parts[i].runtime =
 			&cache->device->runtime_meta->user_parts[i];
@@ -1538,6 +1544,8 @@ static void _ocf_mngt_attach_post_init(ocf_pipeline_t pipeline,
 	ocf_cleaner_refcnt_unfreeze(cache);
 	ocf_refcnt_unfreeze(&cache->refcnt.metadata);

+	ocf_cache_log(cache, log_debug, "Cache attached\n");
+
 	ocf_pipeline_next(context->pipeline);
 }

@@ -85,7 +85,7 @@ struct ocf_cache_device {
 	struct ocf_part *freelist_part;

 	struct {
-		struct ocf_cache_concurrency *cache;
+		struct ocf_cache_line_concurrency *cache_line;
 	} concurrency;

 	enum ocf_mngt_cache_init_mode init_mode;
@@ -7,7 +7,7 @@
 #define UTILS_CACHE_LINE_H_

 #include "../metadata/metadata.h"
-#include "../concurrency/ocf_cache_concurrency.h"
+#include "../concurrency/ocf_cache_line_concurrency.h"
 #include "../eviction/eviction.h"
 #include "../eviction/ops.h"
 #include "../engine/cache_engine.h"
@@ -32,7 +32,7 @@
 #include "../utils/utils_cleaner.h"
 #include "../utils/utils_part.h"
 #include "../utils/utils_realloc.h"
-#include "../concurrency/ocf_cache_concurrency.h"
+#include "../concurrency/ocf_cache_line_concurrency.h"
 #include "../ocf_def_priv.h"

 #include "cleaning/alru.c/cleaning_policy_alru_initialize_part_test_generated_warps.c"