Merge pull request #509 from Open-CAS/rm-metadata-updater

Remove metadata updater
Robert Baldyga 2021-06-17 09:34:18 +02:00, committed by GitHub
commit 73c3e97f43
32 changed files with 1566 additions and 1666 deletions


@ -159,32 +159,6 @@ static void ctx_cleaner_stop(ocf_cleaner_t c)
{
}
/*
* Initialize metadata updater thread. The metadata updater thread is left
* unimplemented to keep this example as simple as possible.
*/
static int ctx_metadata_updater_init(ocf_metadata_updater_t mu)
{
return 0;
}
/*
* Kick metadata updater thread. The metadata updater thread is left
* unimplemented to keep this example as simple as possible.
*/
static void ctx_metadata_updater_kick(ocf_metadata_updater_t mu)
{
ocf_metadata_updater_run(mu);
}
/*
* Stop metadata updater thread. The metadata updater thread is left
* unimplemented to keep this example as simple as possible.
*/
static void ctx_metadata_updater_stop(ocf_metadata_updater_t mu)
{
}
/*
* Function providing an interface for printing to the log used by OCF internals.
* It can handle messages differently at various log levels.
@ -257,12 +231,6 @@ static const struct ocf_ctx_config ctx_cfg = {
.stop = ctx_cleaner_stop,
},
.metadata_updater = {
.init = ctx_metadata_updater_init,
.kick = ctx_metadata_updater_kick,
.stop = ctx_metadata_updater_stop,
},
.logger = {
.print = ctx_logger_print,
.dump_stack = ctx_logger_dump_stack,
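
For orientation, this leaves a context configuration with only cleaner and logger callbacks to provide. A minimal sketch, assuming the usual nested .ops layout and cleaner callbacks named symmetrically to ctx_cleaner_stop (both are assumptions; other fields omitted):

static const struct ocf_ctx_config ctx_cfg = {
	/* ... name, data and queue ops omitted ... */
	.ops = {
		.cleaner = {
			.init = ctx_cleaner_init,	/* assumed name */
			.kick = ctx_cleaner_kick,	/* assumed name */
			.stop = ctx_cleaner_stop,
		},
		/* the .metadata_updater ops section no longer exists */
		.logger = {
			.print = ctx_logger_print,
			.dump_stack = ctx_logger_dump_stack,
		},
	},
};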


@ -26,7 +26,6 @@
#include "cleaning/acp.h"
#include "promotion/nhit.h"
#include "ocf_metadata.h"
#include "ocf_metadata_updater.h"
#include "ocf_io_class.h"
#include "ocf_stats.h"
#include "ocf_mngt.h"


@ -159,39 +159,15 @@ struct ocf_cleaner_ops {
void (*stop)(ocf_cleaner_t c);
};
/**
* @brief Metadata updater operations
*/
struct ocf_metadata_updater_ops {
/**
* @brief Initialize metadata updater.
*
* This function should create a worker, thread, timer or any other
* mechanism responsible for calling the metadata updater routine.
*
* @param[in] mu Handle to metadata updater to be initialized
*
* @retval 0 Metadata updater has been initialized successfully
* @retval Non-zero Metadata updater initialization failure
*/
int (*init)(ocf_metadata_updater_t mu);
typedef struct ocf_persistent_meta_zone *ocf_persistent_meta_zone_t;
/**
* @brief Kick metadata updater processing
*
* This function should inform the worker, thread or any other mechanism
* that there are new metadata requests to be processed.
*
* @param[in] mu Metadata updater to be kicked
*/
void (*kick)(ocf_metadata_updater_t mu);
/**
* @brief Stop metadata updater
*
* @param[in] mu Metadata updater being stopped
*/
void (*stop)(ocf_metadata_updater_t mu);
struct ocf_persistent_metadata_ops {
ocf_persistent_meta_zone_t (*init)(ocf_cache_t cache, size_t size,
bool *load);
int (*deinit)(ocf_persistent_meta_zone_t zone);
void *(*alloc)(ocf_persistent_meta_zone_t zone, size_t size,
int alloc_id, bool *load);
int (*free)(ocf_persistent_meta_zone_t zone, int alloc_id, void *ptr);
};
/**
@ -204,9 +180,6 @@ struct ocf_ctx_ops {
/* Cleaner operations */
struct ocf_cleaner_ops cleaner;
/* Metadata updater operations */
struct ocf_metadata_updater_ops metadata_updater;
/* Logger operations */
struct ocf_logger_ops logger;
};


@ -1,50 +0,0 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_METADATA_UPDATER_H__
#define __OCF_METADATA_UPDATER_H__
/**
* @file
* @brief OCF metadata updater API
*
*/
/**
* @brief Run metadata updater
*
* @param[in] mu Metadata updater instance to run
*
* @retval Hint whether there is a need to rerun without waiting.
*/
uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu);
/**
* @brief Set metadata updater private data
*
* @param[in] mu Metadata updater handle
* @param[in] priv Private data
*/
void ocf_metadata_updater_set_priv(ocf_metadata_updater_t mu, void *priv);
/**
* @brief Get metadata updater private data
*
* @param[in] mu Metadata updater handle
*
* @retval Metadata updater private data
*/
void *ocf_metadata_updater_get_priv(ocf_metadata_updater_t mu);
/**
* @brief Get cache instance to which metadata updater belongs
*
* @param[in] mu Metadata updater handle
*
* @retval Cache instance
*/
ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu);
#endif /* __OCF_METADATA_UPDATER_H__ */

File diff suppressed because it is too large


@ -6,15 +6,12 @@
#ifndef OCF_CACHE_CONCURRENCY_H_
#define OCF_CACHE_CONCURRENCY_H_
/**
* @file utils_req.h
* @brief OCF cache concurrency module
*/
#include "../utils/utils_alock.h"
/**
* @brief OCF cache concurrency module handle
* @file ocf_cache_line_concurrency.h
* @brief OCF cache concurrency module
*/
struct ocf_cache_line_concurrency;
/**
* @brief Initialize OCF cache concurrency module
@ -25,7 +22,7 @@ struct ocf_cache_line_concurrency;
* @return 0 - Initialization successful, otherwise ERROR
*/
int ocf_cache_line_concurrency_init(struct ocf_cache_line_concurrency **self,
int ocf_cache_line_concurrency_init(struct ocf_alock **self,
unsigned num_clines, struct ocf_cache *cache);
/**
@ -34,7 +31,7 @@ int ocf_cache_line_concurrency_init(struct ocf_cache_line_concurrency **self,
* @param self - cacheline concurrency private data
*/
void ocf_cache_line_concurrency_deinit(
struct ocf_cache_line_concurrency **self);
struct ocf_alock **self);
/**
* @brief Get number of waiting (suspended) OCF requests due to cache
@ -44,7 +41,7 @@ void ocf_cache_line_concurrency_deinit(
*
* @return Number of suspended OCF requests
*/
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache_line_concurrency *c);
uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_alock *c);
/**
* @brief Return memory footprint consumed by cache concurrency module
@ -55,40 +52,37 @@ uint32_t ocf_cache_line_concurrency_suspended_no(struct ocf_cache_line_concurren
*/
size_t ocf_cache_line_concurrency_size_of(ocf_cache_t cache);
/* async request cacheline lock acquisition callback */
typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
/**
* @brief Lock OCF request for write access (Lock all cache lines in map info)
*
* @param c - cacheline concurrency private data
* @param req - OCF request
* @param cb - async lock acquisition callback
* @param cmpl - async lock acquisition callback
*
* @returns lock acquisition status or negative error code in case of internal
* error
* @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added to the waiting list. When the lock is acquired, the @cb callback will be called
* added to the waiting list. When the lock is acquired, the @cmpl callback will be called
*/
int ocf_req_async_lock_wr(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb);
int ocf_req_async_lock_wr(struct ocf_alock *c,
struct ocf_request *req, ocf_req_async_lock_cb cmpl);
/**
* @brief Lock OCF request for read access (Lock all cache lines in map info)
*
* @param c - cacheline concurrency private data
* @param req - OCF request
* @param cb - async lock acquisition callback
* @param cmpl - async lock acquisition callback
*
* @returns lock acquisition status or negative error code in case of internal
* error
* @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was
* added to the waiting list. When the lock is acquired, the @cb callback will be called
* added to the waiting list. When the lock is acquired, the @cmpl callback will be called
*/
int ocf_req_async_lock_rd(struct ocf_cache_line_concurrency *c,
struct ocf_request *req, ocf_req_async_lock_cb cb);
int ocf_req_async_lock_rd(struct ocf_alock *c,
struct ocf_request *req, ocf_req_async_lock_cb cmpl);
/**
* @brief Unlock OCF request from write access
@ -96,7 +90,7 @@ int ocf_req_async_lock_rd(struct ocf_cache_line_concurrency *c,
* @param c - cacheline concurrency private data
* @param req - OCF request
*/
void ocf_req_unlock_wr(struct ocf_cache_line_concurrency *c,
void ocf_req_unlock_wr(struct ocf_alock *c,
struct ocf_request *req);
/**
@ -105,7 +99,7 @@ void ocf_req_unlock_wr(struct ocf_cache_line_concurrency *c,
* @param c - cacheline concurrency private data
* @param req - OCF request
*/
void ocf_req_unlock_rd(struct ocf_cache_line_concurrency *c,
void ocf_req_unlock_rd(struct ocf_alock *c,
struct ocf_request *req);
/**
@ -114,7 +108,7 @@ void ocf_req_unlock_rd(struct ocf_cache_line_concurrency *c,
* @param c - cacheline concurrency private data
* @param req - OCF request
*/
void ocf_req_unlock(struct ocf_cache_line_concurrency *c,
void ocf_req_unlock(struct ocf_alock *c,
struct ocf_request *req);
/**
@ -131,7 +125,7 @@ void ocf_req_unlock(struct ocf_cache_line_concurrency *c,
* @retval true - cache line is used
* @retval false - cache line is not used
*/
bool ocf_cache_line_is_used(struct ocf_cache_line_concurrency *c,
bool ocf_cache_line_is_used(struct ocf_alock *c,
ocf_cache_line_t line);
/**
@ -144,7 +138,7 @@ bool ocf_cache_line_is_used(struct ocf_cache_line_concurrency *c,
* @retval true - there are waiters
* @retval false - No waiters
*/
bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
bool ocf_cache_line_are_waiters(struct ocf_alock *c,
ocf_cache_line_t line);
bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
@ -157,7 +151,7 @@ bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
* @param req - OCF request
* @param entry - request map entry number
*/
void ocf_req_unlock_entry(struct ocf_cache_line_concurrency *c,
void ocf_req_unlock_entry(struct ocf_alock *c,
struct ocf_request *req, uint32_t entry);
/**
@ -166,7 +160,7 @@ void ocf_req_unlock_entry(struct ocf_cache_line_concurrency *c,
* @param cache - OCF cache instance
* @param line - Cache line to be unlocked
*/
void ocf_cache_line_unlock_rd(struct ocf_cache_line_concurrency *c,
void ocf_cache_line_unlock_rd(struct ocf_alock *c,
ocf_cache_line_t line);
/**
@ -178,7 +172,7 @@ void ocf_cache_line_unlock_rd(struct ocf_cache_line_concurrency *c,
* @retval true - read lock successfully acquired
* @retval false - failed to acquire read lock
*/
bool ocf_cache_line_try_lock_rd(struct ocf_cache_line_concurrency *c,
bool ocf_cache_line_try_lock_rd(struct ocf_alock *c,
ocf_cache_line_t line);
/**
@ -187,7 +181,7 @@ bool ocf_cache_line_try_lock_rd(struct ocf_cache_line_concurrency *c,
* @param c - cacheline concurrency private data
* @param line - Cache line to be unlocked
*/
void ocf_cache_line_unlock_wr(struct ocf_cache_line_concurrency *c,
void ocf_cache_line_unlock_wr(struct ocf_alock *c,
ocf_cache_line_t line);
/**
@ -199,7 +193,7 @@ void ocf_cache_line_unlock_wr(struct ocf_cache_line_concurrency *c,
* @retval true - write lock successfully acquired
* @retval false - failed to acquire write lock
*/
bool ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
bool ocf_cache_line_try_lock_wr(struct ocf_alock *c,
ocf_cache_line_t line);
/**
@ -208,7 +202,7 @@ bool ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
* @param cache - cache instance
* @return cacheline concurrency context
*/
static inline struct ocf_cache_line_concurrency *
static inline struct ocf_alock *
ocf_cache_line_concurrency(ocf_cache_t cache)
{
return cache->device->concurrency.cache_line;


@ -13,16 +13,6 @@
* @brief OCF concurrency
*/
/**
* @brief Lock result - Lock acquired successfully
*/
#define OCF_LOCK_ACQUIRED 0
/**
* @brief Lock result - Lock not acquired, lock request added into waiting list
*/
#define OCF_LOCK_NOT_ACQUIRED 1
/**
* @brief Initialize OCF concurrency module
*


@ -0,0 +1,175 @@
/*
* Copyright(c) 2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf_concurrency.h"
#include "../metadata/metadata_io.h"
#include "../ocf_priv.h"
#include "../ocf_request.h"
#include "../utils/utils_alock.h"
#include "../utils/utils_cache_line.h"
struct ocf_mio_alock
{
unsigned first_page;
unsigned num_pages;
};
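/* The mio-specific state is appended directly after the generic
 * ocf_alock object (see ocf_mio_concurrency_init() below), so it is
 * recovered here by offsetting the base pointer by ocf_alock_obj_size().
 */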
static ocf_cache_line_t ocf_mio_lock_get_entry(
struct ocf_alock *alock, struct ocf_request *req,
unsigned index)
{
struct ocf_mio_alock *mio_alock = (void*)alock + ocf_alock_obj_size();
struct metadata_io_request *m_req = (struct metadata_io_request *)req;
unsigned page = m_req->page + index;
ENV_BUG_ON(page < mio_alock->first_page);
ENV_BUG_ON(page >= mio_alock->first_page + mio_alock->num_pages);
return page - mio_alock->first_page;
}
static int ocf_mio_lock_fast(struct ocf_alock *alock,
struct ocf_request *req, int rw)
{
ocf_cache_line_t entry;
int ret = OCF_LOCK_ACQUIRED;
int32_t i;
ENV_BUG_ON(rw != OCF_WRITE);
for (i = 0; i < req->core_line_count; i++) {
entry = ocf_mio_lock_get_entry(alock, req, i);
ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i));
if (ocf_alock_trylock_entry_wr(alock, entry)) {
/* cache entry locked */
ocf_alock_mark_index_locked(alock, req, i, true);
} else {
/* Not possible to lock all cachelines */
ret = OCF_LOCK_NOT_ACQUIRED;
break;
}
}
/* Check if request is locked */
if (ret == OCF_LOCK_NOT_ACQUIRED) {
/* Request is not locked, discard acquired locks */
for (; i >= 0; i--) {
entry = ocf_mio_lock_get_entry(alock, req, i);
if (ocf_alock_is_index_locked(alock, req, i)) {
ocf_alock_unlock_one_wr(alock, entry);
ocf_alock_mark_index_locked(alock, req, i, false);
}
}
}
return ret;
}
static int ocf_mio_lock_slow(struct ocf_alock *alock,
struct ocf_request *req, int rw, ocf_req_async_lock_cb cmpl)
{
int32_t i;
ocf_cache_line_t entry;
int ret = 0;
ENV_BUG_ON(rw != OCF_WRITE);
for (i = 0; i < req->core_line_count; i++) {
entry = ocf_mio_lock_get_entry(alock, req, i);
ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i));
if (!ocf_alock_lock_one_wr(alock, entry, cmpl, req, i)) {
/* lock not acquired and not added to wait list */
ret = -OCF_ERR_NO_MEM;
goto err;
}
}
return ret;
err:
for (; i >= 0; i--) {
entry = ocf_mio_lock_get_entry(alock, req, i);
ocf_alock_waitlist_remove_entry(alock, req, entry, i, OCF_WRITE);
}
return ret;
}
static struct ocf_alock_lock_cbs ocf_mio_conc_cbs = {
.lock_entries_fast = ocf_mio_lock_fast,
.lock_entries_slow = ocf_mio_lock_slow
};
int ocf_mio_async_lock(struct ocf_alock *alock,
struct metadata_io_request *m_req,
ocf_req_async_lock_cb cmpl)
{
return ocf_alock_lock_wr(alock, &m_req->req, cmpl);
}
void ocf_mio_async_unlock(struct ocf_alock *alock,
struct metadata_io_request *m_req)
{
ocf_cache_line_t entry;
struct ocf_request *req = &m_req->req;
int i;
for (i = 0; i < req->core_line_count; i++) {
if (!ocf_alock_is_index_locked(alock, req, i))
continue;
entry = ocf_mio_lock_get_entry(alock, req, i);
ocf_alock_unlock_one_wr(alock, entry);
ocf_alock_mark_index_locked(alock, req, i, false);
}
m_req->alock_status = 0;
}
#define ALLOCATOR_NAME_FMT "ocf_%s_mio_conc"
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + OCF_CACHE_NAME_SIZE)
int ocf_mio_concurrency_init(struct ocf_alock **self,
unsigned first_page, unsigned num_pages,
ocf_cache_t cache)
{
struct ocf_alock *alock;
struct ocf_mio_alock *mio_alock;
size_t base_size = ocf_alock_obj_size();
char name[ALLOCATOR_NAME_MAX];
int ret;
ret = snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
ocf_cache_get_name(cache));
if (ret < 0)
return ret;
if (ret >= ALLOCATOR_NAME_MAX)
return -OCF_ERR_NO_MEM;
alock = env_vzalloc(base_size + sizeof(struct ocf_mio_alock));
if (!alock)
return -OCF_ERR_NO_MEM;
ret = ocf_alock_init_inplace(alock, num_pages, name, &ocf_mio_conc_cbs, cache);
if (ret) {
env_vfree(alock);
return ret;
}
mio_alock = (void*)alock + base_size;
mio_alock->first_page = first_page;
mio_alock->num_pages = num_pages;
*self = alock;
return 0;
}
void ocf_mio_concurrency_deinit(struct ocf_alock **self)
{
ocf_alock_deinit(self);
}


@ -0,0 +1,26 @@
/*
* Copyright(c) 2021-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef OCF_MIO_CONCURRENCY_H_
#define OCF_MIO_CONCURRENCY_H_
#include "../utils/utils_alock.h"
struct metadata_io_request;
int ocf_mio_async_lock(struct ocf_alock *alock,
struct metadata_io_request *m_req,
ocf_req_async_lock_cb cmpl);
void ocf_mio_async_unlock(struct ocf_alock *alock,
struct metadata_io_request *m_req);
int ocf_mio_concurrency_init(struct ocf_alock **self,
unsigned first_page, unsigned num_pages,
ocf_cache_t cache);
void ocf_mio_concurrency_deinit(struct ocf_alock **self);
#endif
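
A hedged usage sketch for this API (illustrative only: on_pages_locked is a hypothetical completion callback, and the surrounding request handling is abbreviated):

/* Completion callback: all pages covered by the request are now
 * write-locked; resume the metadata IO, e.g. by re-queueing it. */
static void on_pages_locked(struct ocf_request *req)
{
	ocf_engine_push_req_front(req, true);
}

static int example_submit_with_lock(struct ocf_alock *mio_conc,
		struct metadata_io_request *m_req)
{
	int lock = ocf_mio_async_lock(mio_conc, m_req, on_pages_locked);

	if (lock < 0)
		return lock;			/* internal error */
	if (lock == OCF_LOCK_ACQUIRED)
		on_pages_locked(&m_req->req);	/* granted synchronously */
	/* else: on_pages_locked() fires once the conflicting IO unlocks */
	return 0;
}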


@ -494,7 +494,7 @@ static void ocf_engine_evict(struct ocf_request *req)
static int lock_clines(struct ocf_request *req)
{
struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(req->cache);
struct ocf_alock *c = ocf_cache_line_concurrency(req->cache);
enum ocf_engine_lock_type lock_type =
req->engine_cbs->get_lock_type(req);


@ -24,7 +24,7 @@
static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
{
struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(
struct ocf_alock *c = ocf_cache_line_concurrency(
req->cache);
if (error)
@ -139,7 +139,7 @@ err_alloc:
static int _ocf_read_generic_do(struct ocf_request *req)
{
if (ocf_engine_is_miss(req) && req->map->rd_locked) {
if (ocf_engine_is_miss(req) && req->alock_rw == OCF_READ) {
/* Miss can be handled only on write locks.
* Need to switch to PT
*/


@ -346,7 +346,7 @@ static inline bool _lru_evp_all_empty(struct ocf_lru_iter *iter)
static bool inline _lru_trylock_cacheline(struct ocf_lru_iter *iter,
ocf_cache_line_t cline)
{
struct ocf_cache_line_concurrency *c =
struct ocf_alock *c =
ocf_cache_line_concurrency(iter->cache);
return iter->cl_lock_write ?
@ -357,7 +357,7 @@ static bool inline _lru_trylock_cacheline(struct ocf_lru_iter *iter,
static void inline _lru_unlock_cacheline(struct ocf_lru_iter *iter,
ocf_cache_line_t cline)
{
struct ocf_cache_line_concurrency *c =
struct ocf_alock *c =
ocf_cache_line_concurrency(iter->cache);
if (iter->cl_lock_write)
@ -611,6 +611,7 @@ bool evp_lru_can_evict(ocf_cache_t cache)
uint32_t evp_lru_req_clines(struct ocf_request *req,
struct ocf_user_part *part, uint32_t cline_no)
{
struct ocf_alock* alock;
struct ocf_lru_iter iter;
uint32_t i;
ocf_cache_line_t cline;
@ -680,10 +681,10 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
req->map[req_idx].status = LOOKUP_REMAPPED;
ocf_engine_patch_req_info(cache, req, req_idx);
if (cl_write_lock)
req->map[req_idx].wr_locked = true;
else
req->map[req_idx].rd_locked = true;
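/* Track the acquired lock in the generic per-index alock status;
 * alock_rw records whether the lines were taken for read or write.
 */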
alock = ocf_cache_line_concurrency(iter.cache);
ocf_alock_mark_index_locked(alock, req, req_idx, true);
req->alock_rw = cl_write_lock ? OCF_WRITE : OCF_READ;
++req_idx;
++i;


@ -1585,16 +1585,12 @@ int ocf_metadata_init(struct ocf_cache *cache,
OCF_DEBUG_TRACE(cache);
ret = ocf_metadata_init_fixed_size(cache, cache_line_size);
if (ret) {
ocf_metadata_io_deinit(cache);
if (ret)
return ret;
}
ret = ocf_metadata_concurrency_init(&cache->metadata.lock);
if (ret) {
ocf_metadata_deinit_fixed_size(cache);
ocf_metadata_io_deinit(cache);
return ret;
}
@ -1607,7 +1603,6 @@ void ocf_metadata_deinit(struct ocf_cache *cache)
ocf_metadata_deinit_fixed_size(cache);
ocf_metadata_concurrency_deinit(&cache->metadata.lock);
ocf_metadata_io_deinit(cache);
}
void ocf_metadata_error(struct ocf_cache *cache)


@ -12,6 +12,7 @@
#include "../utils/utils_io.h"
#include "../ocf_request.h"
#include "../ocf_def_priv.h"
#include "../concurrency/ocf_mio_concurrency.h"
#define OCF_METADATA_IO_DEBUG 0
@ -272,7 +273,6 @@ static void metadata_io_req_advance(struct metadata_io_request *m_req);
static void metadata_io_io_end(struct metadata_io_request *m_req, int error)
{
struct metadata_io_request_asynch *a_req = m_req->asynch;
ocf_cache_t cache = m_req->cache;
OCF_CHECK_NULL(a_req);
OCF_CHECK_NULL(a_req->on_complete);
@ -286,16 +286,50 @@ static void metadata_io_io_end(struct metadata_io_request *m_req, int error)
OCF_DEBUG_PARAM(cache, "Page = %u", m_req->page);
if (a_req->mio_conc)
ocf_mio_async_unlock(a_req->mio_conc, m_req);
metadata_io_req_advance(m_req);
env_atomic_set(&m_req->finished, 1);
ocf_metadata_updater_kick(cache);
metadata_io_req_complete(m_req);
}
static void matadata_io_page_lock_acquired(struct ocf_request *req)
{
ocf_engine_push_req_front(req, true);
}
void metadata_io_req_finalize(struct metadata_io_request *m_req)
{
struct metadata_io_request_asynch *a_req = m_req->asynch;
if (env_atomic_dec_return(&a_req->req_active) == 0)
env_mpool_del(m_req->cache->owner->resources.mio, a_req,
a_req->alloc_req_count);
}
static void metadata_io_req_submit(struct metadata_io_request *m_req)
{
struct metadata_io_request_asynch *a_req = m_req->asynch;
int lock;
env_atomic_set(&m_req->finished, 0);
metadata_updater_submit(m_req);
if (a_req->mio_conc) {
lock = ocf_mio_async_lock(a_req->mio_conc, m_req,
matadata_io_page_lock_acquired);
if (lock < 0) {
a_req->error = lock;
metadata_io_req_finalize(m_req);
return;
}
}
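/* Lock granted synchronously, or no locking needed: start the IO now.
 * Otherwise the request is re-queued from the lock completion callback
 * once the conflicting pages are released.
 */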
if (!a_req->mio_conc || lock == OCF_LOCK_ACQUIRED)
matadata_io_page_lock_acquired(&m_req->req);
}
void metadata_io_req_end(struct metadata_io_request *m_req)
@ -309,18 +343,14 @@ void metadata_io_req_end(struct metadata_io_request *m_req)
ctx_data_free(cache->owner, m_req->data);
}
void metadata_io_req_finalize(struct metadata_io_request *m_req)
{
struct metadata_io_request_asynch *a_req = m_req->asynch;
if (env_atomic_dec_return(&a_req->req_active) == 0)
env_mpool_del(m_req->cache->owner->resources.mio, a_req,
a_req->alloc_req_count);
}
static uint32_t metadata_io_max_page(ocf_cache_t cache)
{
return ocf_volume_get_max_io_size(&cache->device->volume) / PAGE_SIZE;
uint32_t volume_max_io_pages = ocf_volume_get_max_io_size(
&cache->device->volume) / PAGE_SIZE;
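/* m_req->alock_status is a single uint64_t used as a per-page lock
 * bitmap, so one metadata IO request can cover at most
 * sizeof(uint64_t) * 8 = 64 pages at a time.
 */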
struct metadata_io_request *m_req;
uint32_t request_map_capacity_pages = sizeof(m_req->alock_status) * 8;
return OCF_MIN(volume_max_io_pages, request_map_capacity_pages);
}
static void metadata_io_req_advance(struct metadata_io_request *m_req)
@ -344,6 +374,7 @@ static void metadata_io_req_advance(struct metadata_io_request *m_req)
m_req->page = a_req->page + curr * max_count;
m_req->count = OCF_MIN(a_req->count - curr * max_count, max_count);
m_req->req.core_line_count = m_req->count;
}
static void metadata_io_req_start(struct metadata_io_request *m_req)
@ -381,7 +412,8 @@ void metadata_io_req_complete(struct metadata_io_request *m_req)
static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
void *context, uint32_t page, uint32_t count, int flags,
ocf_metadata_io_event_t io_hndl,
ocf_metadata_io_end_t compl_hndl)
ocf_metadata_io_end_t compl_hndl,
struct ocf_alock *mio_conc)
{
struct metadata_io_request_asynch *a_req;
struct metadata_io_request *m_req;
@ -410,6 +442,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
a_req->flags = flags;
a_req->on_meta_fill = io_hndl;
a_req->on_meta_drain = io_hndl;
a_req->mio_conc = mio_conc;
/* IO Requests initialization */
for (i = 0; i < req_count; i++) {
@ -425,6 +458,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
m_req->req.info.internal = true;
m_req->req.rw = dir;
m_req->req.map = LIST_POISON1;
m_req->req.alock_status = (uint8_t*)&m_req->alock_status;
/* If req_count == io_count and count is not a multiple of
* max_count, for the last request we can allocate data smaller than
@ -460,10 +494,11 @@ err:
int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
void *context, uint32_t page, uint32_t count, int flags,
ocf_metadata_io_event_t fill_hndl,
ocf_metadata_io_end_t compl_hndl)
ocf_metadata_io_end_t compl_hndl,
struct ocf_alock *mio_conc)
{
return metadata_io_i_asynch(cache, queue, OCF_WRITE, context,
page, count, flags, fill_hndl, compl_hndl);
page, count, flags, fill_hndl, compl_hndl, mio_conc);
}
int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
@ -472,7 +507,7 @@ int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
ocf_metadata_io_end_t compl_hndl)
{
return metadata_io_i_asynch(cache, queue, OCF_READ, context,
page, count, flags, drain_hndl, compl_hndl);
page, count, flags, drain_hndl, compl_hndl, NULL);
}
#define MIO_RPOOL_LIMIT 16
@ -505,13 +540,3 @@ void ocf_metadata_io_ctx_deinit(struct ocf_ctx *ocf_ctx)
env_mpool_destroy(ocf_ctx->resources.mio);
ocf_ctx->resources.mio = NULL;
}
int ocf_metadata_io_init(ocf_cache_t cache)
{
return ocf_metadata_updater_init(cache);
}
void ocf_metadata_io_deinit(ocf_cache_t cache)
{
ocf_metadata_updater_stop(cache);
}


@ -6,6 +6,8 @@
#ifndef __METADATA_IO_H__
#define __METADATA_IO_H__
#include "../concurrency/ocf_mio_concurrency.h"
/**
* @file metadata_io.h
* @brief Metadata IO utilities
@ -54,6 +56,7 @@ struct metadata_io_request {
env_atomic finished;
uint32_t page;
uint32_t count;
uint64_t alock_status;
};
/*
@ -67,6 +70,7 @@ struct metadata_io_request_asynch {
ocf_metadata_io_event_t on_meta_fill;
ocf_metadata_io_event_t on_meta_drain;
ocf_metadata_io_end_t on_complete;
struct ocf_alock *mio_conc;
uint32_t page;
uint32_t count;
uint32_t alloc_req_count; /*< Number of allocated metadata_io_requests */
@ -122,7 +126,8 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue,
int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
void *context, uint32_t page, uint32_t count, int flags,
ocf_metadata_io_event_t fill_hndl,
ocf_metadata_io_end_t compl_hndl);
ocf_metadata_io_end_t compl_hndl,
struct ocf_alock *mio_conc);
/**
* @brief Iterative asynchronous pages read
@ -152,14 +157,4 @@ int ocf_metadata_io_ctx_init(struct ocf_ctx *ocf_ctx);
*/
void ocf_metadata_io_ctx_deinit(struct ocf_ctx *ocf_ctx);
/**
* Function for initializing metadata io.
*/
int ocf_metadata_io_init(ocf_cache_t cache);
/**
* Function for deinitializing metadata io.
*/
void ocf_metadata_io_deinit(ocf_cache_t cache);
#endif /* __METADATA_IO_H__ */


@ -51,7 +51,7 @@ int ocf_metadata_actor(struct ocf_cache *cache,
ocf_cache_line_t i, next_i;
uint64_t start_line, end_line;
int ret = 0;
struct ocf_cache_line_concurrency *c =
struct ocf_alock *c =
ocf_cache_line_concurrency(cache);
start_line = ocf_bytes_2_lines(cache, start_byte);


@ -83,6 +83,8 @@ static int _raw_ram_deinit(ocf_cache_t cache,
raw->mem_pool = NULL;
}
ocf_mio_concurrency_deinit(&raw->mio_conc);
return 0;
}
@ -95,16 +97,27 @@ static int _raw_ram_init(ocf_cache_t cache,
struct ocf_metadata_raw *raw)
{
size_t mem_pool_size;
int ret;
OCF_DEBUG_TRACE(cache);
/* TODO: caller should specify explicitly whether to init mio conc? */
if (lock_page_pfn) {
ret = ocf_mio_concurrency_init(&raw->mio_conc,
raw->ssd_pages_offset, raw->ssd_pages, cache);
if (ret)
return ret;
}
/* Allocate memory pool for entries */
mem_pool_size = raw->ssd_pages;
mem_pool_size *= PAGE_SIZE;
raw->mem_pool_limit = mem_pool_size;
raw->mem_pool = env_secure_alloc(mem_pool_size);
if (!raw->mem_pool)
if (!raw->mem_pool) {
ocf_mio_concurrency_deinit(&raw->mio_conc);
return -OCF_ERR_NO_MEM;
}
ENV_BUG_ON(env_memset(raw->mem_pool, mem_pool_size, 0));
raw->lock_page = lock_page_pfn;
@ -310,7 +323,8 @@ static void _raw_ram_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
raw->ssd_pages_offset, raw->ssd_pages, 0,
_raw_ram_flush_all_fill, _raw_ram_flush_all_complete);
_raw_ram_flush_all_fill, _raw_ram_flush_all_complete,
raw->mio_conc);
if (result)
_raw_ram_flush_all_complete(cache, context, result);
}
@ -516,7 +530,8 @@ static int _raw_ram_flush_do_asynch(ocf_cache_t cache,
raw->ssd_pages_offset + start_page, count,
req->ioi.io.flags,
_raw_ram_flush_do_asynch_fill,
_raw_ram_flush_do_asynch_io_complete);
_raw_ram_flush_do_asynch_io_complete,
raw->mio_conc);
if (result)
break;


@ -7,6 +7,7 @@
#define __METADATA_RAW_H__
#include "metadata_segment_id.h"
#include "../concurrency/ocf_mio_concurrency.h"
/**
* @file metadata_raw.h
@ -88,6 +89,8 @@ struct ocf_metadata_raw {
ocf_flush_page_synch_t lock_page; /*!< Page lock callback */
ocf_flush_page_synch_t unlock_page; /*!< Page unlock callback */
struct ocf_alock *mio_conc;
};
/**


@ -126,6 +126,8 @@ int raw_dynamic_deinit(ocf_cache_t cache,
OCF_DEBUG_TRACE(cache);
ocf_mio_concurrency_deinit(&raw->mio_conc);
for (i = 0; i < raw->ssd_pages; i++)
env_secure_free(ctrl->pages[i], PAGE_SIZE);
@ -147,19 +149,30 @@ int raw_dynamic_init(ocf_cache_t cache,
{
struct _raw_ctrl *ctrl;
size_t size = sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages);
int ret;
OCF_DEBUG_TRACE(cache);
if (raw->entry_size > PAGE_SIZE)
return -1;
/* TODO: caller should specify explicitly whether to init mio conc? */
if (lock_page_pfn) {
ret = ocf_mio_concurrency_init(&raw->mio_conc,
raw->ssd_pages_offset, raw->ssd_pages, cache);
if (ret)
return ret;
}
ctrl = env_vmalloc(size);
if (!ctrl)
if (!ctrl) {
ocf_mio_concurrency_deinit(&raw->mio_conc);
return -1;
}
ENV_BUG_ON(env_memset(ctrl, size, 0));
if (env_mutex_init(&ctrl->lock)) {
ocf_mio_concurrency_deinit(&raw->mio_conc);
env_vfree(ctrl);
return -1;
}
@ -519,7 +532,8 @@ void raw_dynamic_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
raw->ssd_pages_offset, raw->ssd_pages, 0,
raw_dynamic_flush_all_fill,
raw_dynamic_flush_all_complete);
raw_dynamic_flush_all_complete,
raw->mio_conc);
if (result)
OCF_CMPL_RET(priv, result);
}


@ -1,163 +0,0 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_io.h"
#include "metadata_updater_priv.h"
#include "../ocf_priv.h"
#include "../engine/engine_common.h"
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
#include "../utils/utils_io.h"
int ocf_metadata_updater_init(ocf_cache_t cache)
{
ocf_metadata_updater_t mu = &cache->metadata_updater;
struct ocf_metadata_io_syncher *syncher = &mu->syncher;
INIT_LIST_HEAD(&syncher->in_progress_head);
INIT_LIST_HEAD(&syncher->pending_head);
env_mutex_init(&syncher->lock);
return ctx_metadata_updater_init(cache->owner, mu);
}
void ocf_metadata_updater_kick(ocf_cache_t cache)
{
ctx_metadata_updater_kick(cache->owner, &cache->metadata_updater);
}
void ocf_metadata_updater_stop(ocf_cache_t cache)
{
ctx_metadata_updater_stop(cache->owner, &cache->metadata_updater);
env_mutex_destroy(&cache->metadata_updater.syncher.lock);
}
void ocf_metadata_updater_set_priv(ocf_metadata_updater_t mu, void *priv)
{
OCF_CHECK_NULL(mu);
mu->priv = priv;
}
void *ocf_metadata_updater_get_priv(ocf_metadata_updater_t mu)
{
OCF_CHECK_NULL(mu);
return mu->priv;
}
ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu)
{
OCF_CHECK_NULL(mu);
return container_of(mu, struct ocf_cache, metadata_updater);
}
static int _metadata_updater_iterate_in_progress(ocf_cache_t cache,
struct list_head *finished, struct metadata_io_request *new_req)
{
struct ocf_metadata_io_syncher *syncher =
&cache->metadata_updater.syncher;
struct metadata_io_request *curr, *temp;
list_for_each_entry_safe(curr, temp, &syncher->in_progress_head, list) {
if (env_atomic_read(&curr->finished)) {
list_move_tail(&curr->list, finished);
continue;
}
if (new_req) {
/* If request specified, check if overlap occurs. */
if (ocf_io_overlaps(new_req->page, new_req->count,
curr->page, curr->count)) {
return 1;
}
}
}
return 0;
}
static void metadata_updater_process_finished(struct list_head *finished)
{
struct metadata_io_request *curr, *temp;
list_for_each_entry_safe(curr, temp, finished, list) {
list_del(&curr->list);
metadata_io_req_complete(curr);
}
}
void metadata_updater_submit(struct metadata_io_request *m_req)
{
ocf_cache_t cache = m_req->cache;
struct ocf_metadata_io_syncher *syncher =
&cache->metadata_updater.syncher;
struct list_head finished;
int ret;
INIT_LIST_HEAD(&finished);
env_mutex_lock(&syncher->lock);
ret = _metadata_updater_iterate_in_progress(cache, &finished, m_req);
/* Either add it to in-progress list or pending list for deferred
* execution.
*/
if (ret == 0)
list_add_tail(&m_req->list, &syncher->in_progress_head);
else
list_add_tail(&m_req->list, &syncher->pending_head);
env_mutex_unlock(&syncher->lock);
if (ret == 0)
ocf_engine_push_req_front(&m_req->req, true);
metadata_updater_process_finished(&finished);
}
uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu)
{
struct metadata_io_request *curr, *temp;
struct ocf_metadata_io_syncher *syncher;
struct list_head finished;
ocf_cache_t cache;
int ret;
OCF_CHECK_NULL(mu);
INIT_LIST_HEAD(&finished);
cache = ocf_metadata_updater_get_cache(mu);
syncher = &cache->metadata_updater.syncher;
env_mutex_lock(&syncher->lock);
if (list_empty(&syncher->pending_head)) {
/*
* If pending list is empty, we iterate over in progress
* list to free memory used by finished requests.
*/
_metadata_updater_iterate_in_progress(cache, &finished, NULL);
env_mutex_unlock(&syncher->lock);
metadata_updater_process_finished(&finished);
env_cond_resched();
return 0;
}
list_for_each_entry_safe(curr, temp, &syncher->pending_head, list) {
ret = _metadata_updater_iterate_in_progress(cache, &finished, curr);
if (ret == 0) {
/* Move to in-progress list and kick the workers */
list_move_tail(&curr->list, &syncher->in_progress_head);
}
env_mutex_unlock(&syncher->lock);
metadata_updater_process_finished(&finished);
if (ret == 0)
ocf_engine_push_req_front(&curr->req, true);
env_cond_resched();
env_mutex_lock(&syncher->lock);
}
env_mutex_unlock(&syncher->lock);
return 0;
}


@ -1,32 +0,0 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_UPDATER_PRIV_H__
#define __METADATA_UPDATER_PRIV_H__
#include "../ocf_def_priv.h"
#include "metadata_io.h"
struct ocf_metadata_updater {
/* Metadata flush synchronizer context */
struct ocf_metadata_io_syncher {
struct list_head in_progress_head;
struct list_head pending_head;
env_mutex lock __attribute__((aligned(64)));
} syncher;
void *priv;
};
void metadata_updater_submit(struct metadata_io_request *m_req);
int ocf_metadata_updater_init(struct ocf_cache *cache);
void ocf_metadata_updater_kick(struct ocf_cache *cache);
void ocf_metadata_updater_stop(struct ocf_cache *cache);
#endif /* __METADATA_UPDATER_PRIV_H__ */


@ -10,6 +10,7 @@
#include "../ocf_core_priv.h"
#include "../ocf_queue_priv.h"
#include "../metadata/metadata.h"
#include "../metadata/metadata_io.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_cache_line.h"
@ -1231,10 +1232,6 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
params.flags.added_to_list = true;
env_rmutex_unlock(&ctx->lock);
result = ocf_metadata_io_init(tmp_cache);
if (result)
goto _cache_mngt_init_instance_ERROR;
ocf_cache_log(tmp_cache, log_debug, "Metadata initialized\n");
_ocf_mngt_cache_init(tmp_cache, &params);


@ -12,7 +12,6 @@
#include "ocf_core_priv.h"
#include "metadata/metadata_structs.h"
#include "metadata/metadata_partition_structs.h"
#include "metadata/metadata_updater_priv.h"
#include "utils/utils_list.h"
#include "utils/utils_pipeline.h"
#include "utils/utils_refcnt.h"
@ -60,7 +59,7 @@ struct ocf_cache_device {
uint64_t metadata_offset;
struct {
struct ocf_cache_line_concurrency *cache_line;
struct ocf_alock *cache_line;
} concurrency;
struct ocf_superblock_runtime *runtime_meta;
@ -114,8 +113,6 @@ struct ocf_cache {
env_atomic flush_in_progress;
env_mutex flush_mutex;
struct ocf_metadata_updater metadata_updater;
struct ocf_cleaner cleaner;
struct list_head io_queues;


@ -136,10 +136,6 @@ static void check_ops_provided(const struct ocf_ctx_ops *ops)
ENV_BUG_ON(!ops->cleaner.init);
ENV_BUG_ON(!ops->cleaner.kick);
ENV_BUG_ON(!ops->cleaner.stop);
ENV_BUG_ON(!ops->metadata_updater.init);
ENV_BUG_ON(!ops->metadata_updater.kick);
ENV_BUG_ON(!ops->metadata_updater.stop);
}
/*


@ -160,24 +160,6 @@ static inline void ctx_cleaner_kick(ocf_ctx_t ctx, ocf_cleaner_t cleaner)
ctx->ops->cleaner.kick(cleaner);
}
static inline int ctx_metadata_updater_init(ocf_ctx_t ctx,
ocf_metadata_updater_t mu)
{
return ctx->ops->metadata_updater.init(mu);
}
static inline void ctx_metadata_updater_kick(ocf_ctx_t ctx,
ocf_metadata_updater_t mu)
{
ctx->ops->metadata_updater.kick(mu);
}
static inline void ctx_metadata_updater_stop(ocf_ctx_t ctx,
ocf_metadata_updater_t mu)
{
ctx->ops->metadata_updater.stop(mu);
}
/**
* @}
*/


@ -43,10 +43,19 @@ static inline size_t ocf_req_sizeof_map(struct ocf_request *req)
return size;
}
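/* One lock-status byte per core line; the status array lives in the
 * same allocation as the mapping table, directly after the last
 * ocf_map_info entry (see ocf_req_new() and ocf_req_alloc_map() below).
 */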
static inline size_t ocf_req_sizeof_alock_status(struct ocf_request *req)
{
uint32_t lines = req->core_line_count;
size_t size = (lines * sizeof(uint8_t));
ENV_BUG_ON(lines == 0);
return size;
}
int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx)
{
ocf_ctx->resources.req = env_mpool_create(sizeof(struct ocf_request),
sizeof(struct ocf_map_info), ENV_MEM_NORMAL, ocf_req_size_128,
sizeof(struct ocf_map_info) + sizeof(uint8_t), ENV_MEM_NORMAL, ocf_req_size_128,
false, NULL, "ocf_req", true);
if (ocf_ctx->resources.req == NULL)
@ -90,6 +99,7 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
if (map_allocated) {
req->map = req->__map;
req->alock_status = (uint8_t*)&req->__map[core_line_count];
req->alloc_core_line_count = core_line_count;
} else {
req->alloc_core_line_count = 1;
@ -131,12 +141,15 @@ int ocf_req_alloc_map(struct ocf_request *req)
if (req->map)
return 0;
req->map = env_zalloc(ocf_req_sizeof_map(req), ENV_MEM_NOIO);
req->map = env_zalloc(ocf_req_sizeof_map(req) +
ocf_req_sizeof_alock_status(req), ENV_MEM_NOIO);
if (!req->map) {
req->error = -OCF_ERR_NO_MEM;
return -OCF_ERR_NO_MEM;
}
req->alock_status = &((uint8_t*)req->map)[ocf_req_sizeof_map(req)];
return 0;
}


@ -61,12 +61,6 @@ struct ocf_map_info {
uint16_t status : 8;
/*!< Traverse or mapping status - HIT, MISS, etc... */
uint16_t rd_locked : 1;
/*!< Indicates if cache line is locked for READ access */
uint16_t wr_locked : 1;
/*!< Indicates if cache line is locked for WRITE access */
uint16_t invalid : 1;
/*!< This bit indicates that mapping is invalid */
@ -222,9 +216,15 @@ struct ocf_request {
struct ocf_req_discard_info discard;
uint32_t alock_rw;
/*!< Read/Write mode for alock*/
uint8_t *alock_status;
/*!< Mapping for locked/unlocked alock entries */
struct ocf_map_info *map;
struct ocf_map_info __map[];
struct ocf_map_info __map[0];
};
typedef void (*ocf_req_end_t)(struct ocf_request *req, int error);

src/utils/utils_alock.c (new file, 813 lines)

@ -0,0 +1,813 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "../ocf_cache_priv.h"
#include "../ocf_priv.h"
#include "../ocf_request.h"
#include "utils_alock.h"
#define OCF_CACHE_CONCURRENCY_DEBUG 0
#if 1 == OCF_CACHE_CONCURRENCY_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Concurrency][Cache] %s\n", __func__)
#define OCF_DEBUG_RQ(req, format, ...) \
ocf_cache_log(req->cache, log_info, "[Concurrency][Cache][%s][%p] %s - " \
format"\n", OCF_READ == (req)->rw ? "RD" : "WR", req, \
__func__, ##__VA_ARGS__)
#define OCF_DEBUG_CACHE(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Concurrency][Cache][%s] - " \
format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_RQ(req, format, ...)
#define OCF_DEBUG_CACHE(cache, format, ...)
#endif
#define OCF_CACHE_LINE_ACCESS_WR INT_MAX
#define OCF_CACHE_LINE_ACCESS_IDLE 0
#define OCF_CACHE_LINE_ACCESS_ONE_RD 1
#define _WAITERS_LIST_SIZE (16UL * MiB)
#define _WAITERS_LIST_ENTRIES \
(_WAITERS_LIST_SIZE / sizeof(struct ocf_alock_waiters_list))
#define _WAITERS_LIST_ITEM(entry) ((entry) % _WAITERS_LIST_ENTRIES)
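/* Waiter lists live in a fixed array of buckets; an entry index is
 * hashed onto a bucket by simple modulo, so unrelated entries may share
 * a bucket (and its spinlock), which affects contention but not
 * correctness.
 */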
struct ocf_alock_waiter {
ocf_cache_line_t entry;
uint32_t idx;
struct ocf_request *req;
ocf_req_async_lock_cb cmpl;
struct list_head item;
int rw;
};
struct ocf_alock_waiters_list {
struct list_head head;
env_spinlock lock;
};
struct ocf_alock {
ocf_cache_t cache;
env_mutex lock;
env_atomic *access;
env_atomic waiting;
ocf_cache_line_t num_entries;
env_allocator *allocator;
struct ocf_alock_lock_cbs *cbs;
struct ocf_alock_waiters_list waiters_lsts[_WAITERS_LIST_ENTRIES];
};
void ocf_alock_mark_index_locked(struct ocf_alock *alock,
struct ocf_request *req, unsigned index, bool locked)
{
if (locked)
env_bit_set(index, req->alock_status);
else
env_bit_clear(index, req->alock_status);
}
bool ocf_alock_is_index_locked(struct ocf_alock *alock,
struct ocf_request *req, unsigned index)
{
return env_bit_test(index, (unsigned long*)req->alock_status);
}
size_t ocf_alock_obj_size(void)
{
return sizeof(struct ocf_alock);
}
int ocf_alock_init_inplace(struct ocf_alock *self, unsigned num_entries,
const char* name, struct ocf_alock_lock_cbs *cbs, ocf_cache_t cache)
{
uint32_t i;
int error = 0;
OCF_DEBUG_TRACE(cache);
self->cache = cache;
self->num_entries = num_entries;
self->cbs = cbs;
error = env_mutex_init(&self->lock);
if (error) {
error = __LINE__;
goto rwsem_err;
}
self->access = env_vzalloc(num_entries * sizeof(self->access[0]));
if (!self->access) {
error = __LINE__;
goto allocation_err;
}
self->allocator = env_allocator_create(sizeof(struct ocf_alock_waiter), name, false);
if (!self->allocator) {
error = __LINE__;
goto allocation_err;
}
/* Init concurrency control table */
for (i = 0; i < _WAITERS_LIST_ENTRIES; i++) {
INIT_LIST_HEAD(&self->waiters_lsts[i].head);
error = env_spinlock_init(&self->waiters_lsts[i].lock);
if (error) {
error = __LINE__;
goto spinlock_err;
}
}
return 0;
spinlock_err:
while (i--)
env_spinlock_destroy(&self->waiters_lsts[i].lock);
allocation_err:
if (self->allocator)
env_allocator_destroy(self->allocator);
if (self->access)
env_vfree(self->access);
env_mutex_destroy(&self->lock);
rwsem_err:
ocf_cache_log(cache, log_err, "Cannot initialize cache concurrency, "
"ERROR %d", error);
return -1;
}
int ocf_alock_init(struct ocf_alock **self, unsigned num_entries,
const char* name, struct ocf_alock_lock_cbs *cbs, ocf_cache_t cache)
{
struct ocf_alock *alock;
int ret;
OCF_DEBUG_TRACE(cache);
alock = env_vzalloc(sizeof(*alock));
if (!alock)
return -OCF_ERR_NO_MEM;
ret = ocf_alock_init_inplace(alock, num_entries,
name, cbs, cache);
if (!ret)
*self = alock;
else
env_vfree(alock);
return ret;
}
void ocf_alock_deinit(struct ocf_alock **self)
{
struct ocf_alock *concurrency = *self;
int i;
if (!concurrency)
return;
OCF_DEBUG_TRACE(concurrency->cache);
env_mutex_destroy(&concurrency->lock);
for (i = 0; i < _WAITERS_LIST_ENTRIES; i++)
env_spinlock_destroy(&concurrency->waiters_lsts[i].lock);
if (concurrency->access)
env_vfree(concurrency->access);
if (concurrency->allocator)
env_allocator_destroy(concurrency->allocator);
env_vfree(concurrency);
*self = NULL;
}
size_t ocf_alock_size(unsigned num_entries)
{
size_t size;
size = sizeof(env_atomic);
size *= num_entries;
size += sizeof(struct ocf_alock);
return size;
}
static inline bool ocf_alock_waitlist_is_empty_locked(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
bool are = false;
struct list_head *iter;
uint32_t idx = _WAITERS_LIST_ITEM(entry);
struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
struct ocf_alock_waiter *waiter;
/* If list empty that means there are no waiters on cache entry */
if (list_empty(&lst->head))
return true;
list_for_each(iter, &lst->head) {
waiter = list_entry(iter, struct ocf_alock_waiter, item);
if (waiter->entry == entry) {
are = true;
break;
}
}
return !are;
}
static inline void ocf_alock_waitlist_add(struct ocf_alock *alock,
ocf_cache_line_t entry, struct ocf_alock_waiter *waiter)
{
uint32_t idx = _WAITERS_LIST_ITEM(entry);
struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
list_add_tail(&waiter->item, &lst->head);
}
#define ocf_alock_waitlist_lock(cncrrncy, entry, flags) \
do { \
uint32_t idx = _WAITERS_LIST_ITEM(entry); \
struct ocf_alock_waiters_list *lst = &cncrrncy->waiters_lsts[idx]; \
env_spinlock_lock_irqsave(&lst->lock, flags); \
} while (0)
#define ocf_alock_waitlist_unlock(cncrrncy, entry, flags) \
do { \
uint32_t idx = _WAITERS_LIST_ITEM(entry); \
struct ocf_alock_waiters_list *lst = &cncrrncy->waiters_lsts[idx]; \
env_spinlock_unlock_irqrestore(&lst->lock, flags); \
} while (0)
bool ocf_alock_trylock_entry_wr(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int prev = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_IDLE,
OCF_CACHE_LINE_ACCESS_WR);
return prev == OCF_CACHE_LINE_ACCESS_IDLE;
}
bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int prev = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_IDLE,
OCF_CACHE_LINE_ACCESS_ONE_RD);
return (prev == OCF_CACHE_LINE_ACCESS_IDLE);
}
static inline bool ocf_alock_trylock_entry_rd(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
return !!env_atomic_add_unless(access, 1, OCF_CACHE_LINE_ACCESS_WR);
}
static inline void ocf_alock_unlock_entry_wr(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR);
env_atomic_set(access, OCF_CACHE_LINE_ACCESS_IDLE);
}
static inline void ocf_alock_unlock_entry_rd(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int v = env_atomic_read(access);
ENV_BUG_ON(v == 0);
ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_WR);
env_atomic_dec(access);
}
static inline bool ocf_alock_trylock_entry_wr2wr(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int v = env_atomic_read(access);
ENV_BUG_ON(v != OCF_CACHE_LINE_ACCESS_WR);
return true;
}
static inline bool ocf_alock_trylock_entry_wr2rd(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int v = env_atomic_read(access);
ENV_BUG_ON(v != OCF_CACHE_LINE_ACCESS_WR);
env_atomic_set(access, OCF_CACHE_LINE_ACCESS_ONE_RD);
return true;
}
static inline bool ocf_alock_trylock_entry_rd2wr(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int v = env_atomic_read(access);
ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_IDLE);
ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_WR);
v = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_ONE_RD,
OCF_CACHE_LINE_ACCESS_WR);
return (v == OCF_CACHE_LINE_ACCESS_ONE_RD);
}
static inline bool ocf_alock_trylock_entry_rd2rd(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int v = env_atomic_read(access);
ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_IDLE);
ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_WR);
return true;
}
static void ocf_alock_entry_locked(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
if (env_atomic_dec_return(&req->lock_remaining) == 0) {
/* All cache entry locked, resume request */
OCF_DEBUG_RQ(req, "Resume");
ENV_BUG_ON(!cmpl);
env_atomic_dec(&alock->waiting);
cmpl(req);
}
}
bool ocf_alock_lock_one_wr(struct ocf_alock *alock,
const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl,
void *req, uint32_t idx)
{
struct ocf_alock_waiter *waiter;
bool waiting = false;
unsigned long flags = 0;
ENV_BUG_ON(!cmpl);
if (ocf_alock_trylock_entry_wr(alock, entry)) {
/* lock was not owned by anyone */
ocf_alock_mark_index_locked(alock, req, idx, true);
ocf_alock_entry_locked(alock, req, cmpl);
return true;
}
waiter = env_allocator_new(alock->allocator);
if (!waiter)
return false;
ocf_alock_waitlist_lock(alock, entry, flags);
/* Now that the list is protected, double check whether the cache entry is
* unlocked
*/
if (ocf_alock_trylock_entry_wr(alock, entry))
goto unlock;
/* Setup waiter fields */
waiter->entry = entry;
waiter->req = req;
waiter->idx = idx;
waiter->cmpl = cmpl;
waiter->rw = OCF_WRITE;
INIT_LIST_HEAD(&waiter->item);
/* Add to waiters list */
ocf_alock_waitlist_add(alock, entry, waiter);
waiting = true;
unlock:
ocf_alock_waitlist_unlock(alock, entry, flags);
if (!waiting) {
ocf_alock_mark_index_locked(alock, req, idx, true);
ocf_alock_entry_locked(alock, req, cmpl);
env_allocator_del(alock->allocator, waiter);
}
return true;
}
/*
* Attempt to lock cache entry for read.
* In case cache entry is locked, attempt to add caller on wait list.
*/
bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl,
void *req, uint32_t idx)
{
struct ocf_alock_waiter *waiter;
bool waiting = false;
unsigned long flags = 0;
ENV_BUG_ON(!cmpl);
if (ocf_alock_trylock_entry_rd_idle(alock, entry)) {
/* lock was not owned by anyone */
ocf_alock_mark_index_locked(alock, req, idx, true);
ocf_alock_entry_locked(alock, req, cmpl);
return true;
}
waiter = env_allocator_new(alock->allocator);
if (!waiter)
return false;
/* Lock waiters list */
ocf_alock_waitlist_lock(alock, entry, flags);
if (ocf_alock_waitlist_is_empty_locked(alock, entry)) {
/* No waiters at the moment */
/* Check if read lock can be obtained */
if (ocf_alock_trylock_entry_rd(alock, entry)) {
/* Cache entry locked */
goto unlock;
}
}
/* Setup waiter fields */
waiter->entry = entry;
waiter->req = req;
waiter->idx = idx;
waiter->cmpl = cmpl;
waiter->rw = OCF_READ;
INIT_LIST_HEAD(&waiter->item);
/* Add to waiters list */
ocf_alock_waitlist_add(alock, entry, waiter);
waiting = true;
unlock:
ocf_alock_waitlist_unlock(alock, entry, flags);
if (!waiting) {
ocf_alock_mark_index_locked(alock, req, idx, true);
ocf_alock_entry_locked(alock, req, cmpl);
env_allocator_del(alock->allocator, waiter);
}
return true;
}
/*
* Unlocks the given read lock. If any waiters are registered for the same
* cacheline, one is awakened and the lock is either upgraded to a write lock
* or kept as a readlock. If there are no waiters, it's just unlocked.
*/
static inline void ocf_alock_unlock_one_rd_common(struct ocf_alock *alock,
const ocf_cache_line_t entry)
{
bool locked = false;
bool exchanged = true;
uint32_t idx = _WAITERS_LIST_ITEM(entry);
struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
struct ocf_alock_waiter *waiter;
struct list_head *iter, *next;
/*
* Lock exchange scenario
* 1. RD -> IDLE
* 2. RD -> RD
* 3. RD -> WR
*/
/* Check if the requested entry is on the list */
list_for_each_safe(iter, next, &lst->head) {
waiter = list_entry(iter, struct ocf_alock_waiter, item);
if (entry != waiter->entry)
continue;
if (exchanged) {
if (waiter->rw == OCF_WRITE)
locked = ocf_alock_trylock_entry_rd2wr(alock, entry);
else if (waiter->rw == OCF_READ)
locked = ocf_alock_trylock_entry_rd2rd(alock, entry);
else
ENV_BUG();
} else {
if (waiter->rw == OCF_WRITE)
locked = ocf_alock_trylock_entry_wr(alock, entry);
else if (waiter->rw == OCF_READ)
locked = ocf_alock_trylock_entry_rd(alock, entry);
else
ENV_BUG();
}
if (locked) {
exchanged = false;
list_del(iter);
ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true);
ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);
env_allocator_del(alock->allocator, waiter);
} else {
break;
}
}
if (exchanged) {
/* No exchange, no waiters on the list, unlock and return
* RD -> IDLE
*/
ocf_alock_unlock_entry_rd(alock, entry);
}
}
bool ocf_alock_trylock_one_rd(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
return ocf_alock_trylock_entry_rd_idle(alock, entry);
}
void ocf_alock_unlock_one_rd(struct ocf_alock *alock,
const ocf_cache_line_t entry)
{
unsigned long flags = 0;
OCF_DEBUG_CACHE(alock->cache, "Cache entry unlock one rd = %u", entry);
/* Lock waiters list */
ocf_alock_waitlist_lock(alock, entry, flags);
ocf_alock_unlock_one_rd_common(alock, entry);
ocf_alock_waitlist_unlock(alock, entry, flags);
}
/*
* Unlocks the given write lock. If any waiters are registered for the same
* cacheline, one is awakened and the lock is either downgraded to a readlock
* or kept as a writelock. If there are no waiters, it's just unlocked.
*/
static inline void ocf_alock_unlock_one_wr_common(struct ocf_alock *alock,
const ocf_cache_line_t entry)
{
bool locked = false;
bool exchanged = true;
uint32_t idx = _WAITERS_LIST_ITEM(entry);
struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
struct ocf_alock_waiter *waiter;
struct list_head *iter, *next;
/*
* Lock exchange scenario
* 1. WR -> IDLE
* 2. WR -> RD
* 3. WR -> WR
*/
/* Check if the requested entry is on the list */
list_for_each_safe(iter, next, &lst->head) {
waiter = list_entry(iter, struct ocf_alock_waiter, item);
if (entry != waiter->entry)
continue;
if (exchanged) {
if (waiter->rw == OCF_WRITE)
locked = ocf_alock_trylock_entry_wr2wr(alock, entry);
else if (waiter->rw == OCF_READ)
locked = ocf_alock_trylock_entry_wr2rd(alock, entry);
else
ENV_BUG();
} else {
if (waiter->rw == OCF_WRITE)
locked = ocf_alock_trylock_entry_wr(alock, entry);
else if (waiter->rw == OCF_READ)
locked = ocf_alock_trylock_entry_rd(alock, entry);
else
ENV_BUG();
}
if (locked) {
exchanged = false;
list_del(iter);
ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true);
ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);
env_allocator_del(alock->allocator, waiter);
} else {
break;
}
}
if (exchanged) {
/* No exchange, no waiters on the list, unlock and return
* WR -> IDLE
*/
ocf_alock_unlock_entry_wr(alock, entry);
}
}
void ocf_alock_unlock_one_wr(struct ocf_alock *alock,
const ocf_cache_line_t entry)
{
unsigned long flags = 0;
OCF_DEBUG_CACHE(alock->cache, "Cache entry unlock one wr = %u", entry);
/* Lock waiters list */
ocf_alock_waitlist_lock(alock, entry, flags);
ocf_alock_unlock_one_wr_common(alock, entry);
ocf_alock_waitlist_unlock(alock, entry, flags);
}
/*
* Safely remove cache entry lock waiter from waiting list.
* The request can be granted the lock asynchronously at any point in time,
* so the lock state needs to be checked under a common lock.
*/
void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
struct ocf_request *req, ocf_cache_line_t entry, int i, int rw)
{
uint32_t idx = _WAITERS_LIST_ITEM(entry);
struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
struct list_head *iter, *next;
struct ocf_alock_waiter *waiter;
unsigned long flags = 0;
ocf_alock_waitlist_lock(alock, entry, flags);
if (ocf_alock_is_index_locked(alock, req, i)) {
if (rw == OCF_READ)
ocf_alock_unlock_one_rd_common(alock, entry);
else
ocf_alock_unlock_one_wr_common(alock, entry);
ocf_alock_mark_index_locked(alock, req, i, false);
} else {
list_for_each_safe(iter, next, &lst->head) {
waiter = list_entry(iter, struct ocf_alock_waiter, item);
if (waiter->req == req) {
list_del(iter);
env_allocator_del(alock->allocator, waiter);
break;
}
}
}
ocf_alock_waitlist_unlock(alock, entry, flags);
}
int ocf_alock_lock_rd(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
int lock, status;
ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
req->alock_rw = OCF_READ;
lock = alock->cbs->lock_entries_fast(alock, req, OCF_READ);
if (lock != OCF_LOCK_ACQUIRED) {
env_mutex_lock(&alock->lock);
ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
ENV_BUG_ON(!cmpl);
env_atomic_inc(&alock->waiting);
env_atomic_set(&req->lock_remaining, req->core_line_count);
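/* Hold one extra count so that a waiter completing in parallel cannot
 * drop lock_remaining to zero (and fire cmpl) while the slow path is
 * still adding waiters; the extra count is released just below.
 */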
env_atomic_inc(&req->lock_remaining);
status = alock->cbs->lock_entries_slow(alock, req, OCF_READ, cmpl);
if (!status) {
if (env_atomic_dec_return(&req->lock_remaining) == 0) {
lock = OCF_LOCK_ACQUIRED;
env_atomic_dec(&alock->waiting);
}
} else {
env_atomic_set(&req->lock_remaining, 0);
env_atomic_dec(&alock->waiting);
}
env_mutex_unlock(&alock->lock);
}
return lock;
}
int ocf_alock_lock_wr(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
int lock, status;
ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
req->alock_rw = OCF_WRITE;
lock = alock->cbs->lock_entries_fast(alock, req, OCF_WRITE);
if (lock != OCF_LOCK_ACQUIRED) {
env_mutex_lock(&alock->lock);
ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
ENV_BUG_ON(!cmpl);
env_atomic_inc(&alock->waiting);
env_atomic_set(&req->lock_remaining, req->core_line_count);
env_atomic_inc(&req->lock_remaining);
status = alock->cbs->lock_entries_slow(alock, req, OCF_WRITE, cmpl);
if (!status) {
if (env_atomic_dec_return(&req->lock_remaining) == 0) {
lock = OCF_LOCK_ACQUIRED;
env_atomic_dec(&alock->waiting);
}
} else {
env_atomic_set(&req->lock_remaining, 0);
env_atomic_dec(&alock->waiting);
}
env_mutex_unlock(&alock->lock);
}
return lock;
}
bool ocf_cache_line_is_used(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
ENV_BUG_ON(entry >= alock->num_entries);
if (env_atomic_read(&(alock->access[entry])))
return true;
return !ocf_alock_waitlist_is_empty(alock, entry);
}
bool ocf_alock_waitlist_is_empty(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
bool empty;
unsigned long flags = 0;
ENV_BUG_ON(entry >= alock->num_entries);
/* Lock waiters list */
ocf_alock_waitlist_lock(alock, entry, flags);
empty = ocf_alock_waitlist_is_empty_locked(alock, entry);
ocf_alock_waitlist_unlock(alock, entry, flags);
return empty;
}
/* NOTE: it is the caller's responsibility to ensure that no one acquires
* a lock in the background */
bool ocf_alock_is_locked_exclusively(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
env_atomic *access = &alock->access[entry];
int val = env_atomic_read(access);
ENV_BUG_ON(val == OCF_CACHE_LINE_ACCESS_IDLE);
if (!ocf_alock_waitlist_is_empty(alock, entry))
return false;
return val == OCF_CACHE_LINE_ACCESS_ONE_RD ||
val == OCF_CACHE_LINE_ACCESS_WR;
}
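`ocf_alock_is_locked_exclusively()` leans on the access-counter encoding used throughout this file: an idle line reads `OCF_CACHE_LINE_ACCESS_IDLE`, the first reader moves it to `OCF_CACHE_LINE_ACCESS_ONE_RD` and further readers count up from there, and a writer parks it at the `OCF_CACHE_LINE_ACCESS_WR` sentinel. A simplified write trylock under that encoding (a sketch; `my_trylock_wr` is hypothetical, and the in-tree trylock helpers declared in utils_alock.h are the real interface):

static bool my_trylock_wr(env_atomic *access)
{
	/* Take the line for writing only if nobody holds it at all;
	 * env_atomic_cmpxchg() returns the value it observed. */
	return env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_IDLE,
			OCF_CACHE_LINE_ACCESS_WR) == OCF_CACHE_LINE_ACCESS_IDLE;
}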
uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock)
{
return env_atomic_read(&alock->waiting);
}

src/utils/utils_alock.h Normal file

@ -0,0 +1,93 @@
/*
* Copyright(c) 2012-2021 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef OCF_UTILS_ALOCK_H_
#define OCF_UTILS_ALOCK_H_
/**
* @brief Lock result - Lock acquired successfully
*/
#define OCF_LOCK_ACQUIRED 0
/**
* @brief Lock result - Lock not acquired, lock request added into waiting list
*/
#define OCF_LOCK_NOT_ACQUIRED 1
struct ocf_alock;
/* async request cacheline lock acquisition callback */
typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
typedef int (*ocf_cl_lock_fast)(struct ocf_alock *alock,
struct ocf_request *req, int rw);
typedef int (*ocf_cl_lock_slow)(struct ocf_alock *alock,
struct ocf_request *req, int rw, ocf_req_async_lock_cb cmpl);
struct ocf_alock_lock_cbs
{
ocf_cl_lock_fast lock_entries_fast;
ocf_cl_lock_slow lock_entries_slow;
};
bool ocf_alock_trylock_one_rd(struct ocf_alock *alock,
ocf_cache_line_t entry);
void ocf_alock_unlock_one_rd(struct ocf_alock *alock,
const ocf_cache_line_t entry);
bool ocf_alock_trylock_entry_wr(struct ocf_alock *alock,
ocf_cache_line_t entry);
void ocf_alock_unlock_one_wr(struct ocf_alock *alock,
const ocf_cache_line_t entry_idx);
int ocf_alock_lock_rd(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl);
int ocf_alock_lock_wr(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl);
bool ocf_alock_waitlist_is_empty(struct ocf_alock *alock,
ocf_cache_line_t entry);
bool ocf_alock_is_locked_exclusively(struct ocf_alock *alock,
ocf_cache_line_t entry);
uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock);
size_t ocf_alock_obj_size(void);
int ocf_alock_init_inplace(struct ocf_alock *self, unsigned num_entries,
const char* name, struct ocf_alock_lock_cbs *cbs, ocf_cache_t cache);
int ocf_alock_init(struct ocf_alock **self, unsigned num_entries,
const char* name, struct ocf_alock_lock_cbs *cbs, ocf_cache_t cache);
void ocf_alock_deinit(struct ocf_alock **self);
size_t ocf_alock_size(unsigned num_entries);
bool ocf_alock_is_index_locked(struct ocf_alock *alock,
struct ocf_request *req, unsigned index);
void ocf_alock_mark_index_locked(struct ocf_alock *alock,
struct ocf_request *req, unsigned index, bool locked);
bool ocf_alock_lock_one_wr(struct ocf_alock *alock,
const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl,
void *req, uint32_t idx);
bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl,
void *req, uint32_t idx);
void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
struct ocf_request *req, ocf_cache_line_t entry, int i, int rw);
bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock,
ocf_cache_line_t entry);
#endif
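The header keeps the lock generic: entry resolution is delegated through the `ocf_alock_lock_cbs` pair, where `lock_entries_fast` is expected to take all of a request's entries or none of them without blocking, and `lock_entries_slow` may queue waiters. A sketch of a fast-path callback under those rules (`my_entry()` is a hypothetical helper mapping a request index to its cache line; the real backend in the cache line concurrency code resolves entries through `req->map`):

static int my_lock_fast(struct ocf_alock *alock, struct ocf_request *req, int rw)
{
	uint32_t i;

	for (i = 0; i < req->core_line_count; i++) {
		ocf_cache_line_t entry = my_entry(req, i);
		bool locked = (rw == OCF_WRITE) ?
				ocf_alock_trylock_entry_wr(alock, entry) :
				ocf_alock_trylock_one_rd(alock, entry);

		if (!locked)
			break;

		ocf_alock_mark_index_locked(alock, req, i, true);
	}

	if (i == req->core_line_count)
		return OCF_LOCK_ACQUIRED;

	/* All or nothing: roll back whatever was taken before the miss. */
	while (i--) {
		if (rw == OCF_WRITE)
			ocf_alock_unlock_one_wr(alock, my_entry(req, i));
		else
			ocf_alock_unlock_one_rd(alock, my_entry(req, i));
		ocf_alock_mark_index_locked(alock, req, i, false);
	}

	return OCF_LOCK_NOT_ACQUIRED;
}

The slow-path counterpart would walk the same entries with `ocf_alock_lock_one_rd()`/`ocf_alock_lock_one_wr()`, which queue a waiter for each busy line and report back through the `ocf_req_async_lock_cb` completion.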

@ -8,7 +8,6 @@ from ctypes import c_void_p, Structure, c_char_p, cast, pointer, byref, c_int
from .logger import LoggerOps, Logger
from .data import DataOps, Data
from .cleaner import CleanerOps, Cleaner
from .metadata_updater import MetadataUpdaterOps, MetadataUpdater
from .shared import OcfError
from ..ocf import OcfLib
from .queue import Queue
@ -19,7 +18,6 @@ class OcfCtxOps(Structure):
_fields_ = [
("data", DataOps),
("cleaner", CleanerOps),
("metadata_updater", MetadataUpdaterOps),
("logger", LoggerOps),
]
@ -29,10 +27,9 @@ class OcfCtxCfg(Structure):
class OcfCtx:
def __init__(self, lib, name, logger, data, mu, cleaner):
def __init__(self, lib, name, logger, data, cleaner):
self.logger = logger
self.data = data
self.mu = mu
self.cleaner = cleaner
self.ctx_handle = c_void_p()
self.lib = lib
@ -45,7 +42,6 @@ class OcfCtx:
ops=OcfCtxOps(
data=self.data.get_ops(),
cleaner=self.cleaner.get_ops(),
metadata_updater=self.mu.get_ops(),
logger=logger.get_ops(),
),
logger_priv=cast(pointer(logger.get_priv()), c_void_p),
@ -98,7 +94,6 @@ class OcfCtx:
self.cfg = None
self.logger = None
self.data = None
self.mu = None
self.cleaner = None
Queue._instances_ = {}
Volume._instances_ = {}
@ -112,7 +107,6 @@ def get_default_ctx(logger):
b"PyOCF default ctx",
logger,
Data,
MetadataUpdater,
Cleaner,
)

@ -1,102 +0,0 @@
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from ctypes import c_void_p, c_int, c_uint32, Structure, CFUNCTYPE
from threading import Thread, Event
from ..ocf import OcfLib
class MetadataUpdaterOps(Structure):
INIT = CFUNCTYPE(c_int, c_void_p)
KICK = CFUNCTYPE(None, c_void_p)
STOP = CFUNCTYPE(None, c_void_p)
_fields_ = [("_init", INIT), ("_kick", KICK), ("_stop", STOP)]
class MetadataUpdater:
pass
def mu_run(*, mu: MetadataUpdater, kick: Event, stop: Event):
while True:
kick.clear()
if OcfLib.getInstance().ocf_metadata_updater_run(mu):
continue
kick.wait()
if stop.is_set():
break
class MetadataUpdater:
_instances_ = {}
ops = None
def __init__(self, ref):
self._as_parameter_ = ref
MetadataUpdater._instances_[ref] = self
self.kick_event = Event()
self.stop_event = Event()
lib = OcfLib.getInstance()
self.thread = Thread(
group=None,
target=mu_run,
name="mu-{}".format(
lib.ocf_cache_get_name(lib.ocf_metadata_updater_get_cache(self))
),
kwargs={"mu": self, "kick": self.kick_event, "stop": self.stop_event},
)
self.thread.start()
@classmethod
def get_ops(cls):
if not cls.ops:
cls.ops = MetadataUpdaterOps(
_init=cls._init, _kick=cls._kick, _stop=cls._stop
)
return cls.ops
@classmethod
def get_instance(cls, ref):
return cls._instances_[ref]
@classmethod
def del_instance(cls, ref):
del cls._instances_[ref]
@staticmethod
@MetadataUpdaterOps.INIT
def _init(ref):
m = MetadataUpdater(ref)
return 0
@staticmethod
@MetadataUpdaterOps.KICK
def _kick(ref):
MetadataUpdater.get_instance(ref).kick()
@staticmethod
@MetadataUpdaterOps.STOP
def _stop(ref):
MetadataUpdater.get_instance(ref).stop()
del MetadataUpdater._instances_[ref]
def kick(self):
self.kick_event.set()
def stop(self):
self.stop_event.set()
self.kick_event.set()
lib = OcfLib.getInstance()
lib.ocf_metadata_updater_get_cache.argtypes = [c_void_p]
lib.ocf_metadata_updater_get_cache.restype = c_void_p
lib.ocf_metadata_updater_run.argtypes = [c_void_p]
lib.ocf_metadata_updater_run.restype = c_uint32


@ -39,6 +39,11 @@
* _ocf_req_trylock_wr
* _req_on_lock
* ocf_cache_line_are_waiters
* ocf_cl_lock_line_needs_lock
* ocf_cl_lock_line_get_entry
* ocf_cl_lock_line_is_acting
* ocf_cl_lock_line_slow
* ocf_cl_lock_line_fast
* </functions_to_leave>
*/
@ -64,8 +69,100 @@
#include "concurrency/ocf_cache_line_concurrency.c/ocf_cache_line_concurrency_generated_wraps.c"
#include "../utils/utils_alock.c"
#define LOCK_WAIT_TIMEOUT 5
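Every `__wrap_*` definition below relies on the GNU linker's symbol wrapping (`-Wl,--wrap=SYMBOL`): each call to `SYMBOL` is redirected to `__wrap_SYMBOL`, while the original stays reachable as `__real_SYMBOL`. That is what lets this test interpose on the alock internals and inject random delays to widen race windows. The pattern in miniature (`foo` is a stand-in name, assuming the test is linked with `--wrap=foo`):

#include <stdlib.h>
#include <unistd.h>

int __real_foo(int x);	/* bound by the linker to the original foo() */

int __wrap_foo(int x)
{
	usleep(rand() % 100);	/* perturb timing, then defer to the real code */
	return __real_foo(x);
}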
int __wrap_ocf_alock_init(struct ocf_alock **self, unsigned num_entries,
const char* name, struct ocf_alock_lock_cbs *cbs, ocf_cache_t cache)
{
return ocf_alock_init(self, num_entries, name, cbs, cache);
}
void __wrap_ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
struct ocf_request *req, ocf_cache_line_t entry, int i, int rw)
{
ocf_alock_waitlist_remove_entry(alock, req, entry, i, rw);
}
void __wrap_ocf_alock_deinit(struct ocf_alock **self)
{
ocf_alock_deinit(self);
}
void __wrap_ocf_alock_mark_index_locked(struct ocf_alock *alock,
struct ocf_request *req, unsigned index, _Bool locked)
{
ocf_alock_mark_index_locked(alock, req, index, locked);
}
void __wrap_ocf_alock_unlock_one_wr(struct ocf_alock *alock,
const ocf_cache_line_t entry_idx)
{
ocf_alock_unlock_one_wr(alock, entry_idx);
}
int __wrap_ocf_alock_lock_wr(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
return ocf_alock_lock_wr(alock, req, cmpl);
}
int __wrap_ocf_alock_lock_rd(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
return ocf_alock_lock_rd(alock, req, cmpl);
}
void __wrap_ocf_alock_unlock_one_rd(struct ocf_alock *alock,
const ocf_cache_line_t entry)
{
ocf_alock_unlock_one_rd(alock, entry);
}
bool __wrap_ocf_alock_is_index_locked(struct ocf_alock *alock,
	struct ocf_request *req, unsigned index)
{
	return ocf_alock_is_index_locked(alock, req, index);
}
bool __wrap_ocf_alock_lock_one_wr(struct ocf_alock *alock,
const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl,
void *req, uint32_t idx)
{
usleep(rand() % 100);
return ocf_alock_lock_one_wr(alock, entry, cmpl, req, idx);
}
bool __wrap_ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock,
ocf_cache_line_t entry)
{
return ocf_alock_trylock_entry_rd_idle(alock, entry);
}
bool __wrap_ocf_alock_lock_one_rd(struct ocf_alock *alock, const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl,
void *req, uint32_t idx)
{
usleep(rand() % 100);
return ocf_alock_lock_one_rd(alock, entry, cmpl, req, idx);
}
bool __wrap_ocf_alock_waitlist_is_empty(struct ocf_alock *alock, ocf_cache_line_t entry)
{
return ocf_alock_waitlist_is_empty(alock, entry);
}
bool __wrap_ocf_alock_trylock_one_rd(struct ocf_alock *alock, ocf_cache_line_t entry)
{
return ocf_alock_trylock_one_rd(alock, entry);
}
bool __wrap_ocf_alock_trylock_entry_wr(struct ocf_alock *alock, ocf_cache_line_t entry)
{
return ocf_alock_trylock_entry_wr(alock, entry);
}
void __wrap___assert_fail (const char *__assertion, const char *__file,
unsigned int __line, const char *__function)
{
@ -125,32 +222,25 @@ int __wrap_snprintf (char *__restrict __s, size_t __maxlen,
return ret;
}
static inline bool __wrap___lock_cache_line_wr(struct ocf_cache_line_concurrency *c,
	const ocf_cache_line_t line, ocf_req_async_lock_cb cb,
	void *ctx, uint32_t ctx_id)
{
	usleep(rand() % 100);
	return __real___lock_cache_line_wr(c, line, cb, ctx, ctx_id);
}
static inline bool __wrap___lock_cache_line_rd(struct ocf_cache_line_concurrency *c,
	const ocf_cache_line_t line, ocf_req_async_lock_cb cb,
	void *ctx, uint32_t ctx_id)
{
	usleep(rand() % 100);
	return __real___lock_cache_line_rd(c, line, cb, ctx, ctx_id);
}
int __wrap__ocf_req_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb)
{
	usleep(rand() % 500);
	return __real__ocf_req_lock_wr(req, cb);
}
int __wrap__ocf_req_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb)
{
	usleep(rand() % 500);
	return __real__ocf_req_lock_rd(req, cb);
}
ocf_ctx_t ocf_cache_get_ctx(ocf_cache_t cache)
{
	return NULL;
}
int __wrap_ocf_log_raw(ocf_logger_t logger, ocf_logger_lvl_t lvl, const char *fmt, ...)
{
	char buf[1024];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	printf("%s", buf);

	return 0;
}
unsigned long long progress;
@ -160,6 +250,7 @@ pthread_mutex_t prog_mutex = PTHREAD_MUTEX_INITIALIZER;
struct test_req {
struct ocf_request r;
struct ocf_map_info map[TEST_MAX_MAP_SIZE];
uint8_t alock_map[TEST_MAX_MAP_SIZE];
pthread_cond_t completion;
pthread_mutex_t completion_mutex;
bool finished;
@ -249,6 +340,7 @@ void thread(void *_ctx)
bool locked;
ctx->treq.r.map = &ctx->treq.map;
ctx->treq.r.alock_status = &ctx->treq.alock_map;
pthread_cond_init(&ctx->treq.completion, NULL);
pthread_mutex_init(&ctx->treq.completion_mutex, NULL);
@ -261,7 +353,7 @@ void thread(void *_ctx)
while (i-- && !ctx->terminated)
{
rw = rand() % 2;
single = (rand() % 4 == 0);
single = (rand() % 5 == 0);
if (!single) {
shuffle(permutation, ctx->clines);
@ -399,12 +491,13 @@ static void cctest(unsigned num_threads, unsigned num_iterations, unsigned cline
{
if (!threads[i].finished)
{
unsigned num_clines = threads[i].treq.r.core_line_count;
struct ocf_request *req = &threads[i].treq.r;
unsigned num_clines = req->core_line_count;
struct ocf_map_info **clines = malloc(num_clines *
sizeof(*clines));
for (j = 0; j < num_clines; j++)
{
clines[j] = &threads[i].treq.r.map[j];
clines[j] = &req->map[j];
}
qsort(clines, num_clines, sizeof(*clines), cmp_map);
@ -412,8 +505,8 @@ static void cctest(unsigned num_threads, unsigned num_iterations, unsigned cline
print_message("thread no %u\n", i);
for (j = 0; j < num_clines; j++) {
struct ocf_map_info *map = clines[j];
const char *status = map->rd_locked ? "R" :
map->wr_locked ? "W" : "X";
const char *status = env_bit_test(index, (unsigned long*)req->alock_status) ?
(req->alock_rw == OCF_WRITE ? "W" : "R") : "X";
print_message("[%u] %u %s\n", j, map->coll_idx, status);
}