Merge pull request #156 from robertbaldyga/cache-async-lock

Introduce asynchronous cache lock
This commit is contained in:
Robert Bałdyga 2019-05-28 11:27:03 +02:00 committed by GitHub
commit 5fcbb938b2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 527 additions and 169 deletions

View File

@ -42,9 +42,6 @@ typedef enum {
/** Start cache failure */
OCF_ERR_START_CACHE_FAIL,
/** Cache is busy */
OCF_ERR_CACHE_IN_USE,
/** Cache ID does not exist */
OCF_ERR_CACHE_NOT_EXIST,

View File

@ -120,18 +120,28 @@ int ocf_mngt_cache_get(ocf_cache_t cache);
*/
void ocf_mngt_cache_put(ocf_cache_t cache);
/**
* @brief Completion callback of asynchronous cache lock operation
* @param[in] cache Handle to cache
* @param[in] error Status error code. Can be one of the following:
* 0 Cache successfully locked
* -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already stopping
* -OCF_ERR_NO_MEM Cannot allocate needed memory
* -OCF_ERR_INTR Wait operation interrupted
*/
typedef void (*ocf_mngt_cache_lock_end_t)(ocf_cache_t cache,
void *priv, int error);
/**
* @brief Lock cache for management operations (write lock, exclusive)
*
* @param[in] cache Handle to cache
*
* @retval 0 Cache successfully locked
* @retval -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already
* stopping
* @retval -OCF_ERR_CACHE_IN_USE Can not lock cache - cache is in use
* @retval -OCF_ERR_INTR Wait operation interrupted
* @param[in] cmpl Completion callback
* @param[in] priv Private context of completion callback
*/
int ocf_mngt_cache_lock(ocf_cache_t cache);
void ocf_mngt_cache_lock(ocf_cache_t cache,
ocf_mngt_cache_lock_end_t cmpl, void *priv);
/**
* @brief Lock cache for read - assures cache config does not change while
@ -139,14 +149,11 @@ int ocf_mngt_cache_lock(ocf_cache_t cache);
* read lock in parallel.
*
* @param[in] cache Handle to cache
*
* @retval 0 Cache successfully locked
* @retval -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already
* stopping
* @retval -OCF_ERR_CACHE_IN_USE Can not lock cache - cache is in use
* @retval -OCF_ERR_INTR Wait operation interrupted
* @param[in] cmpl Completion callback
* @param[in] priv Private context of completion callback
*/
int ocf_mngt_cache_read_lock(ocf_cache_t cache);
void ocf_mngt_cache_read_lock(ocf_cache_t cache,
ocf_mngt_cache_lock_end_t cmpl, void *priv);
/**
* @brief Lock cache for management operations (write lock, exclusive)
@ -156,7 +163,6 @@ int ocf_mngt_cache_read_lock(ocf_cache_t cache);
* @retval 0 Cache successfully locked
* @retval -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already
* stopping
* @retval -OCF_ERR_CACHE_IN_USE Can not lock cache - cache is in use
* @retval -OCF_ERR_NO_LOCK Lock not acquired
*/
int ocf_mngt_cache_trylock(ocf_cache_t cache);
@ -171,7 +177,6 @@ int ocf_mngt_cache_trylock(ocf_cache_t cache);
* @retval 0 Cache successfully locked
* @retval -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already
* stopping
* @retval -OCF_ERR_CACHE_IN_USE Can not lock cache - cache is in use
* @retval -OCF_ERR_NO_LOCK Lock not acquired
*/
int ocf_mngt_cache_read_trylock(ocf_cache_t cache);

View File

@ -111,7 +111,7 @@ static void ocf_cleaner_run_complete(ocf_cleaner_t cleaner, uint32_t interval)
{
ocf_cache_t cache = ocf_cleaner_get_cache(cleaner);
env_rwsem_up_write(&cache->lock);
ocf_async_unlock(&cache->lock);
cleaner->end(cleaner, interval);
ocf_queue_put(cleaner->io_queue);
@ -131,19 +131,19 @@ void ocf_cleaner_run(ocf_cleaner_t cleaner, ocf_queue_t queue)
* (error, etc.).
*/
if (!env_bit_test(ocf_cache_state_running, &cache->cache_state) ||
ocf_mngt_is_cache_locked(cache)) {
ocf_mngt_cache_is_locked(cache)) {
cleaner->end(cleaner, SLEEP_TIME_MS);
return;
}
/* Sleep in case there is management operation in progress. */
if (env_rwsem_down_write_trylock(&cache->lock)) {
if (ocf_mngt_cache_trylock(cache)) {
cleaner->end(cleaner, SLEEP_TIME_MS);
return;
}
if (_ocf_cleaner_run_check_dirty_inactive(cache)) {
env_rwsem_up_write(&cache->lock);
ocf_mngt_cache_unlock(cache);
cleaner->end(cleaner, SLEEP_TIME_MS);
return;
}

View File

@ -328,7 +328,7 @@ static void evp_lru_clean(ocf_cache_t cache, ocf_queue_t io_queue,
};
int cnt;
if (ocf_mngt_is_cache_locked(cache))
if (ocf_mngt_cache_is_locked(cache))
return;
cnt = ocf_refcnt_inc(counter);

View File

@ -17,6 +17,7 @@
#include "../utils/utils_cache_line.h"
#include "../utils/utils_pipeline.h"
#include "../utils/utils_refcnt.h"
#include "../utils/utils_async_lock.h"
#include "../concurrency/ocf_concurrency.h"
#include "../eviction/ops.h"
#include "../ocf_ctx_priv.h"
@ -551,8 +552,15 @@ static int _ocf_mngt_init_new_cache(struct ocf_cachemng_init_params *params)
if (!cache)
return -OCF_ERR_NO_MEM;
if (env_rwsem_init(&cache->lock) ||
env_mutex_init(&cache->flush_mutex)) {
if (ocf_mngt_cache_lock_init(cache)) {
env_vfree(cache);
return -OCF_ERR_NO_MEM;
}
/* Lock cache during setup - this trylock should always succeed */
ENV_BUG_ON(ocf_mngt_cache_trylock(cache));
if (env_mutex_init(&cache->flush_mutex)) {
env_vfree(cache);
return -OCF_ERR_NO_MEM;
}
@ -694,7 +702,6 @@ static int _ocf_mngt_init_prepare_cache(struct ocf_cachemng_init_params *param,
cache->backfill.max_queue_size = cfg->backfill.max_queue_size;
cache->backfill.queue_unblock_size = cfg->backfill.queue_unblock_size;
env_rwsem_down_write(&cache->lock); /* Lock cache during setup */
param->flags.cache_locked = true;
cache->pt_unaligned_io = cfg->pt_unaligned_io;
@ -1216,17 +1223,11 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
ocf_ctx_get(ctx);
if (params.locked) {
/* Increment reference counter to match cache_lock /
cache_unlock convention. User is expected to call
ocf_mngt_cache_unlock in future which would up the
semaphore as well as decrement ref_count. */
ocf_refcnt_inc(&(*cache)->refcnt.cache);
} else {
if (!params.locked) {
/* User did not request to lock cache instance after creation -
up the semaphore here since we have acquired the lock to
unlock it here since we have acquired the lock to
perform management operations. */
env_rwsem_up_write(&(*cache)->lock);
ocf_mngt_cache_unlock(*cache);
params.flags.cache_locked = false;
}

View File

@ -126,6 +126,7 @@ void ocf_mngt_cache_put(ocf_cache_t cache)
if (ocf_refcnt_dec(&cache->refcnt.cache) == 0) {
ctx = cache->owner;
ocf_metadata_deinit(cache);
ocf_mngt_cache_lock_deinit(cache);
env_vfree(cache);
ocf_ctx_put(ctx);
}
@ -175,94 +176,160 @@ int ocf_mngt_cache_get_by_id(ocf_ctx_t ocf_ctx, ocf_cache_id_t id, ocf_cache_t *
return error;
}
bool ocf_mngt_is_cache_locked(ocf_cache_t cache)
typedef void (*ocf_lock_fn_t)(ocf_async_lock_waiter_t waiter);
typedef int (*ocf_trylock_fn_t)(ocf_async_lock_t lock);
typedef void (*ocf_unlock_fn_t)(ocf_async_lock_t lock);
struct ocf_mngt_cache_lock_context {
ocf_cache_t cache;
ocf_unlock_fn_t unlock_fn;
ocf_mngt_cache_lock_end_t cmpl;
void *priv;
};
static void _ocf_mngt_cache_lock_complete(
ocf_async_lock_waiter_t waiter, int error)
{
if (env_rwsem_is_locked(&cache->lock))
return true;
struct ocf_mngt_cache_lock_context *context;
ocf_cache_t cache;
if (env_atomic_read(&cache->lock_waiter))
return true;
context = ocf_async_lock_waiter_get_priv(waiter);
cache = context->cache;
return false;
if (error) {
ocf_mngt_cache_put(cache);
goto out;
}
if (env_bit_test(ocf_cache_state_stopping, &cache->cache_state)) {
/* Cache already stopping, do not allow any operation */
context->unlock_fn(ocf_async_lock_waiter_get_lock(waiter));
ocf_mngt_cache_put(cache);
error = -OCF_ERR_CACHE_NOT_EXIST;
}
out:
context->cmpl(context->cache, context->priv, error);
}
/*
 * Asynchronously acquire the cache management lock (read or write,
 * depending on lock_fn) and report the result through cmpl(cache, priv,
 * error).
 *
 * A cache reference is taken up front; it is dropped on failure here (or
 * in _ocf_mngt_cache_lock_complete()) and otherwise held until the caller
 * releases the lock via the matching unlock function.
 */
static void _ocf_mngt_cache_lock(ocf_cache_t cache,
		ocf_mngt_cache_lock_end_t cmpl, void *priv,
		ocf_lock_fn_t lock_fn, ocf_unlock_fn_t unlock_fn)
{
	ocf_async_lock_waiter_t waiter;
	struct ocf_mngt_cache_lock_context *context;

	if (ocf_mngt_cache_get(cache))
		OCF_CMPL_RET(cache, priv, -OCF_ERR_CACHE_NOT_EXIST);

	waiter = ocf_async_lock_new_waiter(&cache->lock,
			_ocf_mngt_cache_lock_complete);
	if (!waiter) {
		/* Drop the reference taken above before reporting failure */
		ocf_mngt_cache_put(cache);
		OCF_CMPL_RET(cache, priv, -OCF_ERR_NO_MEM);
	}

	/* Stash completion info in the waiter's private context; it is
	 * consumed by _ocf_mngt_cache_lock_complete() */
	context = ocf_async_lock_waiter_get_priv(waiter);
	context->cache = cache;
	context->unlock_fn = unlock_fn;
	context->cmpl = cmpl;
	context->priv = priv;

	lock_fn(waiter);
}
/*
 * Synchronously try to acquire the cache management lock (read or write,
 * depending on trylock_fn).
 *
 * A cache reference is taken up front and, on success, held until the
 * caller releases the lock (the unlock path drops it via
 * _ocf_mngt_cache_unlock()). On every failure path the reference must be
 * released here - mirroring _ocf_mngt_cache_lock_complete().
 *
 * @retval 0 Lock acquired
 * @retval -OCF_ERR_CACHE_NOT_EXIST Cache is already stopping
 * @retval -OCF_ERR_NO_LOCK Lock is busy
 */
static int _ocf_mngt_cache_trylock(ocf_cache_t cache,
		ocf_trylock_fn_t trylock_fn, ocf_unlock_fn_t unlock_fn)
{
	int result;

	if (ocf_mngt_cache_get(cache))
		return -OCF_ERR_CACHE_NOT_EXIST;

	result = trylock_fn(&cache->lock);
	if (result) {
		/* Fix: drop the reference taken above - previously a failed
		 * trylock leaked a cache refcount */
		ocf_mngt_cache_put(cache);
		return result;
	}

	if (env_bit_test(ocf_cache_state_stopping, &cache->cache_state)) {
		/* Cache already stopping, do not allow any operation */
		unlock_fn(&cache->lock);
		/* Fix: drop the reference taken above */
		ocf_mngt_cache_put(cache);
		return -OCF_ERR_CACHE_NOT_EXIST;
	}

	return 0;
}
static void _ocf_mngt_cache_unlock(ocf_cache_t cache,
void (*unlock_fn)(env_rwsem *s))
ocf_unlock_fn_t unlock_fn)
{
unlock_fn(&cache->lock);
ocf_mngt_cache_put(cache);
}
void ocf_mngt_cache_unlock(ocf_cache_t cache)
int ocf_mngt_cache_lock_init(ocf_cache_t cache)
{
OCF_CHECK_NULL(cache);
_ocf_mngt_cache_unlock(cache, env_rwsem_up_write);
return ocf_async_lock_init(&cache->lock,
sizeof(struct ocf_mngt_cache_lock_context));
}
void ocf_mngt_cache_read_unlock(ocf_cache_t cache)
void ocf_mngt_cache_lock_deinit(ocf_cache_t cache)
{
OCF_CHECK_NULL(cache);
_ocf_mngt_cache_unlock(cache, env_rwsem_up_read);
ocf_async_lock_deinit(&cache->lock);
}
static int _ocf_mngt_cache_lock(ocf_cache_t cache, int (*lock_fn)(env_rwsem *s),
void (*unlock_fn)(env_rwsem *s))
{
int ret;
/* Increment reference counter */
if (!ocf_refcnt_inc(&cache->refcnt.cache))
return -OCF_ERR_CACHE_NOT_EXIST;
env_atomic_inc(&cache->lock_waiter);
ret = lock_fn(&cache->lock);
env_atomic_dec(&cache->lock_waiter);
if (ret) {
ocf_mngt_cache_put(cache);
return ret;
}
if (env_bit_test(ocf_cache_state_stopping, &cache->cache_state)) {
/* Cache already stopping, do not allow any operation */
ret = -OCF_ERR_CACHE_NOT_EXIST;
goto unlock;
}
return 0;
unlock:
_ocf_mngt_cache_unlock(cache, unlock_fn);
return ret;
}
int ocf_mngt_cache_lock(ocf_cache_t cache)
void ocf_mngt_cache_lock(ocf_cache_t cache,
ocf_mngt_cache_lock_end_t cmpl, void *priv)
{
OCF_CHECK_NULL(cache);
return _ocf_mngt_cache_lock(cache, env_rwsem_down_write_interruptible,
env_rwsem_up_write);
}
int ocf_mngt_cache_read_lock(ocf_cache_t cache)
{
OCF_CHECK_NULL(cache);
return _ocf_mngt_cache_lock(cache, env_rwsem_down_read_interruptible,
env_rwsem_up_read);
_ocf_mngt_cache_lock(cache, cmpl, priv,
ocf_async_lock, ocf_async_unlock);
}
int ocf_mngt_cache_trylock(ocf_cache_t cache)
{
OCF_CHECK_NULL(cache);
return _ocf_mngt_cache_lock(cache, env_rwsem_down_write_trylock,
env_rwsem_up_write);
return _ocf_mngt_cache_trylock(cache,
ocf_async_trylock, ocf_async_unlock);
}
/*
 * Release the exclusive (write) management lock and drop the cache
 * reference taken by ocf_mngt_cache_lock()/ocf_mngt_cache_trylock().
 */
void ocf_mngt_cache_unlock(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	_ocf_mngt_cache_unlock(cache, ocf_async_unlock);
}
/*
 * Asynchronously acquire the management read (shared) lock.
 * Result is reported through cmpl(cache, priv, error).
 */
void ocf_mngt_cache_read_lock(ocf_cache_t cache,
		ocf_mngt_cache_lock_end_t cmpl, void *priv)
{
	OCF_CHECK_NULL(cache);
	_ocf_mngt_cache_lock(cache, cmpl, priv,
			ocf_async_read_lock, ocf_async_read_unlock);
}
int ocf_mngt_cache_read_trylock(ocf_cache_t cache)
{
OCF_CHECK_NULL(cache);
return _ocf_mngt_cache_lock(cache, env_rwsem_down_read_trylock,
env_rwsem_up_read);
return _ocf_mngt_cache_trylock(cache,
ocf_async_read_trylock, ocf_async_read_unlock);
}
/*
 * Release the management read (shared) lock and drop the cache reference
 * taken by ocf_mngt_cache_read_lock()/ocf_mngt_cache_read_trylock().
 */
void ocf_mngt_cache_read_unlock(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	_ocf_mngt_cache_unlock(cache, ocf_async_read_unlock);
}
/* Check whether the cache management lock is held (by reader or writer). */
bool ocf_mngt_cache_is_locked(ocf_cache_t cache)
{
	return ocf_async_is_locked(&cache->lock);
}
/* if cache is either fully initialized or during recovery */
@ -305,7 +372,6 @@ static int _ocf_mngt_cache_get_list_cpy(ocf_ctx_t ocf_ctx, ocf_cache_t **list,
}
list_for_each_entry(iter, &ocf_ctx->caches, list) {
if (_ocf_mngt_cache_try_get(iter))
(*list)[i++] = iter;
}

View File

@ -28,6 +28,9 @@ int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache,
ocf_part_id_t part_id, const char *name, uint32_t min_size,
uint32_t max_size, uint8_t priority, bool valid);
bool ocf_mngt_is_cache_locked(ocf_cache_t cache);
int ocf_mngt_cache_lock_init(ocf_cache_t cache);
void ocf_mngt_cache_lock_deinit(ocf_cache_t cache);
bool ocf_mngt_cache_is_locked(ocf_cache_t cache);
#endif /* __OCF_MNGT_COMMON_H__ */

View File

@ -15,6 +15,7 @@
#include "metadata/metadata_updater_priv.h"
#include "utils/utils_list.h"
#include "utils/utils_refcnt.h"
#include "utils/utils_async_lock.h"
#include "ocf_stats_priv.h"
#include "cleaning/cleaning.h"
#include "ocf_logger_priv.h"
@ -200,10 +201,11 @@ struct ocf_cache {
struct ocf_cleaner cleaner;
struct ocf_metadata_updater metadata_updater;
env_rwsem lock;
env_atomic lock_waiter;
/*!< most of the time this variable is set to 0, unless user requested
*!< interruption of flushing process via ioctl/
struct ocf_async_lock lock;
/*
* Most of the time this variable is set to 0, unless user requested
* interruption of flushing process.
*/
int flushing_interrupted;
env_mutex flush_mutex;

View File

@ -0,0 +1,238 @@
/*
* Copyright(c) 2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "utils_async_lock.h"
struct ocf_async_lock_waiter {
	/* Entry in ocf_async_lock::waiters queue */
	struct list_head list;
	/* Lock this waiter is queued on */
	ocf_async_lock_t lock;
	/* True when waiting for the write (exclusive) lock, false for read */
	bool write_lock;
	/* Completion callback invoked once the lock is granted (or on error).
	 * Caller's private context of lock->waiter_priv_size bytes is
	 * allocated directly after this struct. */
	ocf_async_lock_end_t cmpl;
};
/*
 * Move waiters that can be granted the lock now from lock->waiters to the
 * local "waiters" list, updating the rd/wr counters accordingly.
 *
 * Grants all consecutive read waiters from the head of the queue; a write
 * waiter is granted only when no readers hold the lock, and always stops
 * the scan (writers are exclusive).
 *
 * Must be called with lock->mutex held.
 * File-local helper (not declared in the header), hence static.
 */
static void _ocf_async_lock_collect_waiters(ocf_async_lock_t lock,
		struct list_head *waiters)
{
	ocf_async_lock_waiter_t iter, temp;

	list_for_each_entry_safe(iter, temp, &lock->waiters, list) {
		if (!iter->write_lock) {
			list_move_tail(&iter->list, waiters);
			lock->rd++;
		} else {
			if (!lock->rd) {
				list_move_tail(&iter->list, waiters);
				lock->wr = 1;
			}
			break;
		}
	}
}
/*
 * Complete and free all waiters on the "waiters" list, invoking each
 * completion callback with "status".
 *
 * Called without lock->mutex held (see callers), as completion callbacks
 * may re-enter the lock API.
 * File-local helper (not declared in the header), hence static.
 */
static void _ocf_async_lock_run_waiters(struct ocf_async_lock *lock,
		struct list_head *waiters, int status)
{
	ocf_async_lock_waiter_t iter, temp;

	/* TODO: Should we run waiters asynchronously? */
	list_for_each_entry_safe(iter, temp, waiters, list) {
		list_del(&iter->list);
		iter->cmpl(iter, status);
		env_vfree(iter);
	}
}
/*
 * Initialize an asynchronous read-write lock.
 *
 * @param lock Lock to initialize
 * @param waiter_priv_size Size of the per-waiter private context area
 *
 * @return 0 on success, mutex initialization error code otherwise
 */
int ocf_async_lock_init(struct ocf_async_lock *lock, uint32_t waiter_priv_size)
{
	int ret = env_mutex_init(&lock->mutex);

	if (ret)
		return ret;

	INIT_LIST_HEAD(&lock->waiters);
	lock->rd = 0;
	lock->wr = 0;
	lock->waiter_priv_size = waiter_priv_size;

	return 0;
}
/*
 * Deinitialize the lock: detach all pending waiters and complete them
 * with -OCF_ERR_NO_LOCK so their callbacks can clean up.
 *
 * NOTE(review): lock->mutex is never destroyed here - confirm whether the
 * env abstraction layer requires an env_mutex destroy counterpart.
 */
void ocf_async_lock_deinit(struct ocf_async_lock *lock)
{
	struct list_head waiters;
	ocf_async_lock_waiter_t iter, temp;

	INIT_LIST_HEAD(&waiters);

	env_mutex_lock(&lock->mutex);
	list_for_each_entry_safe(iter, temp, &lock->waiters, list)
		list_move_tail(&iter->list, &waiters);
	env_mutex_unlock(&lock->mutex);

	/* Run completions outside the mutex - they may re-enter the lock API */
	_ocf_async_lock_run_waiters(lock, &waiters, -OCF_ERR_NO_LOCK);
}
/*
 * Allocate a waiter for "lock" with room for the lock's per-waiter private
 * context directly after the struct.
 *
 * @return New waiter, or NULL on allocation failure.
 */
ocf_async_lock_waiter_t ocf_async_lock_new_waiter(ocf_async_lock_t lock,
		ocf_async_lock_end_t cmpl)
{
	size_t alloc_size = sizeof(struct ocf_async_lock_waiter) +
			lock->waiter_priv_size;
	ocf_async_lock_waiter_t waiter = env_vmalloc(alloc_size);

	if (waiter) {
		waiter->lock = lock;
		waiter->cmpl = cmpl;
	}

	return waiter;
}
/* Return the lock a waiter was created for. */
ocf_async_lock_t ocf_async_lock_waiter_get_lock(ocf_async_lock_waiter_t waiter)
{
	return waiter->lock;
}
/*
 * Return pointer to the waiter's private context area, which is allocated
 * directly after the waiter struct (see ocf_async_lock_new_waiter()).
 */
void *ocf_async_lock_waiter_get_priv(ocf_async_lock_waiter_t waiter)
{
	/* Arithmetic on void* is a GCC extension, not standard C - step over
	 * the struct with typed pointer arithmetic instead */
	return (void *)(waiter + 1);
}
/*
 * Attempt to take the write (exclusive) lock.
 * Must be called with lock->mutex held.
 */
static int _ocf_async_trylock(struct ocf_async_lock *lock)
{
	bool busy = (lock->wr != 0) || (lock->rd != 0);

	if (busy)
		return -OCF_ERR_NO_LOCK;

	lock->wr = 1;
	return 0;
}
/*
 * Acquire the write lock asynchronously. If the lock is free, the waiter's
 * completion runs immediately (status 0) and the waiter is freed; otherwise
 * the waiter is queued until an unlock grants it the lock.
 */
void ocf_async_lock(ocf_async_lock_waiter_t waiter)
{
	ocf_async_lock_t lock = waiter->lock;

	env_mutex_lock(&lock->mutex);

	if (_ocf_async_trylock(lock) != 0) {
		/* Lock busy - queue as a write waiter */
		waiter->write_lock = true;
		list_add_tail(&waiter->list, &lock->waiters);
		env_mutex_unlock(&lock->mutex);
		return;
	}

	env_mutex_unlock(&lock->mutex);

	/* Acquired immediately - complete and free the waiter */
	waiter->cmpl(waiter, 0);
	env_vfree(waiter);
}
/*
 * Try to take the write lock without waiting.
 *
 * @return 0 on success, -OCF_ERR_NO_LOCK when the lock is busy.
 */
int ocf_async_trylock(struct ocf_async_lock *lock)
{
	int ret;

	env_mutex_lock(&lock->mutex);
	ret = _ocf_async_trylock(lock);
	env_mutex_unlock(&lock->mutex);

	return ret;
}
void ocf_async_unlock(struct ocf_async_lock *lock)
{
struct list_head waiters;
INIT_LIST_HEAD(&waiters);
env_mutex_lock(&lock->mutex);
ENV_BUG_ON(lock->rd);
ENV_BUG_ON(!lock->wr);
lock->wr = 0;
_ocf_async_lock_collect_waiters(lock, &waiters);
env_mutex_unlock(&lock->mutex);
_ocf_async_lock_run_waiters(lock, &waiters, 0);
}
/*
 * Attempt to take a read (shared) lock. Must be called with lock->mutex
 * held. Fails when a writer holds the lock or any waiter is queued (queued
 * waiters take priority, which prevents writer starvation).
 */
static int _ocf_async_read_trylock(struct ocf_async_lock *lock)
{
	bool blocked = lock->wr || !list_empty(&lock->waiters);

	if (blocked)
		return -OCF_ERR_NO_LOCK;

	lock->rd++;
	return 0;
}
/*
 * Acquire a read lock asynchronously. If the lock can be taken now, the
 * waiter's completion runs immediately (status 0) and the waiter is freed;
 * otherwise the waiter is queued until an unlock grants it the lock.
 */
void ocf_async_read_lock(ocf_async_lock_waiter_t waiter)
{
	ocf_async_lock_t lock = waiter->lock;

	env_mutex_lock(&lock->mutex);

	if (_ocf_async_read_trylock(lock) != 0) {
		/* Lock busy (or waiters queued) - queue as a read waiter */
		waiter->write_lock = false;
		list_add_tail(&waiter->list, &lock->waiters);
		env_mutex_unlock(&lock->mutex);
		return;
	}

	env_mutex_unlock(&lock->mutex);

	/* Acquired immediately - complete and free the waiter */
	waiter->cmpl(waiter, 0);
	env_vfree(waiter);
}
/*
 * Try to take a read lock without waiting.
 *
 * @return 0 on success, -OCF_ERR_NO_LOCK when the lock cannot be taken.
 */
int ocf_async_read_trylock(struct ocf_async_lock *lock)
{
	int ret;

	env_mutex_lock(&lock->mutex);
	ret = _ocf_async_read_trylock(lock);
	env_mutex_unlock(&lock->mutex);

	return ret;
}
/*
 * Release a read lock. Only when the last reader leaves are queued
 * waiters granted the lock; their completions run outside the mutex.
 */
void ocf_async_read_unlock(struct ocf_async_lock *lock)
{
	struct list_head waiters;

	INIT_LIST_HEAD(&waiters);

	env_mutex_lock(&lock->mutex);

	/* At least one reader and no writer must hold the lock here */
	ENV_BUG_ON(!lock->rd);
	ENV_BUG_ON(lock->wr);

	/* Other readers remain - nothing to wake up yet */
	if (--lock->rd) {
		env_mutex_unlock(&lock->mutex);
		return;
	}

	/* Last reader gone - grant the lock to queued waiters, if any */
	_ocf_async_lock_collect_waiters(lock, &waiters);

	env_mutex_unlock(&lock->mutex);

	_ocf_async_lock_run_waiters(lock, &waiters, 0);
}
/* Check whether the lock is currently held by any reader or writer. */
bool ocf_async_is_locked(struct ocf_async_lock *lock)
{
	bool held;

	env_mutex_lock(&lock->mutex);
	held = (lock->rd != 0) || (lock->wr != 0);
	env_mutex_unlock(&lock->mutex);

	return held;
}

View File

@ -0,0 +1,50 @@
/*
* Copyright(c) 2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_ASYNC_LOCK_H__
#define __UTILS_ASYNC_LOCK_H__
#include "ocf_env.h"
struct ocf_async_lock {
	/* Queue of pending waiters (struct ocf_async_lock_waiter) */
	struct list_head waiters;
	/* Protects the waiters list and the rd/wr counters */
	env_mutex mutex;
	/* Number of readers currently holding the lock */
	uint32_t rd;
	/* 1 when a writer holds the lock, 0 otherwise */
	uint32_t wr;
	/* Size of per-waiter private context area (set at init time) */
	uint32_t waiter_priv_size;
};
typedef struct ocf_async_lock *ocf_async_lock_t;
typedef struct ocf_async_lock_waiter *ocf_async_lock_waiter_t;
typedef void (*ocf_async_lock_end_t)(ocf_async_lock_waiter_t waiter, int error);
int ocf_async_lock_init(ocf_async_lock_t lock, uint32_t waiter_priv_size);
void ocf_async_lock_deinit(ocf_async_lock_t lock);
ocf_async_lock_waiter_t ocf_async_lock_new_waiter(ocf_async_lock_t lock,
ocf_async_lock_end_t cmpl);
ocf_async_lock_t ocf_async_lock_waiter_get_lock(ocf_async_lock_waiter_t waiter);
void *ocf_async_lock_waiter_get_priv(ocf_async_lock_waiter_t waiter);
void ocf_async_lock(ocf_async_lock_waiter_t waiter);
int ocf_async_trylock(struct ocf_async_lock *lock);
void ocf_async_unlock(struct ocf_async_lock *lock);
void ocf_async_read_lock(ocf_async_lock_waiter_t waiter);
int ocf_async_read_trylock(struct ocf_async_lock *lock);
void ocf_async_read_unlock(struct ocf_async_lock *lock);
bool ocf_async_is_locked(struct ocf_async_lock *lock);
#endif /* __UTILS_ASYNC_LOCK_H__ */

View File

@ -1,3 +1,8 @@
/*
* Copyright(c) 2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"

View File

@ -186,24 +186,24 @@ class Cache:
self.started = True
def change_cache_mode(self, cache_mode: CacheMode):
self.get_and_write_lock()
self.write_lock()
status = self.owner.lib.ocf_mngt_cache_set_mode(
self.cache_handle, cache_mode
)
self.put_and_write_unlock()
self.write_unlock()
if status:
raise OcfError("Error changing cache mode", status)
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
self.get_and_write_lock()
self.write_lock()
status = self.owner.lib.ocf_mngt_cache_cleaning_set_policy(
self.cache_handle, cleaning_policy
)
self.put_and_write_unlock()
self.write_unlock()
if status:
raise OcfError("Error changing cleaning policy", status)
@ -211,25 +211,25 @@ class Cache:
def set_cleaning_policy_param(
self, cleaning_policy: CleaningPolicy, param_id, param_value
):
self.get_and_write_lock()
self.write_lock()
status = self.owner.lib.ocf_mngt_cache_cleaning_set_param(
self.cache_handle, cleaning_policy, param_id, param_value
)
self.put_and_write_unlock()
self.write_unlock()
if status:
raise OcfError("Error setting cleaning policy param", status)
def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
self.get_and_write_lock()
self.write_lock()
status = self.owner.lib.ocf_mngt_core_set_seq_cutoff_policy_all(
self.cache_handle, policy
)
self.put_and_write_unlock()
self.write_unlock()
if status:
raise OcfError("Error setting cache seq cut off policy", status)
@ -261,7 +261,7 @@ class Cache:
self, device, force=False, perform_test=False, cache_line_size=None
):
self.configure_device(device, force, perform_test, cache_line_size)
self.get_and_write_lock()
self.write_lock()
c = OcfCompletion(
[("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
@ -272,7 +272,7 @@ class Cache:
)
c.wait()
self.put_and_write_unlock()
self.write_unlock()
if c.results["error"]:
raise OcfError("Attaching cache device failed", c.results["error"])
@ -316,26 +316,6 @@ class Cache:
return c
def _get_and_lock(self, read=True):
self.get()
if read:
status = self.owner.lib.ocf_mngt_cache_read_lock(self.cache_handle)
else:
status = self.owner.lib.ocf_mngt_cache_lock(self.cache_handle)
if status:
self.put()
raise OcfError("Couldn't lock cache instance", status)
def _put_and_unlock(self, read=True):
if read:
self.owner.lib.ocf_mngt_cache_read_unlock(self.cache_handle)
else:
self.owner.lib.ocf_mngt_cache_unlock(self.cache_handle)
self.put()
def put(self):
self.owner.lib.ocf_mngt_cache_put(self.cache_handle)
@ -344,20 +324,32 @@ class Cache:
if status:
raise OcfError("Couldn't get cache instance", status)
def get_and_read_lock(self):
self._get_and_lock(True)
def read_lock(self):
c = OcfCompletion(
[("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
)
self.owner.lib.ocf_mngt_cache_read_lock(self.cache_handle, c, None)
c.wait()
if c.results["error"]:
raise OcfError("Couldn't lock cache instance", c.results["error"])
def get_and_write_lock(self):
self._get_and_lock(False)
def write_lock(self):
c = OcfCompletion(
[("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
)
self.owner.lib.ocf_mngt_cache_lock(self.cache_handle, c, None)
c.wait()
if c.results["error"]:
raise OcfError("Couldn't lock cache instance", c.results["error"])
def put_and_read_unlock(self):
self._put_and_unlock(True)
def read_unlock(self):
self.owner.lib.ocf_mngt_cache_read_unlock(self.cache_handle)
def put_and_write_unlock(self):
self._put_and_unlock(False)
def write_unlock(self):
self.owner.lib.ocf_mngt_cache_unlock(self.cache_handle)
def add_core(self, core: Core):
self.get_and_write_lock()
self.write_lock()
c = OcfCompletion(
[
@ -374,24 +366,24 @@ class Cache:
c.wait()
if c.results["error"]:
self.put_and_write_unlock()
self.write_unlock()
raise OcfError("Failed adding core", c.results["error"])
core.cache = self
core.handle = c.results["core"]
self.cores.append(core)
self.put_and_write_unlock()
self.write_unlock()
def remove_core(self, core: Core):
self.get_and_write_lock()
self.write_lock()
c = OcfCompletion([("priv", c_void_p), ("error", c_int)])
self.owner.lib.ocf_mngt_cache_remove_core(core.handle, c, None)
c.wait()
self.put_and_write_unlock()
self.write_unlock()
if c.results["error"]:
raise OcfError("Failed removing core", c.results["error"])
@ -405,13 +397,13 @@ class Cache:
block = BlocksStats()
errors = ErrorsStats()
self.get_and_read_lock()
self.read_lock()
status = self.owner.lib.ocf_cache_get_info(
self.cache_handle, byref(cache_info)
)
if status:
self.put_and_read_unlock()
self.read_unlock()
raise OcfError("Failed getting cache info", status)
status = self.owner.lib.ocf_stats_collect_cache(
@ -422,13 +414,13 @@ class Cache:
byref(errors),
)
if status:
self.put_and_read_unlock()
self.read_unlock()
raise OcfError("Failed getting stats", status)
line_size = CacheLineSize(cache_info.cache_line_size)
cache_id = self.owner.lib.ocf_cache_get_id(self)
self.put_and_read_unlock()
self.read_unlock()
return {
"conf": {
"attached": cache_info.attached,
@ -494,7 +486,7 @@ class Cache:
if not self.started:
raise Exception("Already stopped!")
self.get_and_write_lock()
self.write_lock()
c = OcfCompletion(
[("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
@ -504,40 +496,40 @@ class Cache:
c.wait()
if c.results["error"]:
self.put_and_write_unlock()
self.write_unlock()
raise OcfError("Failed stopping cache", c.results["error"])
self.mngt_queue.put()
del self.io_queues[:]
self.started = False
self.put_and_write_unlock()
self.write_unlock()
self.owner.caches.remove(self)
def flush(self):
self.get_and_write_lock()
self.write_lock()
c = OcfCompletion(
[("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
)
self.owner.lib.ocf_mngt_cache_flush(self.cache_handle, c, None)
c.wait()
self.put_and_write_unlock()
self.write_unlock()
if c.results["error"]:
raise OcfError("Couldn't flush cache", c.results["error"])
def get_name(self):
self.get_and_read_lock()
self.read_lock()
try:
return str(self.owner.lib.ocf_cache_get_name(self), encoding="ascii")
except:
raise OcfError("Couldn't get cache name")
finally:
self.put_and_read_unlock()
self.read_unlock()
lib = OcfLib.getInstance()

View File

@ -112,22 +112,22 @@ class Core:
blocks = BlocksStats()
errors = ErrorsStats()
self.cache.get_and_read_lock()
self.cache.read_lock()
status = self.cache.owner.lib.ocf_stats_collect_core(
self.handle, byref(usage), byref(req), byref(blocks), byref(errors)
)
if status:
self.cache.put_and_read_unlock()
self.cache.read_unlock()
raise OcfError("Failed collecting core stats", status)
status = self.cache.owner.lib.ocf_core_get_stats(
self.handle, byref(core_stats)
)
if status:
self.cache.put_and_read_unlock()
self.cache.read_unlock()
raise OcfError("Failed getting core stats", status)
self.cache.put_and_read_unlock()
self.cache.read_unlock()
return {
"size": Size(core_stats.core_size_bytes),
"dirty_for": timedelta(seconds=core_stats.dirty_for),
@ -140,16 +140,16 @@ class Core:
}
def set_seq_cut_off_policy(self, policy: SeqCutOffPolicy):
self.cache.get_and_write_lock()
self.cache.write_lock()
status = self.cache.owner.lib.ocf_mngt_core_set_seq_cutoff_policy(
self.handle, policy
)
if status:
self.cache.put_and_write_unlock()
self.cache.write_unlock()
raise OcfError("Error setting core seq cut off policy", status)
self.cache.put_and_write_unlock()
self.cache.write_unlock()
def reset_stats(self):
self.cache.owner.lib.ocf_core_stats_initialize(self.handle)

View File

@ -21,7 +21,6 @@ class OcfErrorCode(IntEnum):
OCF_ERR_TOO_MANY_CACHES = auto()
OCF_ERR_NO_FREE_RAM = auto()
OCF_ERR_START_CACHE_FAIL = auto()
OCF_ERR_CACHE_IN_USE = auto()
OCF_ERR_CACHE_NOT_EXIST = auto()
OCF_ERR_CACHE_EXIST = auto()
OCF_ERR_TOO_MANY_CORES = auto()

View File

@ -183,7 +183,7 @@ ocf_cache_t __wrap_ocf_cleaner_get_cache(ocf_cleaner_t c)
return mock_ptr_type(struct ocf_cache*);
}
bool __wrap_ocf_mngt_is_cache_locked(ocf_cache_t cache)
bool __wrap_ocf_mngt_cache_is_locked(ocf_cache_t cache)
{
function_called();
return mock();
@ -207,13 +207,13 @@ int __wrap_env_bit_test(int nr, const void *addr)
return mock();
}
int __wrap_env_rwsem_down_write_trylock(env_rwsem *s)
int __wrap_ocf_mngt_cache_trylock(env_rwsem *s)
{
function_called();
return mock();
}
void __wrap_env_rwsem_up_write(env_rwsem *s)
void __wrap_ocf_mngt_cache_unlock(env_rwsem *s)
{
function_called();
}
@ -248,11 +248,11 @@ static void ocf_cleaner_run_test01(void **state)
expect_function_call(__wrap_env_bit_test);
will_return(__wrap_env_bit_test, 1);
expect_function_call(__wrap_ocf_mngt_is_cache_locked);
will_return(__wrap_ocf_mngt_is_cache_locked, 0);
expect_function_call(__wrap_ocf_mngt_cache_is_locked);
will_return(__wrap_ocf_mngt_cache_is_locked, 0);
expect_function_call(__wrap_env_rwsem_down_write_trylock);
will_return(__wrap_env_rwsem_down_write_trylock, 0);
expect_function_call(__wrap_ocf_mngt_cache_trylock);
will_return(__wrap_ocf_mngt_cache_trylock, 0);
expect_function_call(__wrap__ocf_cleaner_run_check_dirty_inactive);
will_return(__wrap__ocf_cleaner_run_check_dirty_inactive, 0);