Merge pull request #31 from robertbaldyga/adapt-to-new-ocf-20190604

Adapt to new OCF API
This commit is contained in:
Michał Mielewczyk 2019-06-05 12:22:01 +02:00 committed by GitHub
commit 15196017b8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 284 additions and 45 deletions

View File

@ -49,10 +49,6 @@ struct {
OCF_ERR_START_CACHE_FAIL,
"Failed to insert cache"
},
{
OCF_ERR_CACHE_IN_USE,
"At least one cas device is still in use"
},
{
OCF_ERR_CACHE_NOT_EXIST,
"Cache ID does not exist"

View File

@ -8,9 +8,10 @@
#include "utils/utils_rpool.h"
#include "utils/utils_data.h"
#include "utils/utils_gc.h"
#include "utils/utils_mpool.h"
#include "threads.h"
struct ocf_mpool *cas_bvec_pool;
struct cas_mpool *cas_bvec_pool;
struct cas_reserve_pool *cas_bvec_pages_rpool;
@ -86,7 +87,7 @@ ctx_data_t *__cas_ctx_data_alloc(uint32_t pages, bool zalloc)
struct page *page = NULL;
int cpu;
data = ocf_mpool_new(cas_bvec_pool, pages);
data = cas_mpool_new(cas_bvec_pool, pages);
if (!data) {
CAS_PRINT_RL(KERN_ERR "Couldn't allocate BIO vector.\n");
@ -132,7 +133,7 @@ ctx_data_t *__cas_ctx_data_alloc(uint32_t pages, bool zalloc)
}
}
ocf_mpool_del(cas_bvec_pool, data, pages);
cas_mpool_del(cas_bvec_pool, data, pages);
data = NULL;
} else {
/* Initialize iterator */
@ -174,7 +175,7 @@ void cas_ctx_data_free(ctx_data_t *ctx_data)
__free_page(page);
}
ocf_mpool_del(cas_bvec_pool, data, data->size);
cas_mpool_del(cas_bvec_pool, data, data->size);
}
static int _cas_ctx_data_mlock(ctx_data_t *ctx_data)
@ -398,8 +399,8 @@ int cas_initialize_context(void)
if (ret < 0)
return ret;
cas_bvec_pool = ocf_mpool_create(NULL, sizeof(data),
sizeof(data.vec[0]), GFP_NOIO, 7, "cas_biovec");
cas_bvec_pool = cas_mpool_create(sizeof(data), sizeof(data.vec[0]),
GFP_NOIO, 7, "cas_biovec");
if (!cas_bvec_pool) {
printk(KERN_ERR "Cannot create BIO vector memory pool\n");
@ -439,7 +440,7 @@ err_block_dev:
err_rpool:
cas_rpool_destroy(cas_bvec_pages_rpool, _cas_free_page_rpool, NULL);
err_mpool:
ocf_mpool_destroy(cas_bvec_pool);
cas_mpool_destroy(cas_bvec_pool);
err_ctx:
ocf_ctx_put(cas_ctx);
@ -451,7 +452,7 @@ void cas_cleanup_context(void)
block_dev_deinit();
atomic_dev_deinit();
cas_garbage_collector_deinit();
ocf_mpool_destroy(cas_bvec_pool);
cas_mpool_destroy(cas_bvec_pool);
cas_rpool_destroy(cas_bvec_pages_rpool, _cas_free_page_rpool, NULL);
ocf_ctx_put(cas_ctx);
@ -464,7 +465,7 @@ void cas_cleanup_context(void)
*/
struct blk_data *cas_alloc_blk_data(uint32_t size, gfp_t flags)
{
struct blk_data *data = ocf_mpool_new_f(cas_bvec_pool, size, flags);
struct blk_data *data = cas_mpool_new_f(cas_bvec_pool, size, flags);
if (data)
data->size = size;
@ -480,6 +481,6 @@ void cas_free_blk_data(struct blk_data *data)
if (!data)
return;
ocf_mpool_del(cas_bvec_pool, data, data->size);
cas_mpool_del(cas_bvec_pool, data, data->size);
}

View File

@ -19,6 +19,42 @@ struct _cache_mng_sync_context {
int *result;
};
/* Completion callback for asynchronous cache lock requests: record the
 * result reported by OCF and wake the thread blocked in the matching
 * *_sync() wrapper. */
static void _cache_mng_lock_complete(ocf_cache_t cache, void *priv, int error)
{
	struct _cache_mng_sync_context *ctx = priv;

	*ctx->result = error;
	complete(&ctx->compl);
}
/* Take the cache management write lock synchronously: issue the
 * asynchronous ocf_mngt_cache_lock() and block until its completion
 * callback fires.
 *
 * Returns 0 on success or the error code delivered by OCF. */
static int _cache_mng_lock_sync(ocf_cache_t cache)
{
	int result;
	struct _cache_mng_sync_context context = {
		.result = &result,
	};

	init_completion(&context.compl);
	ocf_mngt_cache_lock(cache, _cache_mng_lock_complete, &context);
	wait_for_completion(&context.compl);

	return result;
}
/* Take the cache management read lock synchronously: issue the
 * asynchronous ocf_mngt_cache_read_lock() and block until its completion
 * callback fires.
 *
 * Returns 0 on success or the error code delivered by OCF. */
static int _cache_mng_read_lock_sync(ocf_cache_t cache)
{
	int result;
	struct _cache_mng_sync_context context = {
		.result = &result,
	};

	init_completion(&context.compl);
	ocf_mngt_cache_read_lock(cache, _cache_mng_lock_complete, &context);
	wait_for_completion(&context.compl);

	return result;
}
static void _cache_mng_save_sync_complete(ocf_cache_t cache, void *priv,
int error)
{
@ -128,7 +164,7 @@ int cache_mng_flush_object(ocf_cache_id_t cache_id, ocf_core_id_t core_id)
if (result)
return result;
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -155,7 +191,7 @@ int cache_mng_flush_device(ocf_cache_id_t id)
if (result)
return result;
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -177,7 +213,7 @@ int cache_mng_set_cleaning_policy(ocf_cache_id_t cache_id, uint32_t type)
if (result)
return result;
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -205,7 +241,7 @@ int cache_mng_get_cleaning_policy(ocf_cache_id_t cache_id, uint32_t *type)
if (result)
return result;
result = ocf_mngt_cache_read_lock(cache);
result = _cache_mng_read_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -231,7 +267,7 @@ int cache_mng_set_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type,
if (result)
return result;
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -260,7 +296,7 @@ int cache_mng_get_cleaning_param(ocf_cache_id_t cache_id, ocf_cleaning_t type,
if (result)
return result;
result = ocf_mngt_cache_read_lock(cache);
result = _cache_mng_read_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -548,7 +584,7 @@ int cache_mng_add_core_to_cache(struct ocf_mngt_core_config *cfg,
return result;
}
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -679,7 +715,7 @@ int cache_mng_remove_core_from_cache(struct kcas_remove_core *cmd)
if (!cmd->force_no_flush) {
/* First check state and flush data (if requested by user)
under read lock */
result = ocf_mngt_cache_read_lock(cache);
result = _cache_mng_read_lock_sync(cache);
if (result)
goto put;
@ -696,7 +732,7 @@ int cache_mng_remove_core_from_cache(struct kcas_remove_core *cmd)
}
/* Acquire write lock */
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result)
goto put;
@ -758,7 +794,7 @@ int cache_mng_reset_stats(ocf_cache_id_t cache_id,
if (result)
return result;
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -828,7 +864,7 @@ int cache_mng_set_partitions(struct kcas_io_classes *cfg)
goto out_cls;
}
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result)
goto out_cls;
@ -1296,7 +1332,7 @@ int cache_mng_set_seq_cutoff_threshold(ocf_cache_id_t cache_id, ocf_core_id_t co
if (result)
return result;
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -1345,7 +1381,7 @@ int cache_mng_set_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id,
if (result)
return result;
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -1393,7 +1429,7 @@ int cache_mng_get_seq_cutoff_threshold(ocf_cache_id_t cache_id,
if (result)
return result;
result = ocf_mngt_cache_read_lock(cache);
result = _cache_mng_read_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -1433,7 +1469,7 @@ int cache_mng_get_seq_cutoff_policy(ocf_cache_id_t id, ocf_core_id_t core_id,
if (result)
return result;
result = ocf_mngt_cache_read_lock(cache);
result = _cache_mng_read_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -1470,7 +1506,7 @@ int cache_mng_set_cache_mode(ocf_cache_id_t id, ocf_cache_mode_t mode,
if (result)
return result;
result = ocf_mngt_cache_lock(cache);
result = _cache_mng_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -1522,7 +1558,7 @@ int cache_mng_exit_instance(ocf_cache_id_t id, int flush)
cache_priv = ocf_cache_get_priv(cache);
status = ocf_mngt_cache_read_lock(cache);
status = _cache_mng_read_lock_sync(cache);
if (status)
goto put;
/*
@ -1550,7 +1586,7 @@ int cache_mng_exit_instance(ocf_cache_id_t id, int flush)
ocf_mngt_cache_read_unlock(cache);
/* get cache write lock */
status = ocf_mngt_cache_lock(cache);
status = _cache_mng_lock_sync(cache);
if (status)
goto put;
@ -1656,7 +1692,7 @@ int cache_mng_get_info(struct kcas_cache_info *info)
if (result)
return result;
result = ocf_mngt_cache_read_lock(cache);
result = _cache_mng_read_lock_sync(cache);
if (result)
goto put;
@ -1713,7 +1749,7 @@ int cache_mng_get_io_class_info(struct kcas_io_class *part)
if (result)
return result;
result = ocf_mngt_cache_read_lock(cache);
result = _cache_mng_read_lock_sync(cache);
if (result) {
ocf_mngt_cache_put(cache);
return result;
@ -1751,7 +1787,7 @@ int cache_mng_get_core_info(struct kcas_core_info *info)
if (result)
return result;
result = ocf_mngt_cache_read_lock(cache);
result = _cache_mng_read_lock_sync(cache);
if(result)
goto put;

View File

@ -18,7 +18,6 @@ struct {
{ OCF_ERR_NO_MEM, ENOMEM },
{ OCF_ERR_NO_FREE_RAM, ENOMEM },
{ OCF_ERR_START_CACHE_FAIL, EFAULT },
{ OCF_ERR_CACHE_IN_USE, EBUSY },
{ OCF_ERR_CACHE_NOT_EXIST, ENODEV },
{ OCF_ERR_CACHE_EXIST, EEXIST },
{ OCF_ERR_TOO_MANY_CORES, ENOSPC },

View File

@ -0,0 +1,114 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf_env.h"
#include "utils_mpool.h"
/* Create a CAS memory pool consisting of power-of-two sized allocators:
 * bucket i serves allocations of hdr_size + size * 2^i bytes.
 *
 * hdr_size    - header bytes placed before the item array
 * size        - size of one item
 * flags       - default allocation flags for cas_mpool_new()
 * mpool_max   - index of the largest bucket to create
 * name_perfix - prefix used to build per-bucket allocator names
 *
 * Returns the new pool or NULL on failure. */
struct cas_mpool *cas_mpool_create(uint32_t hdr_size, uint32_t size, int flags,
		int mpool_max, const char *name_perfix)
{
	struct cas_mpool *mpool;
	char alloc_name[ALLOCATOR_NAME_MAX] = { '\0' };
	uint32_t idx;
	int printed;

	mpool = env_zalloc(sizeof(*mpool), ENV_MEM_NORMAL);
	if (!mpool)
		return NULL;

	mpool->item_size = size;
	mpool->hdr_size = hdr_size;
	mpool->flags = flags;

	/* Create one allocator per bucket, capped at cas_mpool_max */
	for (idx = 0; idx < min(cas_mpool_max, mpool_max + 1); idx++) {
		printed = snprintf(alloc_name, sizeof(alloc_name), "%s_%u",
				name_perfix, (1 << idx));
		if (printed < 0 || printed >= sizeof(alloc_name))
			goto err;

		mpool->allocator[idx] = env_allocator_create(
				hdr_size + (size * (1 << idx)), alloc_name);
		if (!mpool->allocator[idx])
			goto err;
	}

	return mpool;

err:
	/* cas_mpool_destroy() tolerates partially-initialized pools */
	cas_mpool_destroy(mpool);
	return NULL;
}
/* Tear down a memory pool created by cas_mpool_create(). Safe to call
 * with NULL or with a pool whose allocator array is only partially
 * populated (used on the create error path). */
void cas_mpool_destroy(struct cas_mpool *mallocator)
{
	uint32_t i;

	if (!mallocator)
		return;

	for (i = 0; i < cas_mpool_max; i++) {
		if (mallocator->allocator[i])
			env_allocator_destroy(mallocator->allocator[i]);
	}

	env_free(mallocator);
}
/* Map an item count onto the smallest power-of-two bucket that fits it.
 * Returns NULL when the count exceeds the largest bucket, telling the
 * caller to fall back to a direct allocation.
 *
 * NOTE(review): for count == 0 this returns cas_mpool_1, an enum with
 * value 0, which converts to a null pointer here — presumably the
 * "too big / no bucket" path is intended; confirm against callers. */
static env_allocator *cas_mpool_get_allocator(
		struct cas_mpool *mallocator, uint32_t count)
{
	unsigned int idx;

	if (unlikely(count == 0))
		return cas_mpool_1;

	/* floor(log2(count)), then round up to the next bucket when
	 * count is not an exact power of two */
	idx = 31 - __builtin_clz(count);
	if (count & (count - 1))
		idx++;

	if (idx >= cas_mpool_max)
		return NULL;

	return mallocator->allocator[idx];
}
/* Allocate storage for @count items with explicit allocation @flags.
 * Requests that fit a pool bucket are served from it; oversized
 * requests fall back to a direct zeroed allocation. Returns NULL on
 * failure. */
void *cas_mpool_new_f(struct cas_mpool *mpool, uint32_t count, int flags)
{
	env_allocator *allocator = cas_mpool_get_allocator(mpool, count);
	void *items;

	if (allocator) {
		items = env_allocator_new(allocator);
	} else {
		/* No bucket large enough - allocate directly */
		items = env_zalloc(mpool->hdr_size +
				(mpool->item_size * count), flags);
	}

#ifdef ZERO_OR_NULL_PTR
	if (ZERO_OR_NULL_PTR(items))
		return NULL;
#endif

	return items;
}
/* Allocate storage for @count items using the pool's default allocation
 * flags (the ones passed to cas_mpool_create()). Returns NULL on failure. */
void *cas_mpool_new(struct cas_mpool *mpool, uint32_t count)
{
	return cas_mpool_new_f(mpool, count, mpool->flags);
}
/* Release @items previously obtained from cas_mpool_new{,_f}().
 * @count must match the count used at allocation time so the same
 * bucket (or the direct-allocation fallback) is selected. */
void cas_mpool_del(struct cas_mpool *mpool,
		void *items, uint32_t count)
{
	env_allocator *allocator = cas_mpool_get_allocator(mpool, count);

	if (allocator)
		env_allocator_del(allocator, items);
	else
		env_free(items);
}

View File

@ -0,0 +1,92 @@
/*
* Copyright(c) 2012-2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef UTILS_MPOOL_H_
#define UTILS_MPOOL_H_

#define ALLOCATOR_NAME_MAX 128

/* Power-of-two allocation buckets: cas_mpool_N serves up to N items */
enum {
	cas_mpool_1,
	cas_mpool_2,
	cas_mpool_4,
	cas_mpool_8,
	cas_mpool_16,
	cas_mpool_32,
	cas_mpool_64,
	cas_mpool_128,
	cas_mpool_max
};

struct cas_mpool {
	uint32_t item_size;
		/*!< Size of specific item of memory pool */

	uint32_t hdr_size;
		/*!< Header size before items */

	env_allocator *allocator[cas_mpool_max];
		/*!< OS handle to memory pool */

	int flags;
		/*!< Allocation flags */
};

/**
 * @brief Create CAS memory pool
 *
 * @param hdr_size Header size before array of items
 * @param size Size of particular item
 * @param flags Allocation flags
 * @param mpool_max Maximal allocator size (power of two)
 * @param name_prefix Format name prefix
 *
 * @return CAS memory pool
 */
struct cas_mpool *cas_mpool_create(uint32_t hdr_size, uint32_t size, int flags,
		int mpool_max, const char *name_prefix);

/**
 * @brief Destroy existing memory pool
 *
 * @param mpool memory pool
 */
void cas_mpool_destroy(struct cas_mpool *mpool);

/**
 * @brief Allocate new items of memory pool
 *
 * @note Allocation based on ATOMIC memory pool and this function can be called
 * when IRQ disable
 *
 * @param mpool CAS memory pool reference
 * @param count Count of elements to be allocated
 *
 * @return Pointer to the new items
 */
void *cas_mpool_new(struct cas_mpool *mpool, uint32_t count);

/**
 * @brief Allocate new items of memory pool with specified allocation flag
 *
 * @param mpool CAS memory pool reference
 * @param count Count of elements to be allocated
 * @param flags Kernel allocation flags
 *
 * @return Pointer to the new items
 */
void *cas_mpool_new_f(struct cas_mpool *mpool, uint32_t count, int flags);

/**
 * @brief Free existing items of memory pool
 *
 * @param mpool CAS memory pool reference
 * @param items Items to be freed
 * @param count Count of elements to be freed
 */
void cas_mpool_del(struct cas_mpool *mpool, void *items, uint32_t count);

#endif /* UTILS_MPOOL_H_ */

View File

@ -4,6 +4,7 @@
*/
#include "cas_cache.h"
#include "utils/utils_mpool.h"
#if defined(CAS_NVME_FULL)
#include <linux/nvme.h>
@ -61,7 +62,7 @@ struct cas_atomic_io {
struct bio_vec_iter iter;
};
static struct ocf_mpool *atomic_io_allocator;
static struct cas_mpool *atomic_io_allocator;
static inline uint32_t cas_atomic_max_io_sectors(void)
{
@ -96,7 +97,7 @@ static void cas_atomic_dealloc(struct cas_atomic_io *atomics)
}
}
ocf_mpool_del(atomic_io_allocator, atomics, atomics->count);
cas_mpool_del(atomic_io_allocator, atomics, atomics->count);
}
static struct cas_atomic_io *cas_atomic_alloc(int dir, struct ocf_io *io, bool write_zero)
@ -138,7 +139,7 @@ static struct cas_atomic_io *cas_atomic_alloc(int dir, struct ocf_io *io, bool w
/* Get number of IOs to be issued */
ios_count = DIV_ROUND_UP(bytes, max_io_size);
atoms = ocf_mpool_new(atomic_io_allocator,
atoms = cas_mpool_new(atomic_io_allocator,
ios_count);
if (!atoms)
return NULL;
@ -808,7 +809,7 @@ void cas_atomic_submit_discard(struct ocf_io *io)
}
/* Allocate and setup control structure. */
atom = ocf_mpool_new(atomic_io_allocator, 1);
atom = cas_mpool_new(atomic_io_allocator, 1);
if (!atom) {
CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for IO ctrl\n");
io->end(io, -ENOMEM);
@ -880,7 +881,7 @@ void cas_atomic_submit_flush(struct ocf_io *io)
}
/* Allocate and setup control structure. */
atom = ocf_mpool_new(atomic_io_allocator, 1);
atom = cas_mpool_new(atomic_io_allocator, 1);
if (!atom) {
CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for IO ctrl\n");
io->end(io, -ENOMEM);
@ -1160,8 +1161,8 @@ int atomic_dev_init(void)
if (ret < 0)
return -EINVAL;
atomic_io_allocator = ocf_mpool_create(NULL, 0,
sizeof(struct cas_atomic_io), GFP_NOIO, 1, "cas_atomic_io");
atomic_io_allocator = cas_mpool_create(0, sizeof(struct cas_atomic_io),
GFP_NOIO, 1, "cas_atomic_io");
if (!atomic_io_allocator) {
ocf_ctx_unregister_volume_type(cas_ctx, ATOMIC_DEVICE_VOLUME);
@ -1174,7 +1175,7 @@ int atomic_dev_init(void)
void atomic_dev_deinit(void)
{
if (atomic_io_allocator) {
ocf_mpool_destroy(atomic_io_allocator);
cas_mpool_destroy(atomic_io_allocator);
atomic_io_allocator = NULL;
}

2
ocf

@ -1 +1 @@
Subproject commit 93a06686a7e2285b674b760c6360a7577556b8c7
Subproject commit 75fb6c7940667e5845d24317cf703624a2485a88