Merge pull request #222 from robertbaldyga/name-based-registration

Name based cache/core management
This commit is contained in:
Michał Mielewczyk 2019-07-30 11:02:17 +02:00 committed by GitHub
commit bc20845673
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 454 additions and 585 deletions

View File

@ -100,15 +100,6 @@ struct ocf_cache_info {
*/
ocf_volume_t ocf_cache_get_volume(ocf_cache_t cache);
/**
* @brief Get ID of given cache object
*
* @param[in] cache Cache object
*
* @retval Cache ID
*/
ocf_cache_id_t ocf_cache_get_id(ocf_cache_t cache);
/**
* @brief Set name of given cache object
*

View File

@ -16,6 +16,19 @@
#include "ocf_io.h"
#include "ocf_mngt.h"
/**
* @brief Get OCF core by name
*
* @param[in] cache OCF cache
* @param[in] name Core name
* @param[out] core OCF core handle
*
* @retval 0 Get core successfully
* @retval -OCF_ERR_CORE_NOT_EXIST Core with given name doesn't exist
*/
int ocf_core_get_by_name(ocf_cache_t cache, const char *name,
ocf_core_t *core);
/**
* @brief Obtain cache object from core
*
@ -82,18 +95,6 @@ ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core);
*/
ocf_core_id_t ocf_core_get_id(ocf_core_t core);
/**
* @brief Set name of given core object
*
* @param[in] core Core object
* @param[in] src Source of Core name
* @param[in] src_size Size of src
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_core_set_name(ocf_core_t core, const char *src, size_t src_size);
/**
* @brief Get name of given core object
*

View File

@ -57,12 +57,18 @@ typedef enum {
/** Start cache failure */
OCF_ERR_START_CACHE_FAIL,
/** Cache ID does not exist */
/** Cache ID/name does not exist */
OCF_ERR_CACHE_NOT_EXIST,
/** Cache ID already exists */
/** Core ID/name does not exist */
OCF_ERR_CORE_NOT_EXIST,
/** Cache ID/name already exists */
OCF_ERR_CACHE_EXIST,
/** Core ID/name already exists */
OCF_ERR_CORE_EXIST,
/** Too many core devices in cache */
OCF_ERR_TOO_MANY_CORES,

View File

@ -19,6 +19,11 @@
* @brief Core start configuration
*/
struct ocf_mngt_core_config {
/**
* @brief OCF core name
*/
const char *name;
/**
* @brief OCF core volume UUID
*/
@ -29,17 +34,6 @@ struct ocf_mngt_core_config {
*/
uint8_t volume_type;
/**
* @brief OCF core ID number
*/
ocf_core_id_t core_id;
/**
* @brief OCF core name. In case of being NULL, core id is stringified
* to core name
*/
const char *name;
/**
* @brief Add core to pool if cache isn't present or add core to
* earlier loaded cache
@ -58,7 +52,7 @@ struct ocf_mngt_core_config {
/**
* @brief Initialize core config to default values
*
* @note This function doesn't initiialize uuid and volume_type fields
* @note This function doesn't initialize name, uuid and volume_type fields
* which have no default values and are required to be set by user.
*
* @param[in] cfg Core config structure
@ -66,8 +60,6 @@ struct ocf_mngt_core_config {
static inline void ocf_mngt_core_config_set_default(
struct ocf_mngt_core_config *cfg)
{
cfg->core_id = OCF_CORE_ID_INVALID;
cfg->name = NULL;
cfg->try_add = false;
cfg->seq_cutoff_threshold = 1024;
cfg->user_metadata.data = NULL;
@ -86,20 +78,20 @@ uint32_t ocf_mngt_cache_get_count(ocf_ctx_t ctx);
/* Cache instances getters */
/**
* @brief Get OCF cache
* @brief Get OCF cache by name
*
* @note This function on success also increasing reference counter
* @note This function on success also increases reference counter
* in given cache
*
* @param[in] ctx OCF context
* @param[in] id OCF cache ID
* @param[in] name OCF cache name
* @param[out] cache OCF cache handle
*
* @retval 0 Get cache successfully
* @retval -OCF_ERR_INV_CACHE_ID Cache ID out of range
* @retval -OCF_ERR_CACHE_NOT_EXIST Cache with given ID is not exist
* @retval -OCF_ERR_CACHE_NOT_EXIST Cache with given name doesn't exist
*/
int ocf_mngt_cache_get_by_id(ocf_ctx_t ctx, ocf_cache_id_t id, ocf_cache_t *cache);
int ocf_mngt_cache_get_by_name(ocf_ctx_t ctx, const char* name,
ocf_cache_t *cache);
/**
* @brief Increment reference counter of cache
@ -241,14 +233,7 @@ int ocf_mngt_cache_visit_reverse(ocf_ctx_t ctx, ocf_mngt_cache_visitor_t visitor
*/
struct ocf_mngt_cache_config {
/**
* @brief Cache ID. In case of setting this field to invalid cache
* id first available cache ID will be set
*/
ocf_cache_id_t id;
/**
* @brief Cache name. In case of being NULL, cache id is stringified to
* cache name
* @brief Cache name
*/
const char *name;
@ -309,13 +294,14 @@ struct ocf_mngt_cache_config {
/**
* @brief Initialize core config to default values
*
* @note This function doesn't initialize name field which has no default
* value and is required to be set by user.
*
* @param[in] cfg Cache config structure
*/
static inline void ocf_mngt_cache_config_set_default(
struct ocf_mngt_cache_config *cfg)
{
cfg->id = OCF_CACHE_ID_INVALID;
cfg->name = NULL;
cfg->cache_mode = ocf_cache_mode_default;
cfg->eviction_policy = ocf_eviction_default;
cfg->promotion_policy = ocf_promotion_default;

View File

@ -59,8 +59,8 @@ struct ocf_event_cache_desc {
/** Event header */
struct ocf_event_hdr hdr;
/** Cache Id */
ocf_cache_id_t id;
/** Cache name */
const char *name;
/** Cache line size */
ocf_cache_line_size_t cache_line_size;

View File

@ -12,11 +12,6 @@
#include "ocf_env_headers.h"
/**
* @brief cache id type (by default designated as 16 bit unsigned integer)
*/
typedef uint16_t ocf_cache_id_t;
/**
* @brief cache line type (by default designated as 32 bit unsigned integer)
*/

View File

@ -32,18 +32,16 @@ int ocf_metadata_init(struct ocf_cache *cache,
ENV_BUG_ON(cache->metadata.iface_priv);
ret = ocf_metadata_io_init(cache);
if (ret)
return ret;
*iface = *metadata_hash_get_iface();
ret = cache->metadata.iface.init(cache, cache_line_size);
if (ret)
if (ret) {
ocf_metadata_io_deinit(cache);
return ret;
}
ocf_metadata_concurrency_init(cache);
return ret;
return 0;
}
int ocf_metadata_init_variable_size(struct ocf_cache *cache, uint64_t device_size,

View File

@ -6,6 +6,7 @@
#ifndef __METADATA_SUPERBLOCK_H__
#define __METADATA_SUPERBLOCK_H__
#include <ocf/ocf_def.h>
#include <ocf/ocf_def.h>
#define CACHE_MAGIC_NUMBER 0x187E1CA6
@ -26,6 +27,8 @@ struct ocf_superblock_config {
/* Currently set cache mode */
ocf_cache_mode_t cache_mode;
char name[OCF_CACHE_NAME_SIZE];
ocf_cache_line_t cachelines;
uint32_t valid_parts_no;

View File

@ -25,22 +25,6 @@
#define OCF_ASSERT_PLUGGED(cache) ENV_BUG_ON(!(cache)->device)
/*
 * Look up a cache instance by numeric id on the context's cache list.
 *
 * Walks owner->caches and returns the first cache whose cache_id matches,
 * or NULL when no cache with that id is registered.
 *
 * NOTE(review): iterates the shared list without taking owner->lock here —
 * presumably the caller must hold the context lock; confirm at call sites.
 */
static ocf_cache_t _ocf_mngt_get_cache(ocf_ctx_t owner,
ocf_cache_id_t cache_id)
{
ocf_cache_t iter = NULL;
ocf_cache_t cache = NULL;
/* Linear scan; the caches list is expected to be short. */
list_for_each_entry(iter, &owner->caches, list) {
if (iter->cache_id == cache_id) {
cache = iter;
break;
}
}
return cache;
}
#define DIRTY_SHUTDOWN_ERROR_MSG "Please use --load option to restore " \
"previous cache state (Warning: data corruption may happen)" \
"\nOr initialize your cache using --force option. " \
@ -55,9 +39,6 @@ static ocf_cache_t _ocf_mngt_get_cache(ocf_ctx_t owner,
struct ocf_cache_mngt_init_params {
bool metadata_volatile;
ocf_cache_id_t id;
/*!< cache id */
ocf_ctx_t ctx;
/*!< OCF context */
@ -175,18 +156,6 @@ struct ocf_cache_attach_context {
ocf_pipeline_t pipeline;
};
/*
 * Find the lowest cache id in [OCF_CACHE_ID_MIN, OCF_CACHE_ID_MAX] that is
 * not currently used by any cache on this context.
 *
 * Returns the free id, or OCF_CACHE_ID_INVALID when every id is taken.
 * O(max_ids * caches) — each candidate id triggers a list scan via
 * _ocf_mngt_get_cache().
 */
static ocf_cache_id_t _ocf_mngt_cache_find_free_id(ocf_ctx_t owner)
{
ocf_cache_id_t id = OCF_CACHE_ID_INVALID;
for (id = OCF_CACHE_ID_MIN; id <= OCF_CACHE_ID_MAX; id++) {
if (!_ocf_mngt_get_cache(owner, id))
return id;
}
/* Every id in the valid range is occupied. */
return OCF_CACHE_ID_INVALID;
}
static void __init_hash_table(ocf_cache_t cache)
{
/* Initialize hash table*/
@ -401,8 +370,6 @@ static int _ocf_mngt_init_instance_add_cores(
struct ocf_cache_attach_context *context)
{
ocf_cache_t cache = context->cache;
/* FIXME: This is temporary hack. Remove after storing name it meta. */
char core_name[OCF_CORE_NAME_SIZE];
ocf_core_t core;
ocf_core_id_t core_id;
int ret = -1;
@ -427,14 +394,6 @@ static int _ocf_mngt_init_instance_add_cores(
if (!core->volume.type)
goto err;
ret = snprintf(core_name, sizeof(core_name), "core%d", core_id);
if (ret < 0 || ret >= sizeof(core_name))
goto err;
ret = ocf_core_set_name(core, core_name, sizeof(core_name));
if (ret)
goto err;
tvolume = ocf_mngt_core_pool_lookup(ocf_cache_get_ctx(cache),
&core->volume.uuid, core->volume.type);
if (tvolume) {
@ -624,9 +583,6 @@ static int _ocf_mngt_init_new_cache(struct ocf_cache_mngt_init_params *params)
/* start with freezed metadata ref counter to indicate detached device*/
ocf_refcnt_freeze(&cache->refcnt.metadata);
/* Copy all required initialization parameters */
cache->cache_id = params->id;
env_atomic_set(&(cache->last_access_ms),
env_ticks_to_msecs(env_get_tick_count()));
@ -701,44 +657,22 @@ static int _ocf_mngt_init_prepare_cache(struct ocf_cache_mngt_init_params *param
struct ocf_mngt_cache_config *cfg)
{
ocf_cache_t cache;
char cache_name[OCF_CACHE_NAME_SIZE];
int ret = 0;
ret = env_mutex_lock_interruptible(&param->ctx->lock);
ret = env_rmutex_lock_interruptible(&param->ctx->lock);
if (ret)
return ret;
if (param->id == OCF_CACHE_ID_INVALID) {
/* ID was not specified, take first free id */
param->id = _ocf_mngt_cache_find_free_id(param->ctx);
if (param->id == OCF_CACHE_ID_INVALID) {
ret = -OCF_ERR_TOO_MANY_CACHES;
goto out;
}
cfg->id = param->id;
} else {
/* ID was set, check if cache exist with specified ID */
cache = _ocf_mngt_get_cache(param->ctx, param->id);
if (cache) {
/* Check if cache with specified name exists */
ret = ocf_mngt_cache_get_by_name(param->ctx, cfg->name, &cache);
if (!ret) {
ocf_mngt_cache_put(cache);
/* Cache already exist */
ret = -OCF_ERR_CACHE_EXIST;
goto out;
}
}
if (cfg->name) {
ret = env_strncpy(cache_name, sizeof(cache_name) - 1,
cfg->name, sizeof(cache_name) - 1);
if (ret)
goto out;
} else {
ret = snprintf(cache_name, sizeof(cache_name),
"cache%hu", param->id);
if (ret < 0)
goto out;
}
ocf_log(param->ctx, log_info, "Inserting cache %s\n", cache_name);
ocf_log(param->ctx, log_info, "Inserting cache %s\n", cfg->name);
ret = _ocf_mngt_init_new_cache(param);
if (ret)
@ -746,10 +680,6 @@ static int _ocf_mngt_init_prepare_cache(struct ocf_cache_mngt_init_params *param
cache = param->cache;
ret = ocf_cache_set_name(cache, cache_name, sizeof(cache_name));
if (ret)
goto out;
cache->backfill.max_queue_size = cfg->backfill.max_queue_size;
cache->backfill.queue_unblock_size = cfg->backfill.queue_unblock_size;
@ -762,7 +692,7 @@ static int _ocf_mngt_init_prepare_cache(struct ocf_cache_mngt_init_params *param
cache->metadata.is_volatile = cfg->metadata_volatile;
out:
env_mutex_unlock(&param->ctx->lock);
env_rmutex_unlock(&param->ctx->lock);
return ret;
}
@ -1196,12 +1126,12 @@ static void _ocf_mngt_init_handle_error(ocf_ctx_t ctx,
if (params->flags.metadata_inited)
ocf_metadata_deinit(cache);
env_mutex_lock(&ctx->lock);
env_rmutex_lock(&ctx->lock);
list_del(&cache->list);
env_vfree(cache);
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
}
static void _ocf_mngt_attach_handle_error(
@ -1263,12 +1193,11 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
struct ocf_mngt_cache_config *cfg)
{
struct ocf_cache_mngt_init_params params;
ocf_cache_t tmp_cache;
int result;
ENV_BUG_ON(env_memset(&params, sizeof(params), 0));
params.id = cfg->id;
params.ctx = ctx;
params.metadata.cache_mode = cfg->cache_mode;
params.metadata.layout = cfg->metadata_layout;
@ -1282,21 +1211,30 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
if (result)
goto _cache_mngt_init_instance_ERROR;
*cache = params.cache;
tmp_cache = params.cache;
/*
* Initialize metadata selected segments of metadata in memory
*/
result = ocf_metadata_init(*cache, params.metadata.line_size);
result = ocf_metadata_init(tmp_cache, params.metadata.line_size);
if (result) {
result = -OCF_ERR_START_CACHE_FAIL;
goto _cache_mngt_init_instance_ERROR;
}
ocf_log(ctx, log_debug, "Metadata initialized\n");
params.flags.metadata_inited = true;
_ocf_mngt_cache_init(*cache, &params);
result = ocf_cache_set_name(tmp_cache, cfg->name, OCF_CACHE_NAME_SIZE);
if (result)
goto _cache_mngt_init_instance_ERROR;
result = ocf_metadata_io_init(tmp_cache);
if (result)
goto _cache_mngt_init_instance_ERROR;
ocf_cache_log(tmp_cache, log_debug, "Metadata initialized\n");
_ocf_mngt_cache_init(tmp_cache, &params);
ocf_ctx_get(ctx);
@ -1304,10 +1242,12 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
/* User did not request to lock cache instance after creation -
unlock it here since we have acquired the lock to
perform management operations. */
ocf_mngt_cache_unlock(*cache);
ocf_mngt_cache_unlock(tmp_cache);
params.flags.cache_locked = false;
}
*cache = tmp_cache;
return 0;
_cache_mngt_init_instance_ERROR:
@ -1640,7 +1580,7 @@ err_pipeline:
static int _ocf_mngt_cache_validate_cfg(struct ocf_mngt_cache_config *cfg)
{
if (cfg->id > OCF_CACHE_ID_MAX)
if (!cfg->name)
return -OCF_ERR_INVAL;
if (!ocf_cache_mode_is_valid(cfg->cache_mode))
@ -2028,12 +1968,12 @@ static void ocf_mngt_cache_stop_put_io_queues(ocf_pipeline_t pipeline,
static void ocf_mngt_cache_remove(ocf_ctx_t ctx, ocf_cache_t cache)
{
env_mutex_lock(&ctx->lock);
env_rmutex_lock(&ctx->lock);
/* Mark device uninitialized */
ocf_refcnt_freeze(&cache->refcnt.cache);
/* Remove cache from the list */
list_del(&cache->list);
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
}
static void ocf_mngt_cache_stop_finish(ocf_pipeline_t pipeline,

View File

@ -139,27 +139,21 @@ void ocf_mngt_cache_put(ocf_cache_t cache)
}
}
int ocf_mngt_cache_get_by_id(ocf_ctx_t ocf_ctx, ocf_cache_id_t id, ocf_cache_t *cache)
int ocf_mngt_cache_get_by_name(ocf_ctx_t ctx, const char *name,
ocf_cache_t *cache)
{
int error = 0;
struct ocf_cache *instance = NULL;
struct ocf_cache *iter = NULL;
OCF_CHECK_NULL(ocf_ctx);
OCF_CHECK_NULL(ctx);
OCF_CHECK_NULL(cache);
*cache = NULL;
if ((id < OCF_CACHE_ID_MIN) || (id > OCF_CACHE_ID_MAX)) {
/* Cache id out of range */
return -OCF_ERR_INVAL;
}
/* Lock caches list */
env_mutex_lock(&ocf_ctx->lock);
env_rmutex_lock(&ctx->lock);
list_for_each_entry(iter, &ocf_ctx->caches, list) {
if (iter->cache_id == id) {
list_for_each_entry(iter, &ctx->caches, list) {
if (!env_strncmp(ocf_cache_get_name(iter), name,
OCF_CACHE_NAME_SIZE)) {
instance = iter;
break;
}
@ -173,14 +167,14 @@ int ocf_mngt_cache_get_by_id(ocf_ctx_t ocf_ctx, ocf_cache_id_t id, ocf_cache_t *
}
}
env_mutex_unlock(&ocf_ctx->lock);
env_rmutex_unlock(&ctx->lock);
if (!instance)
error = -OCF_ERR_CACHE_NOT_EXIST;
else
return -OCF_ERR_CACHE_NOT_EXIST;
*cache = instance;
return error;
return 0;
}
typedef void (*ocf_lock_fn_t)(ocf_async_lock_waiter_t waiter);
@ -363,7 +357,7 @@ static int _ocf_mngt_cache_get_list_cpy(ocf_ctx_t ocf_ctx, ocf_cache_t **list,
*list = NULL;
*size = 0;
env_mutex_lock(&ocf_ctx->lock);
env_rmutex_lock(&ocf_ctx->lock);
list_for_each_entry(iter, &ocf_ctx->caches, list) {
count++;
@ -392,7 +386,7 @@ static int _ocf_mngt_cache_get_list_cpy(ocf_ctx_t ocf_ctx, ocf_cache_t **list,
}
END:
env_mutex_unlock(&ocf_ctx->lock);
env_rmutex_unlock(&ocf_ctx->lock);
return result;
}

View File

@ -21,6 +21,12 @@ static ocf_seq_no_t _ocf_mngt_get_core_seq_no(ocf_cache_t cache)
return ++cache->conf_meta->curr_core_seq_no;
}
/*
 * Store @name in the core's on-disk config metadata (conf_meta->name).
 *
 * Copy is bounded by OCF_CORE_NAME_SIZE on both destination and source.
 * Returns the env_strncpy() result (0 on success, non-zero on failure).
 */
static int ocf_mngt_core_set_name(ocf_core_t core, const char *name)
{
return env_strncpy(core->conf_meta->name, OCF_CORE_NAME_SIZE,
name, OCF_CORE_NAME_SIZE);
}
static int _ocf_uuid_set(const struct ocf_volume_uuid *uuid,
struct ocf_metadata_uuid *muuid)
{
@ -73,53 +79,11 @@ void ocf_mngt_core_clear_uuid_metadata(ocf_core_t core)
ocf_mngt_core_set_uuid_metadata(core, &uuid, NULL);
}
static int _ocf_mngt_cache_try_add_core(ocf_cache_t cache, ocf_core_t *core,
struct ocf_mngt_core_config *cfg)
{
int result = 0;
ocf_core_t tmp_core;
ocf_volume_t volume;
tmp_core = &cache->core[cfg->core_id];
volume = &tmp_core->volume;
if (ocf_ctx_get_volume_type_id(cache->owner, volume->type) !=
cfg->volume_type) {
result = -OCF_ERR_INVAL_VOLUME_TYPE;
goto error_out;
}
result = ocf_volume_open(volume, NULL);
if (result)
goto error_out;
if (!ocf_volume_get_length(volume)) {
result = -OCF_ERR_CORE_NOT_AVAIL;
goto error_after_open;
}
tmp_core->opened = true;
if (!(--cache->ocf_core_inactive_count))
env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state);
*core = tmp_core;
return 0;
error_after_open:
ocf_volume_close(volume);
error_out:
*core = NULL;
return result;
}
struct ocf_cache_add_core_context {
ocf_mngt_cache_add_core_end_t cmpl;
void *priv;
ocf_pipeline_t pipeline;
struct ocf_mngt_core_config cfg;
char core_name[OCF_CORE_NAME_SIZE];
ocf_cache_t cache;
ocf_core_t core;
@ -135,20 +99,21 @@ struct ocf_cache_add_core_context {
static void _ocf_mngt_cache_add_core_handle_error(
struct ocf_cache_add_core_context *context)
{
struct ocf_mngt_core_config *cfg = &context->cfg;
ocf_cache_t cache = context->cache;
ocf_core_t core = context->core;
ocf_core_id_t core_id;
ocf_volume_t volume;
ocf_cleaning_t clean_type;
if (!core)
return;
core_id = ocf_core_get_id(core);
volume = &core->volume;
clean_type = cache->conf_meta->cleaning_policy_type;
if (context->flags.counters_allocated) {
env_bit_clear(cfg->core_id,
env_bit_clear(core_id,
cache->conf_meta->valid_core_bitmap);
core->conf_meta->added = false;
core->opened = false;
@ -160,7 +125,7 @@ static void _ocf_mngt_cache_add_core_handle_error(
if (context->flags.clean_pol_added) {
if (cleaning_policy_ops[clean_type].remove_core)
cleaning_policy_ops[clean_type].remove_core(cache,
cfg->core_id);
core_id);
}
if (context->flags.volume_opened)
@ -173,123 +138,6 @@ static void _ocf_mngt_cache_add_core_handle_error(
ocf_mngt_core_clear_uuid_metadata(core);
}
static void _ocf_mngt_cache_add_core_flush_sb_complete(void *priv, int error)
{
struct ocf_cache_add_core_context *context = priv;
if (error)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_WRITE_CACHE);
/* Increase value of added cores */
context->cache->conf_meta->core_count++;
ocf_pipeline_next(context->pipeline);
}
static void _ocf_mngt_cache_add_core(ocf_cache_t cache,
struct ocf_cache_add_core_context *context)
{
struct ocf_mngt_core_config *cfg = &context->cfg;
ocf_core_t core;
struct ocf_volume_uuid new_uuid;
ocf_volume_t volume;
ocf_volume_type_t type;
ocf_seq_no_t core_sequence_no;
ocf_cleaning_t clean_type;
uint64_t length;
int result = 0;
core = ocf_cache_get_core(cache, cfg->core_id);
context->core = core;
volume = &core->volume;
volume->cache = cache;
/* Set uuid */
result = ocf_mngt_core_set_uuid_metadata(core, &cfg->uuid, &new_uuid);
if (result)
OCF_PL_FINISH_RET(context->pipeline, result);
context->flags.uuid_set = true;
type = ocf_ctx_get_volume_type(cache->owner, cfg->volume_type);
if (!type) {
OCF_PL_FINISH_RET(context->pipeline,
-OCF_ERR_INVAL_VOLUME_TYPE);
}
result = ocf_volume_init(volume, type, &new_uuid, false);
if (result)
OCF_PL_FINISH_RET(context->pipeline, result);
context->flags.volume_inited = true;
if (cfg->user_metadata.data && cfg->user_metadata.size > 0) {
result = ocf_mngt_core_set_user_metadata(core,
cfg->user_metadata.data,
cfg->user_metadata.size);
if (result)
OCF_PL_FINISH_RET(context->pipeline, result);
}
result = ocf_volume_open(volume, NULL);
if (result)
OCF_PL_FINISH_RET(context->pipeline, result);
context->flags.volume_opened = true;
length = ocf_volume_get_length(volume);
if (!length)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_CORE_NOT_AVAIL);
core->conf_meta->length = length;
clean_type = cache->conf_meta->cleaning_policy_type;
if (ocf_cache_is_device_attached(cache) &&
cleaning_policy_ops[clean_type].add_core) {
result = cleaning_policy_ops[clean_type].add_core(cache,
cfg->core_id);
if (result)
OCF_PL_FINISH_RET(context->pipeline, result);
context->flags.clean_pol_added = true;
}
/* When adding new core to cache, allocate stat counters */
core->counters =
env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL);
if (!core->counters)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_NO_MEM);
context->flags.counters_allocated = true;
/* When adding new core to cache, reset all core/cache statistics */
ocf_core_stats_initialize(core);
env_atomic_set(&core->runtime_meta->cached_clines, 0);
env_atomic_set(&core->runtime_meta->dirty_clines, 0);
env_atomic64_set(&core->runtime_meta->dirty_since, 0);
/* In metadata mark data this core was added into cache */
env_bit_set(cfg->core_id, cache->conf_meta->valid_core_bitmap);
core->conf_meta->added = true;
core->opened = true;
/* Set default cache parameters for sequential */
core->conf_meta->seq_cutoff_policy = ocf_seq_cutoff_policy_default;
core->conf_meta->seq_cutoff_threshold = cfg->seq_cutoff_threshold;
/* Add core sequence number for atomic metadata matching */
core_sequence_no = _ocf_mngt_get_core_seq_no(cache);
if (core_sequence_no == OCF_SEQ_NO_INVALID)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_TOO_MANY_CORES);
core->conf_meta->seq_no = core_sequence_no;
/* Update super-block with core device addition */
ocf_metadata_flush_superblock(cache,
_ocf_mngt_cache_add_core_flush_sb_complete, context);
}
static unsigned long _ffz(unsigned long word)
{
int i;
@ -325,117 +173,21 @@ static unsigned long _ocf_mngt_find_first_free_core(const unsigned long *bitmap)
return ret;
}
static int __ocf_mngt_lookup_core_uuid(ocf_cache_t cache,
struct ocf_mngt_core_config *cfg)
static int ocf_mngt_find_free_core(ocf_cache_t cache, ocf_core_t *core)
{
int i;
ocf_core_id_t core_id;
ocf_core_t tmp_core;
for (i = 0; i < OCF_CORE_MAX; i++) {
ocf_core_t core = &cache->core[i];
if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
continue;
if (cache->core[i].opened)
continue;
if (ocf_ctx_get_volume_type_id(cache->owner, core->volume.type)
!= cfg->volume_type) {
continue;
}
if (!env_strncmp(core->volume.uuid.data, cfg->uuid.data,
OCF_MIN(core->volume.uuid.size,
cfg->uuid.size)))
return i;
}
return OCF_CORE_MAX;
}
static int __ocf_mngt_try_find_core_id(ocf_cache_t cache,
struct ocf_mngt_core_config *cfg, ocf_core_id_t tmp_core_id)
{
if (tmp_core_id == OCF_CORE_MAX) {
ocf_cache_log(cache, log_err, "Core with given uuid not found "
"in cache metadata\n");
return -OCF_ERR_CORE_NOT_AVAIL;
}
if (cfg->core_id == OCF_CORE_MAX) {
cfg->core_id = tmp_core_id;
return 0;
}
if (cfg->core_id != tmp_core_id) {
ocf_cache_log(cache, log_err,
"Given core id doesn't match with metadata\n");
return -OCF_ERR_CORE_NOT_AVAIL;
}
cfg->core_id = tmp_core_id;
return 0;
}
static int __ocf_mngt_find_core_id(ocf_cache_t cache,
struct ocf_mngt_core_config *cfg, ocf_core_id_t tmp_core_id)
{
if (tmp_core_id != OCF_CORE_MAX) {
ocf_cache_log(cache, log_err,
"Core ID already added as inactive with id:"
" %hu.\n", tmp_core_id);
return -OCF_ERR_CORE_NOT_AVAIL;
}
if (cfg->core_id == OCF_CORE_MAX) {
ocf_cache_log(cache, log_debug, "Core ID is unspecified - "
"will set first available number\n");
/* Core is unspecified */
cfg->core_id = _ocf_mngt_find_first_free_core(
core_id = _ocf_mngt_find_first_free_core(
cache->conf_meta->valid_core_bitmap);
/* no need to check if find_first_zero_bit failed and
* *core_id == MAX_CORE_OBJS_PER_CACHE, as above there is check
* for core_count being greater or equal to
* MAX_CORE_OBJS_PER_CACHE
*/
} else if (cfg->core_id < OCF_CORE_MAX) {
/* check if id is not used already */
if (env_bit_test(cfg->core_id,
cache->conf_meta->valid_core_bitmap)) {
ocf_cache_log(cache, log_debug,
"Core ID already allocated: %d.\n",
cfg->core_id);
return -OCF_ERR_CORE_NOT_AVAIL;
}
} else {
ocf_cache_log(cache, log_err,
"Core ID exceeds maximum of %d.\n",
OCF_CORE_MAX);
return -OCF_ERR_CORE_NOT_AVAIL;
}
return 0;
}
static int _ocf_mngt_find_core_id(ocf_cache_t cache,
struct ocf_mngt_core_config *cfg)
{
int result;
ocf_core_id_t tmp_core_id;
if (cache->conf_meta->core_count >= OCF_CORE_MAX)
tmp_core = ocf_cache_get_core(cache, core_id);
if (!tmp_core)
return -OCF_ERR_TOO_MANY_CORES;
tmp_core_id = __ocf_mngt_lookup_core_uuid(cache, cfg);
*core = tmp_core;
if (cfg->try_add)
result = __ocf_mngt_try_find_core_id(cache, cfg, tmp_core_id);
else
result = __ocf_mngt_find_core_id(cache, cfg, tmp_core_id);
return result;
return 0;
}
int ocf_mngt_core_init_front_volume(ocf_core_t core)
@ -463,35 +215,117 @@ int ocf_mngt_core_init_front_volume(ocf_core_t core)
return ret;
}
static void ocf_mngt_cache_add_core_prepare(ocf_pipeline_t pipeline,
/*
 * Pipeline step: validate a "try_add" request, i.e. re-attaching a core
 * that already exists (inactive) in cache metadata.
 *
 * The core is looked up by name and must: not be opened yet, have a volume
 * whose type id matches cfg->volume_type, and have a uuid matching
 * cfg->uuid. On success the core is stashed in context->core and the
 * pipeline advances; on any mismatch the pipeline finishes with an error
 * after logging a single generic "uuid not found" message (the log text
 * does not distinguish the individual failure reasons).
 */
static void ocf_mngt_cache_try_add_core_prepare(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_cache_add_core_context *context = priv;
struct ocf_mngt_core_config *cfg = &context->cfg;
ocf_cache_t cache = context->cache;
ocf_core_t core;
ocf_volume_t volume;
ocf_volume_type_t type;
ocf_ctx_t ctx = cache->owner;
int result;
/* The core must already be present in metadata under this name. */
result = ocf_core_get_by_name(cache, cfg->name, &core);
if (result)
goto err;
/* An already-opened core cannot be "try_add"-ed again. */
if (core->opened) {
result = -OCF_ERR_INVAL;
goto err;
}
volume = ocf_core_get_volume(core);
type = ocf_volume_get_type(volume);
/* Volume type recorded in metadata must match the request. */
if (ocf_ctx_get_volume_type_id(ctx, type) != cfg->volume_type) {
result = -OCF_ERR_INVAL_VOLUME_TYPE;
goto err;
}
/* Uuid comparison is bounded by the shorter of the two sizes. */
if (env_strncmp(volume->uuid.data, cfg->uuid.data,
OCF_MIN(volume->uuid.size, cfg->uuid.size))) {
result = -OCF_ERR_INVAL;
goto err;
}
context->core = core;
OCF_PL_NEXT_RET(pipeline);
err:
ocf_cache_log(cache, log_err, "Core with given uuid not found "
"in cache metadata\n");
OCF_PL_FINISH_RET(pipeline, result);
}
static void ocf_mngt_cache_try_add_core_insert(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_cache_add_core_context *context = priv;
ocf_cache_t cache = context->cache;
char *core_name = context->core_name;
ocf_core_t core = context->core;
ocf_volume_t volume;
int result;
result = _ocf_mngt_find_core_id(cache, &context->cfg);
if (result)
OCF_PL_FINISH_RET(context->pipeline, result);
ocf_core_log(core, log_debug, "Inserting core\n");
if (context->cfg.name) {
result = env_strncpy(core_name, sizeof(context->core_name),
context->cfg.name, sizeof(context->core_name));
volume = ocf_core_get_volume(core);
result = ocf_volume_open(volume, NULL);
if (result)
OCF_PL_FINISH_RET(context->pipeline, result);
} else {
result = snprintf(core_name, sizeof(context->core_name),
"core%hu", context->cfg.core_id);
if (result < 0)
OCF_PL_FINISH_RET(context->pipeline, result);
OCF_PL_FINISH_RET(pipeline, result);
if (!ocf_volume_get_length(volume)) {
result = -OCF_ERR_CORE_NOT_AVAIL;
goto error_after_open;
}
result = ocf_core_set_name(&cache->core[context->cfg.core_id],
core_name, sizeof(context->core_name));
core->opened = true;
if (!(--cache->ocf_core_inactive_count))
env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state);
OCF_PL_NEXT_RET(pipeline);
error_after_open:
ocf_volume_close(volume);
OCF_PL_FINISH_RET(pipeline, result);
}
/*
 * Pipeline step: prepare adding a brand-new core to the cache.
 *
 * Fails with -OCF_ERR_CORE_EXIST if a core with cfg->name is already
 * registered (ocf_core_get_by_name() returning 0 means a duplicate).
 * Otherwise reserves a free core slot via ocf_mngt_find_free_core(),
 * stores it in context->core and advances the pipeline.
 */
static void ocf_mngt_cache_add_core_prepare(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_cache_add_core_context *context = priv;
struct ocf_mngt_core_config *cfg = &context->cfg;
ocf_cache_t cache = context->cache;
ocf_core_t core;
int result;
/* Success of the name lookup means the name is taken — reject. */
result = ocf_core_get_by_name(cache, cfg->name, &core);
if (!result)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_CORE_EXIST);
result = ocf_mngt_find_free_core(cache, &core);
if (result)
OCF_PL_FINISH_RET(context->pipeline, result);
context->core = core;
ocf_pipeline_next(context->pipeline);
}
/*
 * Completion callback for the superblock flush issued at the end of
 * core insertion.
 *
 * On flush error the pipeline is finished with -OCF_ERR_WRITE_CACHE
 * (the original error code is not propagated). On success the cache's
 * core_count is bumped and the pipeline advances.
 */
static void _ocf_mngt_cache_add_core_flush_sb_complete(void *priv, int error)
{
struct ocf_cache_add_core_context *context = priv;
if (error)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_WRITE_CACHE);
/* Increase value of added cores */
context->cache->conf_meta->core_count++;
ocf_pipeline_next(context->pipeline);
}
@ -499,20 +333,109 @@ static void ocf_mngt_cache_add_core_insert(ocf_pipeline_t pipeline,
void *priv, ocf_pipeline_arg_t arg)
{
struct ocf_cache_add_core_context *context = priv;
struct ocf_mngt_core_config *cfg = &context->cfg;
ocf_cache_t cache = context->cache;
char *core_name = context->core_name;
int result;
ocf_core_t core = context->core;
ocf_core_id_t core_id;
struct ocf_volume_uuid new_uuid;
ocf_volume_t volume;
ocf_volume_type_t type;
ocf_seq_no_t core_sequence_no;
ocf_cleaning_t clean_type;
uint64_t length;
int result = 0;
ocf_cache_log(cache, log_debug, "Inserting core %s\n", core_name);
ocf_cache_log(cache, log_debug, "Inserting core %s\n", cfg->name);
if (context->cfg.try_add) {
result = _ocf_mngt_cache_try_add_core(cache, &context->core,
&context->cfg);
volume = ocf_core_get_volume(core);
volume->cache = cache;
core_id = ocf_core_get_id(core);
OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, result);
result = ocf_mngt_core_set_name(core, cfg->name);
if (result)
OCF_PL_FINISH_RET(pipeline, result);
/* Set uuid */
result = ocf_mngt_core_set_uuid_metadata(core, &cfg->uuid, &new_uuid);
if (result)
OCF_PL_FINISH_RET(pipeline, result);
context->flags.uuid_set = true;
type = ocf_ctx_get_volume_type(cache->owner, cfg->volume_type);
if (!type)
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_INVAL_VOLUME_TYPE);
result = ocf_volume_init(volume, type, &new_uuid, false);
if (result)
OCF_PL_FINISH_RET(pipeline, result);
context->flags.volume_inited = true;
if (cfg->user_metadata.data && cfg->user_metadata.size > 0) {
result = ocf_mngt_core_set_user_metadata(core,
cfg->user_metadata.data,
cfg->user_metadata.size);
if (result)
OCF_PL_FINISH_RET(pipeline, result);
}
_ocf_mngt_cache_add_core(cache, context);
result = ocf_volume_open(volume, NULL);
if (result)
OCF_PL_FINISH_RET(pipeline, result);
context->flags.volume_opened = true;
length = ocf_volume_get_length(volume);
if (!length)
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_CORE_NOT_AVAIL);
core->conf_meta->length = length;
clean_type = cache->conf_meta->cleaning_policy_type;
if (ocf_cache_is_device_attached(cache) &&
cleaning_policy_ops[clean_type].add_core) {
result = cleaning_policy_ops[clean_type].add_core(cache,
core_id);
if (result)
OCF_PL_FINISH_RET(pipeline, result);
context->flags.clean_pol_added = true;
}
/* When adding new core to cache, allocate stat counters */
core->counters =
env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL);
if (!core->counters)
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_NO_MEM);
context->flags.counters_allocated = true;
/* When adding new core to cache, reset all core/cache statistics */
ocf_core_stats_initialize(core);
env_atomic_set(&core->runtime_meta->cached_clines, 0);
env_atomic_set(&core->runtime_meta->dirty_clines, 0);
env_atomic64_set(&core->runtime_meta->dirty_since, 0);
/* In metadata mark data this core was added into cache */
env_bit_set(core_id, cache->conf_meta->valid_core_bitmap);
core->conf_meta->added = true;
core->opened = true;
/* Set default cache parameters for sequential */
core->conf_meta->seq_cutoff_policy = ocf_seq_cutoff_policy_default;
core->conf_meta->seq_cutoff_threshold = cfg->seq_cutoff_threshold;
/* Add core sequence number for atomic metadata matching */
core_sequence_no = _ocf_mngt_get_core_seq_no(cache);
if (core_sequence_no == OCF_SEQ_NO_INVALID)
OCF_PL_FINISH_RET(pipeline, -OCF_ERR_TOO_MANY_CORES);
core->conf_meta->seq_no = core_sequence_no;
/* Update super-block with core device addition */
ocf_metadata_flush_superblock(cache,
_ocf_mngt_cache_add_core_flush_sb_complete, context);
}
static void ocf_mngt_cache_add_core_init_front_volume(ocf_pipeline_t pipeline,
@ -540,10 +463,10 @@ static void ocf_mngt_cache_add_core_finish(ocf_pipeline_t pipeline,
if (error == -OCF_ERR_CORE_NOT_AVAIL) {
ocf_cache_log(cache, log_err, "Core %s is zero size\n",
context->core_name);
context->cfg.name);
}
ocf_cache_log(cache, log_err, "Adding core %s failed\n",
context->core_name);
context->cfg.name);
goto out;
}
@ -551,11 +474,23 @@ static void ocf_mngt_cache_add_core_finish(ocf_pipeline_t pipeline,
out:
context->cmpl(cache, core, context->priv, error);
env_vfree(context->cfg.name);
env_vfree(context->cfg.uuid.data);
ocf_pipeline_destroy(context->pipeline);
}
struct ocf_pipeline_properties ocf_mngt_cache_add_core_pipeline_properties = {
/*
 * Pipeline description for the "try_add" flow (re-attaching an inactive
 * core already present in metadata): validate by name/uuid, open the
 * volume, then initialize the front volume. Shares the add-core context
 * struct and finish handler with the regular add-core pipeline.
 */
struct ocf_pipeline_properties ocf_mngt_cache_try_add_core_pipeline_props = {
.priv_size = sizeof(struct ocf_cache_add_core_context),
.finish = ocf_mngt_cache_add_core_finish,
.steps = {
OCF_PL_STEP(ocf_mngt_cache_try_add_core_prepare),
OCF_PL_STEP(ocf_mngt_cache_try_add_core_insert),
OCF_PL_STEP(ocf_mngt_cache_add_core_init_front_volume),
OCF_PL_STEP_TERMINATOR(),
},
};
struct ocf_pipeline_properties ocf_mngt_cache_add_core_pipeline_props = {
.priv_size = sizeof(struct ocf_cache_add_core_context),
.finish = ocf_mngt_cache_add_core_finish,
.steps = {
@ -572,6 +507,7 @@ void ocf_mngt_cache_add_core(ocf_cache_t cache,
{
struct ocf_cache_add_core_context *context;
ocf_pipeline_t pipeline;
char *name;
void *data;
int result;
@ -580,8 +516,12 @@ void ocf_mngt_cache_add_core(ocf_cache_t cache,
if (!cache->mngt_queue)
OCF_CMPL_RET(cache, NULL, priv, -OCF_ERR_INVAL);
result = ocf_pipeline_create(&pipeline, cache,
&ocf_mngt_cache_add_core_pipeline_properties);
if (!cfg->name)
OCF_CMPL_RET(cache, NULL, priv, -OCF_ERR_INVAL);
result = ocf_pipeline_create(&pipeline, cache, cfg->try_add ?
&ocf_mngt_cache_try_add_core_pipeline_props :
&ocf_mngt_cache_add_core_pipeline_props);
if (result)
OCF_CMPL_RET(cache, NULL, priv, -OCF_ERR_NO_MEM);
@ -593,10 +533,23 @@ void ocf_mngt_cache_add_core(ocf_cache_t cache,
context->cache = cache;
context->cfg = *cfg;
name = env_vmalloc(OCF_CORE_NAME_SIZE);
if (!name) {
result = -OCF_ERR_NO_MEM;
goto err_pipeline;
}
result = env_strncpy(name, OCF_CORE_NAME_SIZE,
cfg->name, OCF_CORE_NAME_SIZE);
if (result)
goto err_name;
context->cfg.name = name;
data = env_vmalloc(cfg->uuid.size);
if (!data) {
result = -OCF_ERR_NO_MEM;
goto err_pipeline;
goto err_name;
}
result = env_memcpy(data, cfg->uuid.size, cfg->uuid.data,
@ -610,6 +563,8 @@ void ocf_mngt_cache_add_core(ocf_cache_t cache,
err_uuid:
env_vfree(data);
err_name:
env_vfree(context->cfg.name);
err_pipeline:
ocf_pipeline_destroy(context->pipeline);
OCF_CMPL_RET(cache, NULL, priv, result);
@ -848,7 +803,7 @@ void ocf_mngt_cache_detach_core(ocf_core_t core,
context->cmpl = cmpl;
context->priv = priv;
context->pipeline = pipeline;
context->cache = ocf_core_get_cache(core);
context->cache = cache;
context->core = core;
context->core_name = ocf_core_get_name(core);

View File

@ -19,9 +19,9 @@ int ocf_mngt_core_pool_get_count(ocf_ctx_t ctx)
{
int count;
OCF_CHECK_NULL(ctx);
env_mutex_lock(&ctx->lock);
env_rmutex_lock(&ctx->lock);
count = ctx->core_pool.core_pool_count;
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
return count;
}
@ -43,10 +43,10 @@ int ocf_mngt_core_pool_add(ocf_ctx_t ctx, ocf_uuid_t uuid, uint8_t type)
return result;
}
env_mutex_lock(&ctx->lock);
env_rmutex_lock(&ctx->lock);
list_add(&volume->core_pool_item, &ctx->core_pool.core_pool_head);
ctx->core_pool.core_pool_count++;
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
return result;
}
@ -59,14 +59,14 @@ int ocf_mngt_core_pool_visit(ocf_ctx_t ctx,
OCF_CHECK_NULL(ctx);
OCF_CHECK_NULL(visitor);
env_mutex_lock(&ctx->lock);
env_rmutex_lock(&ctx->lock);
list_for_each_entry(svolume, &ctx->core_pool.core_pool_head,
core_pool_item) {
result = visitor(&svolume->uuid, visitor_ctx);
if (result)
break;
}
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
return result;
}
@ -94,10 +94,10 @@ void ocf_mngt_core_pool_remove(ocf_ctx_t ctx, ocf_volume_t volume)
{
OCF_CHECK_NULL(ctx);
OCF_CHECK_NULL(volume);
env_mutex_lock(&ctx->lock);
env_rmutex_lock(&ctx->lock);
ctx->core_pool.core_pool_count--;
list_del(&volume->core_pool_item);
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
ocf_volume_destroy(volume);
}

View File

@ -179,13 +179,12 @@ static int _ocf_mngt_get_sectors(ocf_cache_t cache, ocf_core_id_t core_id,
break;
}
ocf_core_log(&cache->core[core_id], log_debug,
ocf_core_log(core, log_debug,
"%u dirty cache lines to clean\n", j);
if (dirty != j) {
ocf_cache_log(cache, log_debug, "Wrong number of dirty "
"blocks for flushing core %s (%u!=%u)\n",
cache->core[core_id].name, j, dirty);
ocf_core_log(core, log_debug, "Wrong number of dirty "
"blocks for flushing (%u!=%u)\n", j, dirty);
}

View File

@ -17,13 +17,13 @@ uint32_t ocf_mngt_cache_get_count(ocf_ctx_t ctx)
OCF_CHECK_NULL(ctx);
env_mutex_lock(&ctx->lock);
env_rmutex_lock(&ctx->lock);
/* currently, there are no macros in list.h to get list size.*/
list_for_each_entry(cache, &ctx->caches, list)
count++;
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
return count;
}

View File

@ -17,22 +17,17 @@ ocf_volume_t ocf_cache_get_volume(ocf_cache_t cache)
return cache->device ? &cache->device->volume : NULL;
}
/*
 * Return the numeric ID of @cache.
 *
 * NOTE(review): this commit moves to name-based cache management — the
 * public declaration of this accessor and the cache_id field it reads are
 * removed elsewhere in the change. Confirm this definition should be
 * deleted as well rather than kept as dead code.
 */
ocf_cache_id_t ocf_cache_get_id(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);
	return cache->cache_id;
}
/*
 * Set the name of @cache.
 *
 * The name is stored in the cache's config metadata (conf_meta->name).
 * Returns 0 on success, non-zero when the copy fails.
 *
 * Fix: removed the unreachable leftover `return env_strncpy(cache->name,
 * OCF_CACHE_NAME_SIZE - 1, ...)` — it preceded the real return (dead code)
 * and wrote to the `cache->name` field that is removed in this change.
 */
int ocf_cache_set_name(ocf_cache_t cache, const char *src, size_t src_size)
{
	OCF_CHECK_NULL(cache);

	return env_strncpy(cache->conf_meta->name, OCF_CACHE_NAME_SIZE,
			src, src_size);
}
/*
 * Return the name of @cache, as stored in its config metadata.
 *
 * Fix: removed the unreachable stale `return cache->name;` (the `name`
 * field is removed from struct ocf_cache in this change); only the
 * conf_meta-backed return remains.
 */
const char *ocf_cache_get_name(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);

	return cache->conf_meta->name;
}
bool ocf_cache_is_incomplete(ocf_cache_t cache)

View File

@ -112,10 +112,6 @@ struct ocf_cache {
ocf_eviction_t eviction_policy_init;
int cache_id;
char name[OCF_CACHE_NAME_SIZE];
struct {
/* cache get/put counter */
struct ocf_refcnt cache;
@ -176,9 +172,19 @@ struct ocf_cache {
/*
 * Return the core object at slot @core_id in @cache, or NULL when the id
 * is outside the valid range [0, OCF_CORE_MAX).
 */
static inline ocf_core_t ocf_cache_get_core(ocf_cache_t cache,
		ocf_core_id_t core_id)
{
	return (core_id < OCF_CORE_MAX) ? &cache->core[core_id] : NULL;
}
/*
 * Iterate over every core slot in @_cache, whether added or not.
 *
 * Fix: the expansion referenced the literal identifier `cache` instead of
 * the macro parameter `_cache`, so the macro only compiled when the
 * caller's variable happened to be named `cache`. Use the parameter,
 * parenthesized, for macro hygiene.
 */
#define for_each_core_all(_cache, _core, _id) \
	for (_id = 0; _core = &(_cache)->core[_id], _id < OCF_CORE_MAX; _id++)

/* Iterate only over core slots whose metadata marks them as added */
#define for_each_core(_cache, _core, _id) \
	for_each_core_all(_cache, _core, _id) \
		if (_core->conf_meta->added)
#define ocf_cache_log_prefix(cache, lvl, prefix, fmt, ...) \
ocf_log_prefix(ocf_cache_get_ctx(cache), lvl, "%s" prefix, \
fmt, ocf_cache_get_name(cache), ##__VA_ARGS__)

View File

@ -49,19 +49,28 @@ ocf_core_id_t ocf_core_get_id(ocf_core_t core)
return core_id;
}
int ocf_core_set_name(ocf_core_t core, const char *src, size_t src_size)
int ocf_core_get_by_name(ocf_cache_t cache, const char *name,
ocf_core_t *core)
{
OCF_CHECK_NULL(core);
OCF_CHECK_NULL(src);
ocf_core_t i_core;
ocf_core_id_t i_core_id;
return env_strncpy(core->name, OCF_CORE_NAME_SIZE - 1, src, src_size);
for_each_core(cache, i_core, i_core_id) {
if (!env_strncmp(ocf_core_get_name(i_core), name,
OCF_CORE_NAME_SIZE)) {
*core = i_core;
return 0;
}
}
return -OCF_ERR_CORE_NOT_EXIST;
}
/*
 * Return the name of @core, as stored in its config metadata.
 *
 * Fix: removed the unreachable stale `return core->name;` (the `name`
 * field is removed from struct ocf_core in this change); only the
 * conf_meta-backed return remains.
 */
const char *ocf_core_get_name(ocf_core_t core)
{
	OCF_CHECK_NULL(core);

	return core->conf_meta->name;
}
ocf_core_state_t ocf_core_get_state(ocf_core_t core)

View File

@ -26,6 +26,8 @@ struct ocf_metadata_uuid {
#define OCF_CORE_USER_DATA_SIZE 64
struct ocf_core_meta_config {
char name[OCF_CORE_NAME_SIZE];
uint8_t type;
/* This bit means that object was added into cache */
@ -69,8 +71,6 @@ struct ocf_core_meta_runtime {
struct ocf_core {
char name[OCF_CORE_NAME_SIZE];
struct ocf_volume front_volume;
struct ocf_volume volume;
@ -97,11 +97,4 @@ int ocf_core_volume_type_init(ocf_ctx_t ctx);
void ocf_core_volume_type_deinit(ocf_ctx_t ctx);
/*
 * Iterate over every core slot in @_cache, whether added or not.
 *
 * Fix: the expansions referenced caller-scope identifiers `cache` and
 * `core` instead of the macro parameters `_cache` and `_core`, so the
 * macros only compiled when the caller used exactly those variable names.
 * Use the parameters (parenthesized) for macro hygiene.
 */
#define for_each_core_all(_cache, _core, _id) \
	for (_id = 0; _core = &(_cache)->core[_id], _id < OCF_CORE_MAX; _id++)

/* Iterate only over core slots whose metadata marks them as added */
#define for_each_core(_cache, _core, _id) \
	for_each_core_all(_cache, _core, _id) \
		if (_core->conf_meta->added)
#endif /* __OCF_CORE_PRIV_H__ */

View File

@ -24,10 +24,10 @@ int ocf_ctx_register_volume_type_extended(ocf_ctx_t ctx, uint8_t type_id,
if (!ctx || !properties)
return -EINVAL;
env_mutex_lock(&ctx->lock);
env_rmutex_lock(&ctx->lock);
if (type_id >= OCF_VOLUME_TYPE_MAX || ctx->volume_type[type_id]) {
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
result = -EINVAL;
goto err;
}
@ -36,7 +36,7 @@ int ocf_ctx_register_volume_type_extended(ocf_ctx_t ctx, uint8_t type_id,
if (!ctx->volume_type[type_id])
result = -EINVAL;
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
if (result)
goto err;
@ -65,14 +65,14 @@ void ocf_ctx_unregister_volume_type(ocf_ctx_t ctx, uint8_t type_id)
{
OCF_CHECK_NULL(ctx);
env_mutex_lock(&ctx->lock);
env_rmutex_lock(&ctx->lock);
if (type_id < OCF_VOLUME_TYPE_MAX && ctx->volume_type[type_id]) {
ocf_volume_type_deinit(ctx->volume_type[type_id]);
ctx->volume_type[type_id] = NULL;
}
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
}
/*
@ -160,7 +160,7 @@ int ocf_ctx_create(ocf_ctx_t *ctx, const struct ocf_ctx_config *cfg)
INIT_LIST_HEAD(&ocf_ctx->caches);
env_atomic_set(&ocf_ctx->ref_count, 1);
ret = env_mutex_init(&ocf_ctx->lock);
ret = env_rmutex_init(&ocf_ctx->lock);
if (ret)
goto err_ctx;
@ -216,9 +216,9 @@ void ocf_ctx_put(ocf_ctx_t ctx)
if (env_atomic_dec_return(&ctx->ref_count))
return;
env_mutex_lock(&ctx->lock);
env_rmutex_lock(&ctx->lock);
ENV_BUG_ON(!list_empty(&ctx->caches));
env_mutex_unlock(&ctx->lock);
env_rmutex_unlock(&ctx->lock);
ocf_mngt_core_pool_deinit(ctx);
ocf_core_volume_type_deinit(ctx);

View File

@ -22,7 +22,7 @@ struct ocf_ctx {
struct ocf_logger logger;
struct ocf_volume_type *volume_type[OCF_VOLUME_TYPE_MAX];
env_atomic ref_count;
env_mutex lock;
env_rmutex lock;
struct list_head caches;
struct {
struct list_head core_pool_head;

View File

@ -48,7 +48,7 @@ static int _ocf_trace_cache_info(ocf_cache_t cache, ocf_queue_t io_queue)
env_ticks_to_nsecs(env_get_tick_count()),
sizeof(cache_desc));
cache_desc.id = ocf_cache_get_id(cache);
cache_desc.name = ocf_cache_get_name(cache);
cache_desc.cache_line_size = ocf_cache_get_line_size(cache);
cache_desc.cache_mode = ocf_cache_get_mode(cache);
@ -85,9 +85,7 @@ int ocf_mngt_start_trace(ocf_cache_t cache, void *trace_ctx,
return -EINVAL;
if (cache->trace.trace_callback) {
ocf_cache_log(cache, log_err,
"Tracing already started for cache %u\n",
ocf_cache_get_id(cache));
ocf_cache_log(cache, log_err, "Tracing already started\n");
return -EINVAL;
}
@ -107,8 +105,7 @@ int ocf_mngt_start_trace(ocf_cache_t cache, void *trace_ctx,
}
}
ocf_cache_log(cache, log_info,
"Tracing started for cache %u\n", ocf_cache_get_id(cache));
ocf_cache_log(cache, log_info, "Tracing started\n");
return result;
}
@ -120,9 +117,7 @@ int ocf_mngt_stop_trace(ocf_cache_t cache)
OCF_CHECK_NULL(cache);
if (!cache->trace.trace_callback) {
ocf_cache_log(cache, log_err,
"Tracing not started for cache %u\n",
ocf_cache_get_id(cache));
ocf_cache_log(cache, log_err, "Tracing not started\n");
return -EINVAL;
}

View File

@ -42,7 +42,6 @@ class Backfill(Structure):
class CacheConfig(Structure):
_fields_ = [
("_id", c_uint16),
("_name", c_char_p),
("_cache_mode", c_uint32),
("_eviction_policy", c_uint32),
@ -134,8 +133,7 @@ class Cache:
def __init__(
self,
owner,
cache_id: int = DEFAULT_ID,
name: str = "",
name: str = "cache",
cache_mode: CacheMode = CacheMode.DEFAULT,
eviction_policy: EvictionPolicy = EvictionPolicy.DEFAULT,
promotion_policy: PromotionPolicy = PromotionPolicy.DEFAULT,
@ -154,8 +152,7 @@ class Cache:
self.cache_line_size = cache_line_size
self.cfg = CacheConfig(
_id=cache_id,
_name=name.encode("ascii") if name else None,
_name=cast(create_string_buffer(name.encode("ascii")), c_char_p),
_cache_mode=cache_mode,
_eviction_policy=eviction_policy,
_promotion_policy=promotion_policy,
@ -437,7 +434,7 @@ class Cache:
raise OcfError("Failed getting stats", status)
line_size = CacheLineSize(cache_info.cache_line_size)
cache_id = self.owner.lib.ocf_cache_get_id(self)
cache_name = self.owner.lib.ocf_cache_get_name(self).decode("ascii")
self.read_unlock()
return {
@ -468,7 +465,7 @@ class Cache:
"core_count": cache_info.core_count,
"metadata_footprint": Size(cache_info.metadata_footprint),
"metadata_end_offset": Size(cache_info.metadata_end_offset),
"cache_id": cache_id,
"cache_name": cache_name,
},
"block": struct_to_dict(block),
"req": struct_to_dict(req),

View File

@ -38,11 +38,9 @@ class UserMetadata(Structure):
class CoreConfig(Structure):
_fields_ = [
("_name", c_char_p),
("_uuid", Uuid),
("_volume_type", c_uint8),
("_core_id", c_uint16),
("_name", c_char_p),
("_cache_id", c_uint16),
("_try_add", c_bool),
("_seq_cutoff_threshold", c_uint32),
("_user_metadata", UserMetadata),
@ -57,14 +55,12 @@ class Core:
self,
device: Volume,
try_add: bool,
name: str = "",
core_id: int = DEFAULT_ID,
name: str = "core",
seq_cutoff_threshold: int = DEFAULT_SEQ_CUTOFF_THRESHOLD,
):
self.cache = None
self.device = device
self.device_name = device.uuid
self.core_id = core_id
self.handle = c_void_p()
self.cfg = CoreConfig(
_uuid=Uuid(
@ -74,8 +70,7 @@ class Core:
),
_size=len(self.device_name) + 1,
),
_core_id=self.core_id,
_name=name.encode("ascii") if name else None,
_name=cast(create_string_buffer(name.encode("ascii")), c_char_p),
_volume_type=self.device.type_id,
_try_add=try_add,
_seq_cutoff_threshold=seq_cutoff_threshold,

View File

@ -118,5 +118,5 @@ def get_default_ctx(logger):
lib = OcfLib.getInstance()
lib.ocf_mngt_cache_get_by_id.argtypes = [c_void_p, c_void_p, c_void_p]
lib.ocf_mngt_cache_get_by_id.restype = c_int
lib.ocf_mngt_cache_get_by_name.argtypes = [c_void_p, c_void_p, c_void_p]
lib.ocf_mngt_cache_get_by_name.restype = c_int

View File

@ -27,7 +27,9 @@ class OcfErrorCode(IntEnum):
OCF_ERR_NO_FREE_RAM = auto()
OCF_ERR_START_CACHE_FAIL = auto()
OCF_ERR_CACHE_NOT_EXIST = auto()
OCF_ERR_CORE_NOT_EXIST = auto()
OCF_ERR_CACHE_EXIST = auto()
OCF_ERR_CORE_EXIST = auto()
OCF_ERR_TOO_MANY_CORES = auto()
OCF_ERR_CORE_NOT_AVAIL = auto()
OCF_ERR_NOT_OPEN_EXC = auto()

View File

@ -128,7 +128,7 @@ def test_add_remove_30core(pyocf_ctx):
stats = cache.get_stats()
assert stats["conf"]["core_count"] == i
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
core = Core.using_device(core_device, name=f"core{i}")
core_devices.append(core)
cache.add_core(core)
@ -152,13 +152,13 @@ def test_adding_to_random_cache(pyocf_ctx):
# Create 5 cache devices
for i in range(0, cache_amount):
cache_device = Volume(S.from_MiB(30))
cache = Cache.start_on_device(cache_device)
cache = Cache.start_on_device(cache_device, name=f"cache{i}")
cache_devices.append(cache)
# Create 50 core devices and add to random cache
for i in range(0, core_amount):
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
core = Core.using_device(core_device, name=f"core{i}")
core_devices[core] = randint(0, cache_amount - 1)
cache_devices[core_devices[core]].add_core(core)
@ -204,13 +204,13 @@ def test_adding_core_already_used(pyocf_ctx, cache_mode, cls):
# Start first cache device
cache_device1 = Volume(S.from_MiB(30))
cache1 = Cache.start_on_device(
cache_device1, cache_mode=cache_mode, cache_line_size=cls
cache_device1, cache_mode=cache_mode, cache_line_size=cls, name="cache1"
)
# Start second cache device
cache_device2 = Volume(S.from_MiB(30))
cache2 = Cache.start_on_device(
cache_device2, cache_mode=cache_mode, cache_line_size=cls
cache_device2, cache_mode=cache_mode, cache_line_size=cls, name="cache2"
)
# Create core device
@ -246,7 +246,7 @@ def test_add_remove_incrementally(pyocf_ctx, cache_mode, cls):
# Create 5 core devices and add to cache
for i in range(0, core_amount):
core_device = Volume(S.from_MiB(10))
core = Core.using_device(core_device)
core = Core.using_device(core_device, name=f"core{i}")
core_devices.append(core)
cache.add_core(core)

View File

@ -64,9 +64,9 @@ def test_cache_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
# Create 2 core devices
core_device1 = Volume(S.from_MiB(10))
core1 = Core.using_device(core_device1)
core1 = Core.using_device(core_device1, name="core1")
core_device2 = Volume(S.from_MiB(10))
core2 = Core.using_device(core_device2)
core2 = Core.using_device(core_device2, name="core2")
# Add cores
cache.add_core(core1)
@ -103,9 +103,9 @@ def test_core_change_seq_cut_off_policy(pyocf_ctx, cm, cls):
# Create 2 core devices
core_device1 = Volume(S.from_MiB(10))
core1 = Core.using_device(core_device1)
core1 = Core.using_device(core_device1, name="core1")
core_device2 = Volume(S.from_MiB(10))
core2 = Core.using_device(core_device2)
core2 = Core.using_device(core_device2, name="core2")
# Add cores
cache.add_core(core1)

View File

@ -6,6 +6,7 @@
import logging
from ctypes import c_int, c_void_p, byref
from random import randrange
from itertools import count
import pytest
@ -134,7 +135,6 @@ def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: Me
cache_device = Volume(Size.from_MiB(20))
queue_size = randrange(60000, 2**32)
unblock_size = randrange(1, queue_size)
cache_id = randrange(1, 16385)
volatile_metadata = randrange(2) == 1
unaligned_io = randrange(2) == 1
submit_fast = randrange(2) == 1
@ -145,7 +145,6 @@ def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: Me
cache_device,
cache_mode=mode,
cache_line_size=cls,
cache_id=cache_id,
name=name,
metadata_layout=MetadataLayout.SEQUENTIAL,
metadata_volatile=volatile_metadata,
@ -158,7 +157,6 @@ def test_start_params(pyocf_ctx, mode: CacheMode, cls: CacheLineSize, layout: Me
assert stats["conf"]["cache_mode"] == mode, "Cache mode"
assert stats["conf"]["cache_line_size"] == cls, "Cache line size"
assert stats["conf"]["eviction_policy"] == EvictionPolicy.DEFAULT, "Eviction policy"
assert stats["conf"]["cache_id"] == cache_id, "Cache id"
assert cache.get_name() == name, "Cache name"
# TODO: metadata_layout, metadata_volatile, max_queue_size,
# queue_unblock_size, pt_unaligned_io, use_submit_fast
@ -209,27 +207,29 @@ def test_start_stop_multiple(pyocf_ctx):
caches_no = randrange(6, 11)
for i in range(1, caches_no):
cache_device = Volume(Size.from_MiB(20))
cache_name = f"cache{i}"
cache_mode = CacheMode(randrange(0, len(CacheMode)))
size = 4096 * 2**randrange(0, len(CacheLineSize))
cache_line_size = CacheLineSize(size)
cache = Cache.start_on_device(
cache_device,
name=cache_name,
cache_mode=cache_mode,
cache_line_size=cache_line_size)
caches.append(cache)
stats = cache.get_stats()
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
assert stats["conf"]["cache_id"] == i, "Cache id"
assert stats["conf"]["cache_name"] == cache_name, "Cache name"
caches.sort(key=lambda e: randrange(1000))
for cache in caches:
logger.info("Getting stats before stopping cache")
stats = cache.get_stats()
cache_id = stats["conf"]["cache_id"]
cache_name = stats["conf"]["cache_name"]
cache.stop()
assert get_cache_by_id(pyocf_ctx, cache_id) != 0, "Try getting cache after stopping it"
assert get_cache_by_name(pyocf_ctx, cache_name) != 0, "Try getting cache after stopping it"
def test_100_start_stop(pyocf_ctx):
@ -239,20 +239,22 @@ def test_100_start_stop(pyocf_ctx):
for i in range(1, 101):
cache_device = Volume(Size.from_MiB(20))
cache_name = f"cache{i}"
cache_mode = CacheMode(randrange(0, len(CacheMode)))
size = 4096 * 2**randrange(0, len(CacheLineSize))
cache_line_size = CacheLineSize(size)
cache = Cache.start_on_device(
cache_device,
name=cache_name,
cache_mode=cache_mode,
cache_line_size=cache_line_size)
stats = cache.get_stats()
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
assert stats["conf"]["cache_id"] == 1, "Cache id"
assert stats["conf"]["cache_name"] == cache_name, "Cache name"
cache.stop()
assert get_cache_by_id(pyocf_ctx, 1) != 0, "Try getting cache after stopping it"
assert get_cache_by_name(pyocf_ctx, "cache1") != 0, "Try getting cache after stopping it"
def test_start_stop_incrementally(pyocf_ctx):
@ -262,6 +264,7 @@ def test_start_stop_incrementally(pyocf_ctx):
and then proportions are reversed and number of caches gradually falls to 0.
"""
counter = count()
caches = []
caches_limit = 10
add = True
@ -271,19 +274,21 @@ def test_start_stop_incrementally(pyocf_ctx):
if add:
for i in range(0, randrange(3, 5) if increase else randrange(1, 3)):
cache_device = Volume(Size.from_MiB(20))
cache_name = f"cache{next(counter)}"
cache_mode = CacheMode(randrange(0, len(CacheMode)))
size = 4096 * 2**randrange(0, len(CacheLineSize))
cache_line_size = CacheLineSize(size)
cache = Cache.start_on_device(
cache_device,
name=cache_name,
cache_mode=cache_mode,
cache_line_size=cache_line_size)
caches.append(cache)
stats = cache.get_stats()
assert stats["conf"]["cache_mode"] == cache_mode, "Cache mode"
assert stats["conf"]["cache_line_size"] == cache_line_size, "Cache line size"
assert stats["conf"]["cache_id"] == len(caches), "Cache id"
assert stats["conf"]["cache_name"] == cache_name, "Cache name"
if len(caches) == caches_limit:
increase = False
else:
@ -294,34 +299,34 @@ def test_start_stop_incrementally(pyocf_ctx):
cache = caches.pop()
logger.info("Getting stats before stopping cache")
stats = cache.get_stats()
cache_id = stats["conf"]["cache_id"]
cache_name = stats["conf"]["cache_name"]
cache.stop()
assert get_cache_by_id(pyocf_ctx, cache_id) !=\
0, "Try getting cache after stopping it"
assert get_cache_by_name(pyocf_ctx, cache_name) != 0, \
"Try getting cache after stopping it"
add = not add
@pytest.mark.parametrize("mode", CacheMode)
@pytest.mark.parametrize("cls", CacheLineSize)
def test_start_cache_same_id(pyocf_ctx, mode, cls):
"""Adding two caches with the same cache_id
Check that OCF does not allow for 2 caches to be started with the same cache_id
"""Adding two caches with the same name
Check that OCF does not allow for 2 caches to be started with the same cache_name
"""
cache_device1 = Volume(Size.from_MiB(20))
cache_device2 = Volume(Size.from_MiB(20))
cache_id = randrange(1, 16385)
cache_name = "cache"
cache = Cache.start_on_device(cache_device1,
cache_mode=mode,
cache_line_size=cls,
cache_id=cache_id)
name=cache_name)
cache.get_stats()
with pytest.raises(OcfError, match="OCF_ERR_CACHE_EXIST"):
cache = Cache.start_on_device(cache_device2,
cache_mode=mode,
cache_line_size=cls,
cache_id=cache_id)
name=cache_name)
cache.get_stats()
@ -333,11 +338,15 @@ def test_start_cache_same_device(pyocf_ctx, mode, cls):
"""
cache_device = Volume(Size.from_MiB(20))
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
cache = Cache.start_on_device(
cache_device, cache_mode=mode, cache_line_size=cls, name="cache1"
)
cache.get_stats()
with pytest.raises(OcfError, match="OCF_ERR_NOT_OPEN_EXC"):
cache = Cache.start_on_device(cache_device, cache_mode=mode, cache_line_size=cls)
cache = Cache.start_on_device(
cache_device, cache_mode=mode, cache_line_size=cls, name="cache2"
)
cache.get_stats()
@ -504,8 +513,8 @@ def check_md5_sums(exported_obj: Core, mode: CacheMode):
"MD5 check: core device vs exported object"
def get_cache_by_name(ctx, cache_name):
    """Look up a started cache by name via the OCF management API.

    Returns 0 on success, a non-zero OCF error code otherwise (e.g. after
    the cache has been stopped).

    Fix: removed the stale duplicate ``def get_cache_by_id`` header line and
    the unreachable leftover ``ocf_mngt_cache_get_by_id`` call left over
    from the id-based lookup this commit replaces.
    """
    cache_pointer = c_void_p()
    return OcfLib.getInstance().ocf_mngt_cache_get_by_name(
        ctx.ctx_handle, cache_name, byref(cache_pointer)
    )