Merge pull request #930 from robertbaldyga/cache-passive-state

Add cache passive state
Robert Baldyga authored on 2021-09-10 13:09:23 +02:00; committed by GitHub
commit 6792c1b455
9 changed files with 529 additions and 206 deletions

View File

@@ -68,6 +68,7 @@ static const char *cache_states_name[ocf_cache_state_max + 1] = {
[ocf_cache_state_stopping] = "Stopping",
[ocf_cache_state_initializing] = "Initializing",
[ocf_cache_state_incomplete] = "Incomplete",
[ocf_cache_state_passive] = "Passive",
[ocf_cache_state_max] = "Unknown",
};
@@ -2775,9 +2776,13 @@ int list_caches(unsigned int list_format, bool by_id_path)
cache_mode_to_name(curr_cache->mode));
} else {
tmp_status = get_cache_state_name(curr_cache->state);
if (curr_cache->state & (1 << ocf_cache_state_passive)) {
strncpy(mode_string, "-", sizeof(mode_string));
} else {
snprintf(mode_string, sizeof(mode_string), "%s",
cache_mode_to_name(curr_cache->mode));
}
}
fprintf(intermediate_file[1], TAG(TREE_BRANCH)
"%s,%u,%s,%s,%s,%s\n",

View File

@@ -198,6 +198,10 @@ int start_cache_command_handle_option(char *opt, const char **arg)
command_args_values.cache_id = atoi(arg[0]);
} else if (!strcmp(opt, "load")) {
command_args_values.state = CACHE_INIT_LOAD;
} else if (!strcmp(opt, "bind")) {
command_args_values.state = CACHE_INIT_BIND;
} else if (!strcmp(opt, "activate")) {
command_args_values.state = CACHE_INIT_ACTIVATE;
} else if (!strcmp(opt, "cache-device")) {
if(validate_device_name(arg[0]) == FAILURE)
return FAILURE;
@@ -243,6 +247,8 @@ static cli_option start_options[] = {
{'d', "cache-device", CACHE_DEVICE_DESC, 1, "DEVICE", CLI_OPTION_REQUIRED},
{'i', "cache-id", CACHE_ID_DESC_LONG, 1, "ID", 0},
{'l', "load", "Load cache metadata from caching device (DANGEROUS - see manual or Admin Guide for details)"},
{'b', "bind", "Bind caching device (DANGEROUS - see manual or Admin Guide for details)"},
{'a', "activate", "Activate caching device after bind"},
{'f', "force", "Force the creation of cache instance"},
{'c', "cache-mode", "Set cache mode from available: {"CAS_CLI_HELP_START_CACHE_MODES"} "CAS_CLI_HELP_START_CACHE_MODES_FULL"; without this parameter Write-Through will be set by default", 1, "NAME"},
{'x', "cache-line-size", "Set cache line size in kibibytes: {4,8,16,32,64}[KiB] (default: %d)", 1, "NUMBER", CLI_OPTION_DEFAULT_INT, 0, 0, ocf_cache_line_size_default / KiB},
@@ -293,6 +299,11 @@ int handle_start()
return FAILURE;
}
if (command_args_values.state == CACHE_INIT_BIND && command_args_values.force) {
cas_printf(LOG_ERR, "Use of 'bind' and 'force' simultaneously is forbidden.\n");
return FAILURE;
}
cache_device = open(command_args_values.cache_device, O_RDONLY);
if (cache_device < 0) {
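
Taken together, the new options form a two-step failover flow. A hypothetical invocation sequence, with placeholder device path and cache id:

/* Hypothetical usage of the options added above (paths and ids are
 * placeholders, not taken from the patch):
 *
 *   casadm --start-cache --cache-device /dev/nvme0n1p1 --bind
 *   casadm --start-cache --cache-device /dev/nvme0n1p1 --cache-id 1 --activate
 *
 * --bind starts the instance in passive state; --activate promotes it.
 * As enforced in handle_start() above, --bind cannot be combined with
 * --force.
 */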

View File

@@ -1215,7 +1215,8 @@ ocf_part_id_t cas_cls_classify(ocf_cache_t cache, struct bio *bio)
cas_cls_eval_t ret;
cls = cas_get_classifier(cache);
ENV_BUG_ON(!cls);
if (!cls)
return 0;
_cas_cls_get_bio_context(bio, &io);

View File

@@ -576,6 +576,7 @@ static int exit_instance_finish(void *data)
else
result = ctx->error;
if (!ocf_cache_is_passive(ctx->cache))
cas_cls_deinit(ctx->cache);
vfree(cache_priv);
@@ -1344,11 +1345,11 @@ int cache_mngt_add_core_to_cache(const char *cache_name, size_t name_len,
if (result)
goto error_affter_lock;
result = block_dev_create_exported_object(core);
result = kcas_core_create_exported_object(core);
if (result)
goto error_after_add_core;
result = block_dev_activate_exported_object(core);
result = kcas_core_activate_exported_object(core);
if (result)
goto error_after_create_exported_object;
@@ -1366,7 +1367,7 @@ int cache_mngt_add_core_to_cache(const char *cache_name, size_t name_len,
return 0;
error_after_create_exported_object:
block_dev_destroy_exported_object(core);
kcas_core_destroy_exported_object(core);
error_after_add_core:
init_completion(&remove_context.cmpl);
@@ -1382,19 +1383,6 @@ error_affter_lock:
return result;
}
static int _cache_mngt_create_exported_object(ocf_core_t core, void *cntx)
{
int result;
result = block_dev_create_exported_object(core);
if (result)
return result;
result = block_dev_activate_exported_object(core);
return result;
}
static int _cache_mngt_remove_core_flush(ocf_cache_t cache,
struct kcas_remove_core *cmd)
{
@@ -1485,7 +1473,7 @@ static int _cache_mngt_remove_core_prepare(ocf_cache_t cache, ocf_core_t core,
if (!core_active)
return -OCF_ERR_CORE_IN_INACTIVE_STATE;
result = block_dev_destroy_exported_object(core);
result = kcas_core_destroy_exported_object(core);
if (result)
return result;
@@ -1589,7 +1577,7 @@ int cache_mngt_remove_inactive_core(struct kcas_remove_inactive *cmd)
* exported object; instead of trying to roll this back, we
* inform the user about the error.
*/
result = block_dev_destroy_exported_object(core);
result = kcas_core_destroy_exported_object(core);
if (result)
goto unlock;
@@ -1726,9 +1714,22 @@ out_get:
return result;
}
static int _cache_mngt_destroy_exported_object(ocf_core_t core, void *cntx)
static int _cache_mngt_create_core_exp_obj(ocf_core_t core, void *cntx)
{
if (block_dev_destroy_exported_object(core)) {
int result;
result = kcas_core_create_exported_object(core);
if (result)
return result;
result = kcas_core_activate_exported_object(core);
return result;
}
static int _cache_mngt_destroy_core_exp_obj(ocf_core_t core, void *cntx)
{
if (kcas_core_destroy_exported_object(core)) {
ocf_cache_t cache = ocf_core_get_cache(core);
printk(KERN_ERR "Cannot to destroy exported object, %s.%s\n",
@@ -1739,32 +1740,53 @@ static int _cache_mngt_destroy_exported_object(ocf_core_t core, void *cntx)
return 0;
}
static int cache_mngt_initialize_core_objects(ocf_cache_t cache)
static int cache_mngt_initialize_core_exported_objects(ocf_cache_t cache)
{
int result;
result = ocf_core_visit(cache, _cache_mngt_create_exported_object, NULL,
result = ocf_core_visit(cache, _cache_mngt_create_core_exp_obj, NULL,
true);
if (result) {
/* Need to clean up */
ocf_core_visit(cache, _cache_mngt_destroy_exported_object, NULL,
ocf_core_visit(cache, _cache_mngt_destroy_core_exp_obj, NULL,
true);
}
return result;
}
static void cache_mngt_destroy_cache_exp_obj(ocf_cache_t cache)
{
if (kcas_cache_destroy_exported_object(cache)) {
printk(KERN_ERR "Cannot destroy %s exported object\n",
ocf_cache_get_name(cache));
}
}
static int cache_mngt_initialize_cache_exported_object(ocf_cache_t cache)
{
int result;
result = kcas_cache_create_exported_object(cache);
if (result)
return result;
result = kcas_cache_activate_exported_object(cache);
if (result) {
cache_mngt_destroy_cache_exp_obj(cache);
return result;
}
return 0;
}
int cache_mngt_prepare_cache_cfg(struct ocf_mngt_cache_config *cfg,
struct ocf_mngt_cache_device_config *device_cfg,
struct kcas_start_cache *cmd)
{
int init_cache, result;
struct block_device *bdev;
int part_count;
char holder[] = "CAS START\n";
char cache_name[OCF_CACHE_NAME_SIZE];
uint16_t cache_id;
bool is_part;
if (!cmd)
return -OCF_ERR_INVAL;
@@ -1810,28 +1832,15 @@ int cache_mngt_prepare_cache_cfg(struct ocf_mngt_cache_config *cfg,
switch (init_cache) {
case CACHE_INIT_LOAD:
case CACHE_INIT_ACTIVATE:
device_cfg->open_cores = true;
case CACHE_INIT_NEW:
case CACHE_INIT_BIND:
break;
default:
return -OCF_ERR_INVAL;
}
bdev = blkdev_get_by_path(device_cfg->uuid.data, (FMODE_EXCL|FMODE_READ),
holder);
if (IS_ERR(bdev)) {
return (PTR_ERR(bdev) == -EBUSY) ?
-OCF_ERR_NOT_OPEN_EXC :
-OCF_ERR_INVAL_VOLUME_TYPE;
}
is_part = (cas_bdev_whole(bdev) != bdev);
part_count = cas_blk_get_part_count(bdev);
blkdev_put(bdev, (FMODE_EXCL|FMODE_READ));
if (!is_part && part_count > 1 && !device_cfg->force)
return -KCAS_ERR_CONTAINS_PART;
result = cas_blk_identify_type(device_cfg->uuid.data,
&device_cfg->volume_type);
if (result)
@@ -2042,7 +2051,8 @@ out_bdev:
return result;
}
static int _cache_start_finalize(ocf_cache_t cache)
static int _cache_start_finalize(ocf_cache_t cache,
struct kcas_start_cache *cmd)
{
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
struct _cache_mngt_attach_context *ctx = cache_priv->attach_context;
@@ -2050,27 +2060,159 @@ static int _cache_start_finalize(ocf_cache_t cache)
_cache_mngt_log_cache_device_path(cache, ctx->device_cfg);
if (cmd->init_cache != CACHE_INIT_BIND) {
result = cas_cls_init(cache);
if (result) {
ctx->ocf_start_error = result;
return result;
}
ctx->cls_inited = true;
}
result = cache_mngt_initialize_core_objects(cache);
switch(cmd->init_cache) {
case CACHE_INIT_ACTIVATE:
cache_mngt_destroy_cache_exp_obj(cache);
/* fall through */
case CACHE_INIT_LOAD:
result = cache_mngt_initialize_core_exported_objects(cache);
if (result) {
ctx->ocf_start_error = result;
return result;
}
ocf_core_visit(cache, _cache_mngt_core_device_loaded_visitor,
NULL, false);
break;
case CACHE_INIT_BIND:
result = cache_mngt_initialize_cache_exported_object(cache);
if (result) {
ctx->ocf_start_error = result;
return result;
}
break;
case CACHE_INIT_NEW:
break;
default:
BUG();
}
init_instance_complete(ctx, cache);
return 0;
}
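
Each init mode now gets its own finalization path in the switch above; a summary sketch (an interpretation for orientation, not taken from the patch comments):

/* Finalization per init mode, as implemented above:
 *   CACHE_INIT_NEW      - classifier only; core exported objects appear
 *                         later, when cores are added
 *   CACHE_INIT_LOAD     - classifier, then exported objects for the
 *                         cores restored from metadata
 *   CACHE_INIT_BIND     - no classifier; a cache-level exported object
 *                         is created for the passive instance
 *   CACHE_INIT_ACTIVATE - classifier; the passive cache exported object
 *                         is torn down and core exported objects are
 *                         created in its place
 */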
int cache_mngt_check_bdev(struct ocf_mngt_cache_device_config *device_cfg)
{
char holder[] = "CAS START\n";
struct block_device *bdev;
int part_count;
bool is_part;
bdev = blkdev_get_by_path(device_cfg->uuid.data,
(FMODE_EXCL|FMODE_READ), holder);
if (IS_ERR(bdev)) {
return (PTR_ERR(bdev) == -EBUSY) ?
-OCF_ERR_NOT_OPEN_EXC :
-OCF_ERR_INVAL_VOLUME_TYPE;
}
is_part = (cas_bdev_whole(bdev) != bdev);
part_count = cas_blk_get_part_count(bdev);
blkdev_put(bdev, (FMODE_EXCL|FMODE_READ));
if (!is_part && part_count > 1 && !device_cfg->force)
return -KCAS_ERR_CONTAINS_PART;
return 0;
}
int cache_mngt_init_instance_activate(struct ocf_mngt_cache_config *cfg,
struct ocf_mngt_cache_device_config *device_cfg,
struct kcas_start_cache *cmd)
{
struct _cache_mngt_attach_context *context;
ocf_cache_t cache;
struct cache_priv *cache_priv;
ocf_volume_t cache_volume;
const struct ocf_volume_uuid *cache_uuid;
char cache_name[OCF_CACHE_NAME_SIZE];
int result = 0;
if (!try_module_get(THIS_MODULE))
return -KCAS_ERR_SYSTEM;
cache_name_from_id(cache_name, cmd->cache_id);
result = ocf_mngt_cache_get_by_name(cas_ctx, cache_name,
OCF_CACHE_NAME_SIZE, &cache);
if (result)
goto out_module_put;
if (!ocf_cache_is_passive(cache)) {
result = -OCF_ERR_CACHE_EXIST;
goto out_cache_put;
}
result = _cache_mngt_lock_sync(cache);
if (result)
goto out_cache_put;
cache_volume = ocf_cache_get_volume(cache);
cache_uuid = ocf_volume_get_uuid(cache_volume);
if (strcmp(device_cfg->uuid.data, cache_uuid->data) != 0) {
result = cache_mngt_check_bdev(device_cfg);
if (result)
goto out_cache_unlock;
}
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
result = -ENOMEM;
goto out_cache_unlock;
}
context->device_cfg = device_cfg;
context->cmd = cmd;
context->cache = cache;
cache_priv = ocf_cache_get_priv(cache);
cache_priv->attach_context = context;
context->rollback_thread = cas_lazy_thread_create(cache_start_rollback,
context, "cas_cache_rollback_complete");
if (IS_ERR(context->rollback_thread)) {
result = PTR_ERR(context->rollback_thread);
goto err_free_context;
}
_cache_mngt_async_context_init(&context->async);
ocf_mngt_cache_activate(cache, device_cfg,
_cache_mngt_start_complete, context);
result = wait_for_completion_interruptible(&context->async.cmpl);
result = _cache_mngt_async_caller_set_result(&context->async, result);
if (result == -KCAS_ERR_WAITING_INTERRUPTED)
goto out_cache_put;
cas_lazy_thread_stop(context->rollback_thread);
if (result)
goto err_free_context;
result = _cache_start_finalize(cache, cmd);
err_free_context:
kfree(context);
cache_priv->attach_context = NULL;
out_cache_unlock:
ocf_mngt_cache_unlock(cache);
out_cache_put:
ocf_mngt_cache_put(cache);
out_module_put:
module_put(THIS_MODULE);
return result;
}
int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg,
struct ocf_mngt_cache_device_config *device_cfg,
struct kcas_start_cache *cmd)
@@ -2079,12 +2221,20 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg,
ocf_cache_t cache;
struct cache_priv *cache_priv;
int result = 0, rollback_result = 0;
bool load = (cmd && cmd->init_cache == CACHE_INIT_LOAD);
if (cmd->init_cache == CACHE_INIT_ACTIVATE)
return cache_mngt_init_instance_activate(cfg, device_cfg, cmd);
if (!try_module_get(THIS_MODULE))
return -KCAS_ERR_SYSTEM;
if (load)
result = cache_mngt_check_bdev(device_cfg);
if (result) {
module_put(THIS_MODULE);
return result;
}
if (cmd->init_cache == CACHE_INIT_LOAD)
result = _cache_mngt_check_metadata(cfg, cmd->cache_path_name);
if (result) {
module_put(THIS_MODULE);
@@ -2134,12 +2284,22 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg,
cache_priv = ocf_cache_get_priv(cache);
cache_priv->attach_context = context;
if (load) {
ocf_mngt_cache_load(cache, device_cfg,
_cache_mngt_start_complete, context);
} else {
switch (cmd->init_cache) {
case CACHE_INIT_NEW:
ocf_mngt_cache_attach(cache, device_cfg,
_cache_mngt_start_complete, context);
break;
case CACHE_INIT_LOAD:
ocf_mngt_cache_load(cache, device_cfg,
_cache_mngt_start_complete, context);
break;
case CACHE_INIT_BIND:
ocf_mngt_cache_bind(cache, device_cfg,
_cache_mngt_start_complete, context);
break;
default:
result = -OCF_ERR_INVAL;
goto err;
}
result = wait_for_completion_interruptible(&context->async.cmpl);
@@ -2150,7 +2310,7 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg,
if (result)
goto err;
result = _cache_start_finalize(cache);
result = _cache_start_finalize(cache, cmd);
if (result)
goto err;
@@ -2474,7 +2634,7 @@ int cache_mngt_exit_instance(const char *cache_name, size_t name_len, int flush)
* this time, so we need to flush cache again after disabling
* exported object. The second flush should be much faster.
*/
if (flush)
if (flush && ocf_cache_is_running(cache))
status = _cache_flush_with_lock(cache);
if (status)
goto put;
@@ -2491,19 +2651,24 @@ int cache_mngt_exit_instance(const char *cache_name, size_t name_len, int flush)
}
/* Destroy cache devices */
status = block_dev_destroy_all_exported_objects(cache);
status = kcas_cache_destroy_all_core_exported_objects(cache);
if (status != 0) {
printk(KERN_WARNING
"Failed to remove all cached devices\n");
goto stop_thread;
}
status = kcas_cache_destroy_exported_object(cache);
if (status != 0) {
printk(KERN_WARNING
"Failed to remove cache exported object\n");
goto stop_thread;
}
/* Flush cache again. This time we don't allow interruption. */
if (flush)
if (flush && ocf_cache_is_running(cache))
flush_status = _cache_mngt_cache_flush_uninterruptible(cache);
context->flush_status = flush_status;
if (flush && !flush_status)
BUG_ON(ocf_mngt_cache_is_dirty(cache));
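
Both flush calls in this function are now gated on ocf_cache_is_running(), the assumption being that a passive instance holds no dirty data to flush. The two checks above reduce to a single predicate (helper name hypothetical, sketch only):

static inline bool should_flush(ocf_cache_t cache, int flush_requested)
{
	/* Flushing only applies to a cache that is actually running. */
	return flush_requested && ocf_cache_is_running(cache);
}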

View File

@@ -33,6 +33,9 @@ struct bd_object {
struct workqueue_struct *expobj_wq;
/*< Workqueue for I/O handled by top vol */
ocf_volume_t front_volume;
/*< Cache/core front volume */
};
static inline struct bd_object *bd_object(ocf_volume_t vol)

View File

@@ -6,7 +6,7 @@
#include "cas_cache.h"
#include "utils/cas_err.h"
static void _blockdev_set_bio_data(struct blk_data *data, struct bio *bio)
static void blkdev_set_bio_data(struct blk_data *data, struct bio *bio)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
struct bio_vec *bvec;
@@ -30,7 +30,7 @@ static void _blockdev_set_bio_data(struct blk_data *data, struct bio *bio)
#endif
}
static inline int _blkdev_can_hndl_bio(struct bio *bio)
static inline int blkdev_can_hndl_bio(struct bio *bio)
{
if (CAS_CHECK_BARRIER(bio)) {
CAS_PRINT_RL(KERN_WARNING
@@ -42,7 +42,7 @@ static inline int _blkdev_can_hndl_bio(struct bio *bio)
return 0;
}
void _blockdev_set_exported_object_flush_fua(ocf_core_t core)
void blkdev_set_exported_object_flush_fua(ocf_core_t core)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_volume_t core_vol = ocf_core_get_volume(core);
@@ -66,7 +66,7 @@ void _blockdev_set_exported_object_flush_fua(ocf_core_t core)
cas_set_queue_flush_fua(exp_q, flush, fua);
}
static void _blockdev_set_discard_properties(ocf_cache_t cache,
static void blkdev_set_discard_properties(ocf_cache_t cache,
struct request_queue *exp_q, struct block_device *cache_bd,
struct block_device *core_bd, sector_t core_sectors)
{
@@ -97,7 +97,7 @@ static void _blockdev_set_discard_properties(ocf_cache_t cache,
* Map geometry of underlying (core) object geometry (sectors etc.)
* to geometry of exported object.
*/
static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
static int blkdev_core_set_geometry(struct casdsk_disk *dsk, void *private)
{
ocf_core_t core;
ocf_cache_t cache;
@@ -148,9 +148,9 @@ static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
/* We don't want to receive split requests */
CAS_SET_QUEUE_CHUNK_SECTORS(exp_q, 0);
_blockdev_set_exported_object_flush_fua(core);
blkdev_set_exported_object_flush_fua(core);
_blockdev_set_discard_properties(cache, exp_q, cache_bd, core_bd,
blkdev_set_discard_properties(cache, exp_q, cache_bd, core_bd,
sectors);
return 0;
@@ -158,26 +158,24 @@ static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
struct defer_bio_context {
struct work_struct io_work;
void (*cb)(ocf_core_t core, struct bio *bio);
ocf_core_t core;
void (*cb)(struct bd_object *bvol, struct bio *bio);
struct bd_object *bvol;
struct bio *bio;
};
static void _blockdev_defer_bio_work(struct work_struct *work)
static void blkdev_defer_bio_work(struct work_struct *work)
{
struct defer_bio_context *context;
context = container_of(work, struct defer_bio_context, io_work);
context->cb(context->core, context->bio);
context->cb(context->bvol, context->bio);
kfree(context);
}
static void _blockdev_defer_bio(ocf_core_t core, struct bio *bio,
void (*cb)(ocf_core_t core, struct bio *bio))
static void blkdev_defer_bio(struct bd_object *bvol, struct bio *bio,
void (*cb)(struct bd_object *bvol, struct bio *bio))
{
struct defer_bio_context *context;
ocf_volume_t volume = ocf_core_get_volume(core);
struct bd_object *bvol = bd_object(volume);
BUG_ON(!bvol->expobj_wq);
@@ -190,12 +188,12 @@ static void _blockdev_defer_bio(ocf_core_t core, struct bio *bio,
context->cb = cb;
context->bio = bio;
context->core = core;
INIT_WORK(&context->io_work, _blockdev_defer_bio_work);
context->bvol = bvol;
INIT_WORK(&context->io_work, blkdev_defer_bio_work);
queue_work(bvol->expobj_wq, &context->io_work);
}
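
The deferral machinery above exists because bios can reach these handlers from interrupt context (see the in_interrupt() checks further down), where they must not block; the work is bounced to the exported object's workqueue. A stripped-down sketch of the same pattern, with hypothetical names:

/* Defer-to-workqueue pattern, reduced to its core (sketch only): */
struct defer_ctx {
	struct work_struct work;
	void (*fn)(void *arg);
	void *arg;
};

static void defer_worker(struct work_struct *w)
{
	struct defer_ctx *ctx = container_of(w, struct defer_ctx, work);

	ctx->fn(ctx->arg);	/* now running in process context */
	kfree(ctx);
}

/* caller: allocate ctx, INIT_WORK(&ctx->work, defer_worker),
 * then queue_work(wq, &ctx->work); */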
static void _block_dev_complete_data(struct blk_data *master, int error)
static void blkdev_complete_data_master(struct blk_data *master, int error)
{
master->error = master->error ?: error;
@@ -208,7 +206,7 @@ static void _block_dev_complete_data(struct blk_data *master, int error)
cas_free_blk_data(master);
}
static void block_dev_complete_data(struct ocf_io *io, int error)
static void blkdev_complete_data(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
struct blk_data *master = io->priv2;
@@ -220,38 +218,36 @@ static void block_dev_complete_data(struct ocf_io *io, int error)
if (bio != master->bio)
bio_put(bio);
_block_dev_complete_data(master, error);
blkdev_complete_data_master(master, error);
}
struct blockdev_data_master_ctx {
struct blkdev_data_master_ctx {
struct blk_data *data;
struct bio *bio;
uint32_t master_size;
unsigned long long start_time;
};
static int _blockdev_handle_data_single(ocf_core_t core, struct bio *bio,
struct blockdev_data_master_ctx *master_ctx)
static int blkdev_handle_data_single(struct bd_object *bvol, struct bio *bio,
struct blkdev_data_master_ctx *master_ctx)
{
ocf_cache_t cache;
struct cache_priv *cache_priv;
ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
struct ocf_io *io;
struct blk_data *data;
uint64_t flags = CAS_BIO_OP_FLAGS(bio);
int ret;
cache = ocf_core_get_cache(core);
cache_priv = ocf_cache_get_priv(cache);
data = cas_alloc_blk_data(bio_segments(bio), GFP_NOIO);
if (!data) {
CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n");
return -ENOMEM;
}
_blockdev_set_bio_data(data, bio);
blkdev_set_bio_data(data, bio);
io = ocf_core_new_io(core, cache_priv->io_queues[smp_processor_id()],
io = ocf_volume_new_io(bvol->front_volume, queue,
CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
CAS_BIO_BISIZE(bio), (bio_data_dir(bio) == READ) ?
OCF_READ : OCF_WRITE,
@@ -280,21 +276,21 @@ static int _blockdev_handle_data_single(ocf_core_t core, struct bio *bio,
atomic_inc(&master_ctx->data->master_remaining);
ocf_io_set_cmpl(io, bio, master_ctx->data, block_dev_complete_data);
ocf_io_set_cmpl(io, bio, master_ctx->data, blkdev_complete_data);
ocf_core_submit_io(io);
ocf_volume_submit_io(io);
return 0;
}
static void _blockdev_handle_data(ocf_core_t core, struct bio *bio)
static void blkdev_handle_data(struct bd_object *bvol, struct bio *bio)
{
const uint32_t max_io_sectors = (32*MiB) >> SECTOR_SHIFT;
const uint32_t align_sectors = (128*KiB) >> SECTOR_SHIFT;
struct bio *split = NULL;
uint32_t sectors, to_submit;
int error;
struct blockdev_data_master_ctx master_ctx = {
struct blkdev_data_master_ctx master_ctx = {
.bio = bio,
.master_size = CAS_BIO_BISIZE(bio),
};
@@ -320,12 +316,12 @@ static void _blockdev_handle_data(ocf_core_t core, struct bio *bio)
sectors -= to_submit;
}
error = _blockdev_handle_data_single(core, split, &master_ctx);
error = blkdev_handle_data_single(bvol, split, &master_ctx);
if (error)
goto err;
}
_block_dev_complete_data(master_ctx.data, 0);
blkdev_complete_data_master(master_ctx.data, 0);
return;
@@ -333,12 +329,12 @@ err:
if (split != bio)
bio_put(split);
if (master_ctx.data)
_block_dev_complete_data(master_ctx.data, error);
blkdev_complete_data_master(master_ctx.data, error);
else
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
}
static void block_dev_complete_discard(struct ocf_io *io, int error)
static void blkdev_complete_discard(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
@@ -346,13 +342,14 @@ static void block_dev_complete_discard(struct ocf_io *io, int error)
ocf_io_put(io);
}
static void _blockdev_handle_discard(ocf_core_t core, struct bio *bio)
static void blkdev_handle_discard(struct bd_object *bvol, struct bio *bio)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
struct ocf_io *io;
io = ocf_core_new_io(core, cache_priv->io_queues[smp_processor_id()],
io = ocf_volume_new_io(bvol->front_volume, queue,
CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
CAS_BIO_BISIZE(bio), OCF_WRITE, 0,
CAS_CLEAR_FLUSH(CAS_BIO_OP_FLAGS(bio)));
@@ -364,23 +361,23 @@ static void _blockdev_handle_discard(ocf_core_t core, struct bio *bio)
return;
}
ocf_io_set_cmpl(io, bio, NULL, block_dev_complete_discard);
ocf_io_set_cmpl(io, bio, NULL, blkdev_complete_discard);
ocf_core_submit_discard(io);
ocf_volume_submit_discard(io);
}
static void _blockdev_handle_bio_noflush(ocf_core_t core, struct bio *bio)
static void blkdev_handle_bio_noflush(struct bd_object *bvol, struct bio *bio)
{
if (CAS_IS_DISCARD(bio))
_blockdev_handle_discard(core, bio);
blkdev_handle_discard(bvol, bio);
else
_blockdev_handle_data(core, bio);
blkdev_handle_data(bvol, bio);
}
static void block_dev_complete_flush(struct ocf_io *io, int error)
static void blkdev_complete_flush(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
ocf_core_t core = io->priv2;
struct bd_object *bvol = io->priv2;
ocf_io_put(io);
@@ -391,19 +388,20 @@ static void block_dev_complete_flush(struct ocf_io *io, int error)
}
if (in_interrupt())
_blockdev_defer_bio(core, bio, _blockdev_handle_bio_noflush);
blkdev_defer_bio(bvol, bio, blkdev_handle_bio_noflush);
else
_blockdev_handle_bio_noflush(core, bio);
blkdev_handle_bio_noflush(bvol, bio);
}
static void _blkdev_handle_flush(ocf_core_t core, struct bio *bio)
static void blkdev_handle_flush(struct bd_object *bvol, struct bio *bio)
{
struct ocf_io *io;
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
struct ocf_io *io;
io = ocf_core_new_io(core, cache_priv->io_queues[smp_processor_id()],
0, 0, OCF_WRITE, 0, CAS_SET_FLUSH(0));
io = ocf_volume_new_io(bvol->front_volume, queue, 0, 0, OCF_WRITE, 0,
CAS_SET_FLUSH(0));
if (!io) {
CAS_PRINT_RL(KERN_CRIT
"Out of memory. Ending IO processing.\n");
@@ -411,65 +409,112 @@ static void _blkdev_handle_flush(ocf_core_t core, struct bio *bio)
return;
}
ocf_io_set_cmpl(io, bio, core, block_dev_complete_flush);
ocf_io_set_cmpl(io, bio, bvol, blkdev_complete_flush);
ocf_core_submit_flush(io);
ocf_volume_submit_flush(io);
}
static void _blockdev_handle_bio(ocf_core_t core, struct bio *bio)
static void blkdev_handle_bio(struct bd_object *bvol, struct bio *bio)
{
if (CAS_IS_SET_FLUSH(CAS_BIO_OP_FLAGS(bio)))
_blkdev_handle_flush(core, bio);
blkdev_handle_flush(bvol, bio);
else
_blockdev_handle_bio_noflush(core, bio);
blkdev_handle_bio_noflush(bvol, bio);
}
static void _blockdev_submit_bio(struct casdsk_disk *dsk,
struct bio *bio, void *private)
static void blkdev_submit_bio(struct bd_object *bvol, struct bio *bio)
{
ocf_core_t core = private;
BUG_ON(!core);
if (_blkdev_can_hndl_bio(bio)) {
if (blkdev_can_hndl_bio(bio)) {
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio),
CAS_ERRNO_TO_BLK_STS(-ENOTSUPP));
return;
}
if (in_interrupt())
_blockdev_defer_bio(core, bio, _blockdev_handle_bio);
blkdev_defer_bio(bvol, bio, blkdev_handle_bio);
else
_blockdev_handle_bio(core, bio);
blkdev_handle_bio(bvol, bio);
}
static struct casdsk_exp_obj_ops _blockdev_exp_obj_ops = {
.set_geometry = _blockdev_set_geometry,
.submit_bio = _blockdev_submit_bio,
static void blkdev_core_submit_bio(struct casdsk_disk *dsk,
struct bio *bio, void *private)
{
ocf_core_t core = private;
struct bd_object *bvol;
BUG_ON(!core);
bvol = bd_object(ocf_core_get_volume(core));
blkdev_submit_bio(bvol, bio);
}
static struct casdsk_exp_obj_ops kcas_core_exp_obj_ops = {
.set_geometry = blkdev_core_set_geometry,
.submit_bio = blkdev_core_submit_bio,
};
/**
* @brief this routine actually adds /dev/casM-N inode
*/
int block_dev_activate_exported_object(ocf_core_t core)
static int blkdev_cache_set_geometry(struct casdsk_disk *dsk, void *private)
{
int ret;
ocf_volume_t obj = ocf_core_get_volume(core);
ocf_cache_t cache = ocf_core_get_cache(core);
struct bd_object *bvol = bd_object(obj);
ocf_cache_t cache;
ocf_volume_t volume;
struct bd_object *bvol;
struct request_queue *cache_q, *exp_q;
struct block_device *bd;
sector_t sectors;
ret = casdisk_functions.casdsk_exp_obj_activate(bvol->dsk);
if (ret) {
if (-EEXIST == ret)
ret = KCAS_ERR_FILE_EXISTS;
printk(KERN_ERR "Cannot activate exported object, %s.%s. "
"Error code %d\n", ocf_cache_get_name(cache),
ocf_core_get_name(core), ret);
BUG_ON(!private);
cache = private;
volume = ocf_cache_get_volume(cache);
bvol = bd_object(volume);
bd = casdisk_functions.casdsk_disk_get_blkdev(bvol->dsk);
BUG_ON(!bd);
cache_q = bd->bd_disk->queue;
exp_q = casdisk_functions.casdsk_exp_obj_get_queue(dsk);
sectors = ocf_volume_get_length(volume) >> SECTOR_SHIFT;
set_capacity(casdisk_functions.casdsk_exp_obj_get_gendisk(dsk), sectors);
cas_copy_queue_limits(exp_q, cache_q, cache_q);
blk_stack_limits(&exp_q->limits, &cache_q->limits, 0);
/* We don't want to receive split requests */
CAS_SET_QUEUE_CHUNK_SECTORS(exp_q, 0);
cas_set_queue_flush_fua(exp_q, CAS_CHECK_QUEUE_FLUSH(cache_q),
CAS_CHECK_QUEUE_FUA(cache_q));
return 0;
}
return ret;
static void blkdev_cache_submit_bio(struct casdsk_disk *dsk,
struct bio *bio, void *private)
{
ocf_cache_t cache = private;
struct bd_object *bvol;
BUG_ON(!cache);
bvol = bd_object(ocf_cache_get_volume(cache));
blkdev_submit_bio(bvol, bio);
}
static struct casdsk_exp_obj_ops kcas_cache_exp_obj_ops = {
.set_geometry = blkdev_cache_set_geometry,
.submit_bio = blkdev_cache_submit_bio,
};
/****************************************
* Exported object management functions *
****************************************/
static const char *get_cache_id_string(ocf_cache_t cache)
{
return ocf_cache_get_name(cache) + sizeof("cache") - 1;
@@ -480,37 +525,31 @@ static const char *get_core_id_string(ocf_core_t core)
return ocf_core_get_name(core) + sizeof("core") - 1;
}
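
Both helpers rely on OCF object names carrying a fixed prefix and simply skip past it; sizeof("literal") - 1 is the prefix length without the terminating NUL. For example (values hypothetical):

/* Name layout assumed by the helpers above (example values):
 *   ocf_cache_get_name(cache) == "cache1"  ->  get_cache_id_string() == "1"
 *   ocf_core_get_name(core)   == "core2"   ->  get_core_id_string()  == "2"
 */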
int block_dev_create_exported_object(ocf_core_t core)
static int kcas_volume_create_exported_object(ocf_volume_t volume,
const char *name, void *priv, struct casdsk_exp_obj_ops *ops)
{
ocf_volume_t obj = ocf_core_get_volume(core);
ocf_cache_t cache = ocf_core_get_cache(core);
struct bd_object *bvol = bd_object(obj);
const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(obj);
struct bd_object *bvol = bd_object(volume);
const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(volume);
char dev_name[DISK_NAME_LEN];
struct casdsk_disk *dsk;
int result;
snprintf(dev_name, DISK_NAME_LEN, "cas%s-%s",
get_cache_id_string(cache),
get_core_id_string(core));
dsk = casdisk_functions.casdsk_disk_claim(uuid->data, core);
dsk = casdisk_functions.casdsk_disk_claim(uuid->data, priv);
if (dsk != bvol->dsk) {
result = -KCAS_ERR_SYSTEM;
goto end;
}
bvol->expobj_wq = alloc_workqueue("expobj_wq%s-%s",
bvol->expobj_wq = alloc_workqueue("expobj_wq_%s",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0,
get_cache_id_string(cache),
get_core_id_string(core));
name);
if (!bvol->expobj_wq) {
result = -ENOMEM;
goto end;
}
result = casdisk_functions.casdsk_exp_obj_create(dsk, dev_name,
THIS_MODULE, &_blockdev_exp_obj_ops);
result = casdisk_functions.casdsk_exp_obj_create(dsk, name,
THIS_MODULE, ops);
if (result) {
destroy_workqueue(bvol->expobj_wq);
goto end;
@@ -526,34 +565,127 @@ end:
return result;
}
int block_dev_destroy_exported_object(ocf_core_t core)
static int kcas_volume_destroy_exported_object(ocf_volume_t volume)
{
int ret = 0;
ocf_volume_t obj = ocf_core_get_volume(core);
struct bd_object *bvol = bd_object(obj);
struct bd_object *bvol = bd_object(volume);
int result;
if (!bvol->expobj_valid)
return 0;
ret = casdisk_functions.casdsk_exp_obj_lock(bvol->dsk);
if (ret) {
if (-EBUSY == ret)
ret = -KCAS_ERR_DEV_PENDING;
return ret;
}
result = casdisk_functions.casdsk_exp_obj_lock(bvol->dsk);
if (result == -EBUSY)
return -KCAS_ERR_DEV_PENDING;
else if (result)
return result;
ret = casdisk_functions.casdsk_exp_obj_destroy(bvol->dsk);
if (!ret)
result = casdisk_functions.casdsk_exp_obj_destroy(bvol->dsk);
if (!result)
bvol->expobj_valid = false;
destroy_workqueue(bvol->expobj_wq);
casdisk_functions.casdsk_exp_obj_unlock(bvol->dsk);
return ret;
return result;
}
static int _block_dev_lock_exported_object(ocf_core_t core, void *cntx)
/**
* @brief this routine actually adds /dev/casM-N inode
*/
static int kcas_volume_activate_exported_object(ocf_volume_t volume,
struct casdsk_exp_obj_ops *ops)
{
struct bd_object *bvol = bd_object(volume);
int result;
result = casdisk_functions.casdsk_exp_obj_activate(bvol->dsk);
if (result == -EEXIST)
result = -KCAS_ERR_FILE_EXISTS;
return result;
}
int kcas_core_create_exported_object(ocf_core_t core)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_volume_t volume = ocf_core_get_volume(core);
struct bd_object *bvol = bd_object(volume);
char dev_name[DISK_NAME_LEN];
snprintf(dev_name, DISK_NAME_LEN, "cas%s-%s",
get_cache_id_string(cache),
get_core_id_string(core));
bvol->front_volume = ocf_core_get_front_volume(core);
return kcas_volume_create_exported_object(volume, dev_name, core,
&kcas_core_exp_obj_ops);
}
int kcas_core_destroy_exported_object(ocf_core_t core)
{
ocf_volume_t volume = ocf_core_get_volume(core);
return kcas_volume_destroy_exported_object(volume);
}
int kcas_core_activate_exported_object(ocf_core_t core)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_volume_t volume = ocf_core_get_volume(core);
int result;
result = kcas_volume_activate_exported_object(volume,
&kcas_core_exp_obj_ops);
if (result) {
printk(KERN_ERR "Cannot activate exported object, %s.%s. "
"Error code %d\n", ocf_cache_get_name(cache),
ocf_core_get_name(core), result);
}
return result;
}
int kcas_cache_create_exported_object(ocf_cache_t cache)
{
ocf_volume_t volume = ocf_cache_get_volume(cache);
struct bd_object *bvol = bd_object(volume);
char dev_name[DISK_NAME_LEN];
snprintf(dev_name, DISK_NAME_LEN, "cas-cache-%s",
get_cache_id_string(cache));
bvol->front_volume = ocf_cache_get_front_volume(cache);
return kcas_volume_create_exported_object(volume, dev_name, cache,
&kcas_cache_exp_obj_ops);
}
int kcas_cache_destroy_exported_object(ocf_cache_t cache)
{
ocf_volume_t volume = ocf_cache_get_volume(cache);
return kcas_volume_destroy_exported_object(volume);
}
int kcas_cache_activate_exported_object(ocf_cache_t cache)
{
ocf_volume_t volume = ocf_cache_get_volume(cache);
int result;
result = kcas_volume_activate_exported_object(volume,
&kcas_cache_exp_obj_ops);
if (result) {
printk(KERN_ERR "Cannot activate cache %s exported object. "
"Error code %d\n", ocf_cache_get_name(cache),
result);
}
return result;
}
static int kcas_core_lock_exported_object(ocf_core_t core, void *cntx)
{
int result;
struct bd_object *bvol = bd_object(
@@ -575,7 +707,7 @@ static int _block_dev_lock_exported_object(ocf_core_t core, void *cntx)
return 0;
}
static int _block_dev_unlock_exported_object(ocf_core_t core, void *cntx)
static int kcas_core_unlock_exported_object(ocf_core_t core, void *cntx)
{
struct bd_object *bvol = bd_object(
ocf_core_get_volume(core));
@@ -588,7 +720,7 @@ static int _block_dev_unlock_exported_object(ocf_core_t core, void *cntx)
return 0;
}
static int _block_dev_stop_exported_object(ocf_core_t core, void *cntx)
static int kcas_core_stop_exported_object(ocf_core_t core, void *cntx)
{
struct bd_object *bvol = bd_object(
ocf_core_get_volume(core));
@@ -613,31 +745,31 @@ static int _block_dev_stop_exported_object(ocf_core_t core, void *cntx)
return 0;
}
static int _block_dev_free_exported_object(ocf_core_t core, void *cntx)
static int kcas_core_free_exported_object(ocf_core_t core, void *cntx)
{
struct bd_object *bvol = bd_object(
ocf_core_get_volume(core));
struct bd_object *bvol = bd_object(ocf_core_get_volume(core));
casdisk_functions.casdsk_exp_obj_free(bvol->dsk);
return 0;
}
int block_dev_destroy_all_exported_objects(ocf_cache_t cache)
int kcas_cache_destroy_all_core_exported_objects(ocf_cache_t cache)
{
int result;
/* Try to lock exported objects */
result = ocf_core_visit(cache, _block_dev_lock_exported_object, NULL,
result = ocf_core_visit(cache, kcas_core_lock_exported_object, NULL,
true);
if (result) {
/* Failure, unlock already locked exported objects */
ocf_core_visit(cache, _block_dev_unlock_exported_object, NULL,
ocf_core_visit(cache, kcas_core_unlock_exported_object, NULL,
true);
return result;
}
ocf_core_visit(cache, _block_dev_stop_exported_object, NULL, true);
ocf_core_visit(cache, kcas_core_stop_exported_object, NULL, true);
ocf_core_visit(cache, kcas_core_free_exported_object, NULL, true);
return ocf_core_visit(cache, _block_dev_free_exported_object, NULL,
true);
return 0;
}

View File

@@ -6,11 +6,15 @@
#ifndef __VOL_BLOCK_DEV_TOP_H__
#define __VOL_BLOCK_DEV_TOP_H__
int block_dev_activate_exported_object(ocf_core_t core);
int block_dev_create_exported_object(ocf_core_t core);
int kcas_core_create_exported_object(ocf_core_t core);
int kcas_core_destroy_exported_object(ocf_core_t core);
int kcas_core_activate_exported_object(ocf_core_t core);
int block_dev_destroy_all_exported_objects(ocf_cache_t cache);
int block_dev_destroy_exported_object(ocf_core_t core);
int kcas_cache_destroy_all_core_exported_objects(ocf_cache_t cache);
int kcas_cache_create_exported_object(ocf_cache_t cache);
int kcas_cache_destroy_exported_object(ocf_cache_t cache);
int kcas_cache_activate_exported_object(ocf_cache_t cache);
#endif /* __VOL_BLOCK_DEV_TOP_H__ */
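
The header now exposes symmetric create/activate/destroy entry points for core and cache exported objects. A minimal sketch of the intended call order for the cache-level object, mirroring cache_mngt_initialize_cache_exported_object above (error handling trimmed; function name hypothetical):

static int example_expose_cache(ocf_cache_t cache)
{
	int result;

	result = kcas_cache_create_exported_object(cache);
	if (result)
		return result;

	result = kcas_cache_activate_exported_object(cache);
	if (result)
		kcas_cache_destroy_exported_object(cache);

	return result;
}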

View File

@@ -36,6 +36,8 @@
#define CACHE_INIT_NEW 0 /**< initialize new metadata from fresh start */
#define CACHE_INIT_LOAD 1 /**< load existing metadata */
#define CACHE_INIT_BIND 2 /**< bind existing metadata */
#define CACHE_INIT_ACTIVATE 3 /**< activate cache after bind */
struct kcas_start_cache {
/**

ocf

@@ -1 +1 @@
Subproject commit 865d29d0cb93a71ce37a8410914c35005aa6ed54
Subproject commit 886c8d4e31e160f36d5ca3d0698a79811c8c0eca