Introduce cache exported object

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Robert Baldyga 2021-07-29 21:27:21 +02:00
parent 1dd8f12242
commit c0110a4d01
3 changed files with 186 additions and 77 deletions


@@ -33,6 +33,9 @@ struct bd_object {
struct workqueue_struct *expobj_wq;
/*< Workqueue for I/O handled by top vol */
ocf_volume_t front_volume;
/*< Cache/core front volume */
};
static inline struct bd_object *bd_object(ocf_volume_t vol)


@@ -6,7 +6,7 @@
#include "cas_cache.h"
#include "utils/cas_err.h"
static void _blockdev_set_bio_data(struct blk_data *data, struct bio *bio)
static void blkdev_set_bio_data(struct blk_data *data, struct bio *bio)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
struct bio_vec *bvec;
@@ -30,7 +30,7 @@ static void _blockdev_set_bio_data(struct blk_data *data, struct bio *bio)
#endif
}
static inline int _blkdev_can_hndl_bio(struct bio *bio)
static inline int blkdev_can_hndl_bio(struct bio *bio)
{
if (CAS_CHECK_BARRIER(bio)) {
CAS_PRINT_RL(KERN_WARNING
@@ -42,7 +42,7 @@ static inline int _blkdev_can_hndl_bio(struct bio *bio)
return 0;
}
void _blockdev_set_exported_object_flush_fua(ocf_core_t core)
void blkdev_set_exported_object_flush_fua(ocf_core_t core)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_volume_t core_vol = ocf_core_get_volume(core);
@@ -66,7 +66,7 @@ void _blockdev_set_exported_object_flush_fua(ocf_core_t core)
cas_set_queue_flush_fua(exp_q, flush, fua);
}
static void _blockdev_set_discard_properties(ocf_cache_t cache,
static void blkdev_set_discard_properties(ocf_cache_t cache,
struct request_queue *exp_q, struct block_device *cache_bd,
struct block_device *core_bd, sector_t core_sectors)
{
@@ -97,7 +97,7 @@ static void _blockdev_set_discard_properties(ocf_cache_t cache,
* Map geometry of underlying (core) object geometry (sectors etc.)
* to geometry of exported object.
*/
static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
static int blkdev_core_set_geometry(struct casdsk_disk *dsk, void *private)
{
ocf_core_t core;
ocf_cache_t cache;
@@ -148,9 +148,9 @@ static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
/* We don't want to receive split requests */
CAS_SET_QUEUE_CHUNK_SECTORS(exp_q, 0);
_blockdev_set_exported_object_flush_fua(core);
blkdev_set_exported_object_flush_fua(core);
_blockdev_set_discard_properties(cache, exp_q, cache_bd, core_bd,
blkdev_set_discard_properties(cache, exp_q, cache_bd, core_bd,
sectors);
return 0;
@@ -158,26 +158,24 @@ static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
struct defer_bio_context {
struct work_struct io_work;
void (*cb)(ocf_core_t core, struct bio *bio);
ocf_core_t core;
void (*cb)(struct bd_object *bvol, struct bio *bio);
struct bd_object *bvol;
struct bio *bio;
};
static void _blockdev_defer_bio_work(struct work_struct *work)
static void blkdev_defer_bio_work(struct work_struct *work)
{
struct defer_bio_context *context;
context = container_of(work, struct defer_bio_context, io_work);
context->cb(context->core, context->bio);
context->cb(context->bvol, context->bio);
kfree(context);
}
static void _blockdev_defer_bio(ocf_core_t core, struct bio *bio,
void (*cb)(ocf_core_t core, struct bio *bio))
static void blkdev_defer_bio(struct bd_object *bvol, struct bio *bio,
void (*cb)(struct bd_object *bvol, struct bio *bio))
{
struct defer_bio_context *context;
ocf_volume_t volume = ocf_core_get_volume(core);
struct bd_object *bvol = bd_object(volume);
BUG_ON(!bvol->expobj_wq);
@@ -190,12 +188,12 @@ static void _blockdev_defer_bio(ocf_core_t core, struct bio *bio,
context->cb = cb;
context->bio = bio;
context->core = core;
INIT_WORK(&context->io_work, _blockdev_defer_bio_work);
context->bvol = bvol;
INIT_WORK(&context->io_work, blkdev_defer_bio_work);
queue_work(bvol->expobj_wq, &context->io_work);
}
static void _block_dev_complete_data(struct blk_data *master, int error)
static void blkdev_complete_data_master(struct blk_data *master, int error)
{
master->error = master->error ?: error;
@@ -208,7 +206,7 @@ static void _block_dev_complete_data(struct blk_data *master, int error)
cas_free_blk_data(master);
}
static void block_dev_complete_data(struct ocf_io *io, int error)
static void blkdev_complete_data(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
struct blk_data *master = io->priv2;
@@ -220,38 +218,36 @@ static void block_dev_complete_data(struct ocf_io *io, int error)
if (bio != master->bio)
bio_put(bio);
_block_dev_complete_data(master, error);
blkdev_complete_data_master(master, error);
}
struct blockdev_data_master_ctx {
struct blkdev_data_master_ctx {
struct blk_data *data;
struct bio *bio;
uint32_t master_size;
unsigned long long start_time;
};
static int _blockdev_handle_data_single(ocf_core_t core, struct bio *bio,
struct blockdev_data_master_ctx *master_ctx)
static int blkdev_handle_data_single(struct bd_object *bvol, struct bio *bio,
struct blkdev_data_master_ctx *master_ctx)
{
ocf_cache_t cache;
struct cache_priv *cache_priv;
ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
struct ocf_io *io;
struct blk_data *data;
uint64_t flags = CAS_BIO_OP_FLAGS(bio);
int ret;
cache = ocf_core_get_cache(core);
cache_priv = ocf_cache_get_priv(cache);
data = cas_alloc_blk_data(bio_segments(bio), GFP_NOIO);
if (!data) {
CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n");
return -ENOMEM;
}
_blockdev_set_bio_data(data, bio);
blkdev_set_bio_data(data, bio);
io = ocf_core_new_io(core, cache_priv->io_queues[smp_processor_id()],
io = ocf_volume_new_io(bvol->front_volume, queue,
CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
CAS_BIO_BISIZE(bio), (bio_data_dir(bio) == READ) ?
OCF_READ : OCF_WRITE,
@@ -280,21 +276,21 @@ static int _blockdev_handle_data_single(ocf_core_t core, struct bio *bio,
atomic_inc(&master_ctx->data->master_remaining);
ocf_io_set_cmpl(io, bio, master_ctx->data, block_dev_complete_data);
ocf_io_set_cmpl(io, bio, master_ctx->data, blkdev_complete_data);
ocf_core_submit_io(io);
ocf_volume_submit_io(io);
return 0;
}
static void _blockdev_handle_data(ocf_core_t core, struct bio *bio)
static void blkdev_handle_data(struct bd_object *bvol, struct bio *bio)
{
const uint32_t max_io_sectors = (32*MiB) >> SECTOR_SHIFT;
const uint32_t align_sectors = (128*KiB) >> SECTOR_SHIFT;
struct bio *split = NULL;
uint32_t sectors, to_submit;
int error;
struct blockdev_data_master_ctx master_ctx = {
struct blkdev_data_master_ctx master_ctx = {
.bio = bio,
.master_size = CAS_BIO_BISIZE(bio),
};
@@ -320,12 +316,12 @@ static void _blockdev_handle_data(ocf_core_t core, struct bio *bio)
sectors -= to_submit;
}
error = _blockdev_handle_data_single(core, split, &master_ctx);
error = blkdev_handle_data_single(bvol, split, &master_ctx);
if (error)
goto err;
}
_block_dev_complete_data(master_ctx.data, 0);
blkdev_complete_data_master(master_ctx.data, 0);
return;
@@ -333,12 +329,12 @@ err:
if (split != bio)
bio_put(split);
if (master_ctx.data)
_block_dev_complete_data(master_ctx.data, error);
blkdev_complete_data_master(master_ctx.data, error);
else
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
}
static void block_dev_complete_discard(struct ocf_io *io, int error)
static void blkdev_complete_discard(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
@@ -346,13 +342,14 @@ static void block_dev_complete_discard(struct ocf_io *io, int error)
ocf_io_put(io);
}
static void _blockdev_handle_discard(ocf_core_t core, struct bio *bio)
static void blkdev_handle_discard(struct bd_object *bvol, struct bio *bio)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
struct ocf_io *io;
io = ocf_core_new_io(core, cache_priv->io_queues[smp_processor_id()],
io = ocf_volume_new_io(bvol->front_volume, queue,
CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
CAS_BIO_BISIZE(bio), OCF_WRITE, 0,
CAS_CLEAR_FLUSH(CAS_BIO_OP_FLAGS(bio)));
@@ -364,23 +361,23 @@ static void _blockdev_handle_discard(ocf_core_t core, struct bio *bio)
return;
}
ocf_io_set_cmpl(io, bio, NULL, block_dev_complete_discard);
ocf_io_set_cmpl(io, bio, NULL, blkdev_complete_discard);
ocf_core_submit_discard(io);
ocf_volume_submit_discard(io);
}
static void _blockdev_handle_bio_noflush(ocf_core_t core, struct bio *bio)
static void blkdev_handle_bio_noflush(struct bd_object *bvol, struct bio *bio)
{
if (CAS_IS_DISCARD(bio))
_blockdev_handle_discard(core, bio);
blkdev_handle_discard(bvol, bio);
else
_blockdev_handle_data(core, bio);
blkdev_handle_data(bvol, bio);
}
static void block_dev_complete_flush(struct ocf_io *io, int error)
static void blkdev_complete_flush(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
ocf_core_t core = io->priv2;
struct bd_object *bvol = io->priv2;
ocf_io_put(io);
@@ -391,19 +388,20 @@ static void block_dev_complete_flush(struct ocf_io *io, int error)
}
if (in_interrupt())
_blockdev_defer_bio(core, bio, _blockdev_handle_bio_noflush);
blkdev_defer_bio(bvol, bio, blkdev_handle_bio_noflush);
else
_blockdev_handle_bio_noflush(core, bio);
blkdev_handle_bio_noflush(bvol, bio);
}
static void _blkdev_handle_flush(ocf_core_t core, struct bio *bio)
static void blkdev_handle_flush(struct bd_object *bvol, struct bio *bio)
{
struct ocf_io *io;
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
struct ocf_io *io;
io = ocf_core_new_io(core, cache_priv->io_queues[smp_processor_id()],
0, 0, OCF_WRITE, 0, CAS_SET_FLUSH(0));
io = ocf_volume_new_io(bvol->front_volume, queue, 0, 0, OCF_WRITE, 0,
CAS_SET_FLUSH(0));
if (!io) {
CAS_PRINT_RL(KERN_CRIT
"Out of memory. Ending IO processing.\n");
@@ -411,41 +409,105 @@ static void _blkdev_handle_flush(ocf_core_t core, struct bio *bio)
return;
}
ocf_io_set_cmpl(io, bio, core, block_dev_complete_flush);
ocf_io_set_cmpl(io, bio, bvol, blkdev_complete_flush);
ocf_core_submit_flush(io);
ocf_volume_submit_flush(io);
}
static void _blockdev_handle_bio(ocf_core_t core, struct bio *bio)
static void blkdev_handle_bio(struct bd_object *bvol, struct bio *bio)
{
if (CAS_IS_SET_FLUSH(CAS_BIO_OP_FLAGS(bio)))
_blkdev_handle_flush(core, bio);
blkdev_handle_flush(bvol, bio);
else
_blockdev_handle_bio_noflush(core, bio);
blkdev_handle_bio_noflush(bvol, bio);
}
static void _blockdev_submit_bio(struct casdsk_disk *dsk,
struct bio *bio, void *private)
static void blkdev_submit_bio(struct bd_object *bvol, struct bio *bio)
{
ocf_core_t core = private;
BUG_ON(!core);
if (_blkdev_can_hndl_bio(bio)) {
if (blkdev_can_hndl_bio(bio)) {
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio),
CAS_ERRNO_TO_BLK_STS(-ENOTSUPP));
return;
}
if (in_interrupt())
_blockdev_defer_bio(core, bio, _blockdev_handle_bio);
blkdev_defer_bio(bvol, bio, blkdev_handle_bio);
else
_blockdev_handle_bio(core, bio);
blkdev_handle_bio(bvol, bio);
}
static struct casdsk_exp_obj_ops _blockdev_exp_obj_ops = {
.set_geometry = _blockdev_set_geometry,
.submit_bio = _blockdev_submit_bio,
static void blkdev_core_submit_bio(struct casdsk_disk *dsk,
struct bio *bio, void *private)
{
ocf_core_t core = private;
struct bd_object *bvol;
BUG_ON(!core);
bvol = bd_object(ocf_core_get_volume(core));
blkdev_submit_bio(bvol, bio);
}
static struct casdsk_exp_obj_ops kcas_core_exp_obj_ops = {
.set_geometry = blkdev_core_set_geometry,
.submit_bio = blkdev_core_submit_bio,
};
static int blkdev_cache_set_geometry(struct casdsk_disk *dsk, void *private)
{
ocf_cache_t cache;
ocf_volume_t volume;
struct bd_object *bvol;
struct request_queue *cache_q, *exp_q;
struct block_device *bd;
sector_t sectors;
BUG_ON(!private);
cache = private;
volume = ocf_cache_get_volume(cache);
bvol = bd_object(volume);
bd = casdisk_functions.casdsk_disk_get_blkdev(bvol->dsk);
BUG_ON(!bd);
cache_q = bd->bd_disk->queue;
exp_q = casdisk_functions.casdsk_exp_obj_get_queue(dsk);
sectors = ocf_volume_get_length(volume) >> SECTOR_SHIFT;
set_capacity(casdisk_functions.casdsk_exp_obj_get_gendisk(dsk), sectors);
cas_copy_queue_limits(exp_q, cache_q, cache_q);
blk_stack_limits(&exp_q->limits, &cache_q->limits, 0);
/* We don't want to receive split requests */
CAS_SET_QUEUE_CHUNK_SECTORS(exp_q, 0);
cas_set_queue_flush_fua(exp_q, CAS_CHECK_QUEUE_FLUSH(cache_q),
CAS_CHECK_QUEUE_FUA(cache_q));
return 0;
}
static void blkdev_cache_submit_bio(struct casdsk_disk *dsk,
struct bio *bio, void *private)
{
ocf_cache_t cache = private;
struct bd_object *bvol;
BUG_ON(!cache);
bvol = bd_object(ocf_cache_get_volume(cache));
blkdev_submit_bio(bvol, bio);
}
static struct casdsk_exp_obj_ops kcas_cache_exp_obj_ops = {
.set_geometry = blkdev_cache_set_geometry,
.submit_bio = blkdev_cache_submit_bio,
};
/****************************************
@@ -548,14 +610,17 @@ int kcas_core_create_exported_object(ocf_core_t core)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_volume_t volume = ocf_core_get_volume(core);
struct bd_object *bvol = bd_object(volume);
char dev_name[DISK_NAME_LEN];
snprintf(dev_name, DISK_NAME_LEN, "cas%s-%s",
get_cache_id_string(cache),
get_core_id_string(core));
bvol->front_volume = ocf_core_get_front_volume(core);
return kcas_volume_create_exported_object(volume, dev_name, core,
&_blockdev_exp_obj_ops);
&kcas_core_exp_obj_ops);
}
int kcas_core_destroy_exported_object(ocf_core_t core)
@@ -572,8 +637,7 @@ int kcas_core_activate_exported_object(ocf_core_t core)
int result;
result = kcas_volume_activate_exported_object(volume,
&_blockdev_exp_obj_ops);
&kcas_core_exp_obj_ops);
if (result) {
printk(KERN_ERR "Cannot activate exported object, %s.%s. "
"Error code %d\n", ocf_cache_get_name(cache),
@@ -583,6 +647,44 @@ int kcas_core_activate_exported_object(ocf_core_t core)
return result;
}
int kcas_cache_create_exported_object(ocf_cache_t cache)
{
ocf_volume_t volume = ocf_cache_get_volume(cache);
struct bd_object *bvol = bd_object(volume);
char dev_name[DISK_NAME_LEN];
snprintf(dev_name, DISK_NAME_LEN, "cas-cache-%s",
get_cache_id_string(cache));
bvol->front_volume = ocf_cache_get_front_volume(cache);
return kcas_volume_create_exported_object(volume, dev_name, cache,
&kcas_cache_exp_obj_ops);
}
int kcas_cache_destroy_exported_object(ocf_cache_t cache)
{
ocf_volume_t volume = ocf_cache_get_volume(cache);
return kcas_volume_destroy_exported_object(volume);
}
int kcas_cache_activate_exported_object(ocf_cache_t cache)
{
ocf_volume_t volume = ocf_cache_get_volume(cache);
int result;
result = kcas_volume_activate_exported_object(volume,
&kcas_cache_exp_obj_ops);
if (result) {
printk(KERN_ERR "Cannot activate cache %s exported object. "
"Error code %d\n", ocf_cache_get_name(cache),
result);
}
return result;
}
static int kcas_core_lock_exported_object(ocf_core_t core, void *cntx)
{
int result;


@@ -6,11 +6,15 @@
#ifndef __VOL_BLOCK_DEV_TOP_H__
#define __VOL_BLOCK_DEV_TOP_H__
int kcas_core_activate_exported_object(ocf_core_t core);
int kcas_core_create_exported_object(ocf_core_t core);
int kcas_core_destroy_exported_object(ocf_core_t core);
int kcas_core_activate_exported_object(ocf_core_t core);
int kcas_cache_destroy_all_core_exported_objects(ocf_cache_t cache);
int kcas_core_destroy_exported_object(ocf_core_t core);
int kcas_cache_create_exported_object(ocf_cache_t cache);
int kcas_cache_destroy_exported_object(ocf_cache_t cache);
int kcas_cache_activate_exported_object(ocf_cache_t cache);
#endif /* __VOL_BLOCK_DEV_TOP_H__ */
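
For orientation, a minimal sketch of how the cache exported object helpers declared above might be driven from cache management code. The wrapper function name and its call site are assumptions for illustration only; only kcas_cache_create_exported_object(), kcas_cache_activate_exported_object() and kcas_cache_destroy_exported_object() come from this commit.

static int example_expose_cache_device(ocf_cache_t cache)
{
	int result;

	/* Create the cas-cache-<cache id> block device on top of the cache volume */
	result = kcas_cache_create_exported_object(cache);
	if (result)
		return result;

	/* Bring the exported object online so it can accept I/O */
	result = kcas_cache_activate_exported_object(cache);
	if (result)
		kcas_cache_destroy_exported_object(cache);

	return result;
}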