Handle all io on bio handler

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
This commit is contained in:
Robert Baldyga 2021-05-17 17:02:38 +02:00
parent fbc3906576
commit 7343cb55fa
14 changed files with 374 additions and 180 deletions

View File

@ -0,0 +1,34 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

. $(dirname $3)/conf_framework

# Probe how this kernel exposes the gendisk of a struct bio:
#   "1" - bio has a bi_disk member
#   "2" - bio has a bi_bdev member (gendisk reachable via bi_bdev->bd_disk)
#   "X" - neither member compiles
check() {
	cur_name=$(basename $2)
	config_file_path=$1

	if compile_module $cur_name "struct bio *bio; bio->bi_disk;" "linux/blk_types.h"; then
		echo $cur_name "1" >> $config_file_path
	elif compile_module $cur_name "struct bio *bio; bio->bi_bdev;" "linux/blk_types.h"; then
		echo $cur_name "2" >> $config_file_path
	else
		echo $cur_name "X" >> $config_file_path
	fi
}

# Emit the CAS_BIO_GET_GENDISK() accessor matching the detected variant.
apply() {
	case "$1" in
	"1") add_define "CAS_BIO_GET_GENDISK(bio) (bio->bi_disk)" ;;
	"2") add_define "CAS_BIO_GET_GENDISK(bio) (bio->bi_bdev->bd_disk)" ;;
	*) exit 1 ;;
	esac
}

conf_run $@

View File

@ -9,12 +9,15 @@
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "REQ_PREFLUSH" "linux/blk_types.h"
if compile_module $cur_name "BIO_FLUSH" "linux/bio.h"
then
echo $cur_name "1" >> $config_file_path
elif compile_module $cur_name "REQ_FLUSH" "linux/blk_types.h"
then
echo $cur_name "2" >> $config_file_path
elif compile_module $cur_name "REQ_PREFLUSH" "linux/blk_types.h"
then
echo $cur_name "3" >> $config_file_path
else
echo $cur_name "X" >> $config_file_path
fi
@ -23,15 +26,26 @@ check() {
apply() {
case "$1" in
"1")
add_define "CAS_REQ_FLUSH \\
REQ_PREFLUSH"
add_define "CAS_FLUSH_SUPPORTED \\
1" ;;
add_define "CAS_IS_SET_FLUSH(flags) \\
((flags) & BIO_FLUSH)"
add_define "CAS_SET_FLUSH(flags) \\
((flags) | BIO_FLUSH)"
add_define "CAS_CLEAR_FLUSH(flags) \\
((flags) & ~BIO_FLUSH)" ;;
"2")
add_define "CAS_REQ_FLUSH \\
REQ_FLUSH"
add_define "CAS_FLUSH_SUPPORTED \\
1" ;;
add_define "CAS_IS_SET_FLUSH(flags) \\
((flags) & REQ_FLUSH)"
add_define "CAS_SET_FLUSH(flags) \\
((flags) | REQ_FLUSH)"
add_define "CAS_CLEAR_FLUSH(flags) \\
((flags) & ~REQ_FLUSH)" ;;
"3")
add_define "CAS_IS_SET_FLUSH(flags) \\
((flags) & REQ_PREFLUSH)"
add_define "CAS_SET_FLUSH(flags) \\
((flags) | REQ_PREFLUSH)"
add_define "CAS_CLEAR_FLUSH(flags) \\
((flags) & ~REQ_PREFLUSH)" ;;
*)
exit 1
esac

View File

@ -12,8 +12,11 @@ check() {
if compile_module $cur_name "blk_queue_make_request" "linux/blkdev.h"
then
echo $cur_name "1" >> $config_file_path
else
elif compile_module $cur_name "struct request_queue *q; q->make_request_fn;" "linux/blkdev.h"
then
echo $cur_name "2" >> $config_file_path
else
echo $cur_name "3" >> $config_file_path
fi
}
@ -33,6 +36,13 @@ apply() {
{
q->make_request_fn = mfn;
}" ;;
"3")
add_define "make_request_fn void"
add_function "
static inline void cas_blk_queue_make_request(struct request_queue *q,
make_request_fn *mfn)
{
}" ;;
*)
exit 1
esac

View File

@ -0,0 +1,40 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

. $(dirname $3)/conf_framework

# Probe which flush request flag this kernel defines:
#   "1" - REQ_PREFLUSH is available
#   "2" - REQ_FLUSH is available
#   "X" - neither flag compiles
check() {
	cur_name=$(basename $2)
	config_file_path=$1

	if compile_module $cur_name "REQ_PREFLUSH" "linux/blk_types.h"; then
		echo $cur_name "1" >> $config_file_path
	elif compile_module $cur_name "REQ_FLUSH" "linux/blk_types.h"; then
		echo $cur_name "2" >> $config_file_path
	else
		echo $cur_name "X" >> $config_file_path
	fi
}

# Map the detected flag onto CAS_REQ_FLUSH and mark flush as supported.
apply() {
	case "$1" in
	"1")
		add_define "CAS_REQ_FLUSH \\
REQ_PREFLUSH"
		add_define "CAS_FLUSH_SUPPORTED \\
1" ;;
	"2")
		add_define "CAS_REQ_FLUSH \\
REQ_FLUSH"
		add_define "CAS_FLUSH_SUPPORTED \\
1" ;;
	*) exit 1 ;;
	esac
}

conf_run $@

View File

@ -0,0 +1,31 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

. $(dirname $3)/conf_framework

# Probe whether struct block_device_operations has a submit_bio member
# on this kernel:
#   "1" - submit_bio member exists
#   "2" - it does not
check() {
	cur_name=$(basename $2)
	config_file_path=$1

	if compile_module $cur_name "struct block_device_operations *ops; ops->submit_bio;" "linux/blkdev.h"; then
		echo $cur_name "1" >> $config_file_path
	else
		echo $cur_name "2" >> $config_file_path
	fi
}

# "1": emit an initializer wiring the callback; "2": expand to nothing.
apply() {
	case "$1" in
	"1") add_define "CAS_SET_SUBMIT_BIO(_fn) .submit_bio = _fn," ;;
	"2") add_define "CAS_SET_SUBMIT_BIO(_fn)" ;;
	*) exit 1 ;;
	esac
}

conf_run $@

View File

@ -35,7 +35,7 @@ apply() {
}" ;;
"2")
add_define "CAS_CHECK_QUEUE_FLUSH(q) \\
((q)->flush_flags & CAS_REQ_FLUSH)"
CAS_IS_SET_FLUSH((q)->flush_flags)"
add_define "CAS_CHECK_QUEUE_FUA(q) \\
((q)->flush_flags & REQ_FUA)"
add_function "static inline void cas_set_queue_flush_fua(struct request_queue *q,
@ -43,7 +43,7 @@ apply() {
{
unsigned int flags = 0;
if (flush)
flags |= CAS_REQ_FLUSH;
flags = CAS_SET_FLUSH(flags);
if (fua)
flags |= REQ_FUA;
if (flags)

View File

@ -31,8 +31,11 @@ struct bd_object {
atomic64_t pending_rqs;
/*!< This fields describes in flight IO requests */
struct workqueue_struct *workqueue;
/*< Workqueue for internally triggered I/O */
struct workqueue_struct *btm_wq;
/*< Workqueue for I/O internally triggered in bottom vol */
struct workqueue_struct *expobj_wq;
/*< Workqueue for I/O handled by top vol */
};
static inline struct bd_object *bd_object(ocf_volume_t vol)

View File

@ -903,8 +903,7 @@ void cas_atomic_submit_io(struct ocf_io *io)
{
CAS_DEBUG_TRACE();
if (!CAS_IS_WRITE_FLUSH_FUA(io->flags) &&
CAS_IS_WRITE_FLUSH(io->flags)) {
if (CAS_IS_SET_FLUSH(io->flags)) {
/* FLUSH */
cas_atomic_submit_flush(io);
return;
@ -953,8 +952,8 @@ void cas_atomic_close_object(ocf_volume_t volume)
{
struct bd_object *bdobj = bd_object(volume);
if(bdobj->workqueue)
destroy_workqueue(bdobj->workqueue);
if(bdobj->btm_wq)
destroy_workqueue(bdobj->btm_wq);
block_dev_close_object(volume);
}
@ -976,8 +975,8 @@ int cas_atomic_open_object(ocf_volume_t volume, void *volume_params)
memcpy(&bdobj->atomic_params, volume_params,
sizeof(bdobj->atomic_params));
bdobj->workqueue = create_workqueue("CAS_AT_ZER");
if (!bdobj->workqueue) {
bdobj->btm_wq = create_workqueue("CAS_AT_ZER");
if (!bdobj->btm_wq) {
cas_atomic_close_object(volume);
result = -ENOMEM;
goto end;
@ -1036,7 +1035,7 @@ static void _cas_atomic_write_zeroes_step_cmpl(struct ocf_io *io, int error)
_cas_atomic_write_zeroes_end(ctx, error);
} else {
/* submit next IO from work context */
queue_work(bdobj->workqueue, &ctx->cmpl_work);
queue_work(bdobj->btm_wq, &ctx->cmpl_work);
}
}

View File

@ -267,7 +267,7 @@ static void block_dev_submit_flush(struct ocf_io *io)
bio->bi_private = io;
atomic_inc(&blkio->rq_remaning);
cas_submit_bio(CAS_WRITE_FLUSH, bio);
cas_submit_bio(CAS_SET_FLUSH(0), bio);
out:
cas_bd_io_end(io, blkio->error);
@ -412,8 +412,7 @@ static void block_dev_submit_io(struct ocf_io *io)
uint32_t bytes = io->bytes;
int dir = io->dir;
if (!CAS_IS_WRITE_FLUSH_FUA(io->flags) &&
CAS_IS_WRITE_FLUSH(io->flags)) {
if (CAS_IS_SET_FLUSH(io->flags)) {
CAS_DEBUG_MSG("Flush request");
/* It is flush requests handle it */
block_dev_submit_flush(io);

View File

@ -73,34 +73,13 @@ static inline void _blockdev_end_io_acct(struct bio *bio,
cas_generic_end_io_acct(gd->queue, bio, &gd->part0, start_time);
}
void block_dev_start_bio_fast(struct ocf_io *io)
void block_dev_start_bio(struct ocf_io *io)
{
struct blk_data *data = ocf_io_get_data(io);
struct bio *bio = data->master_io_req;
data->start_time = _blockdev_start_io_acct(bio);
}
void block_dev_complete_bio_fast(struct ocf_io *io, int error)
{
struct blk_data *data = ocf_io_get_data(io);
struct bio *bio = data->master_io_req;
_blockdev_end_io_acct(bio, data->start_time);
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
ocf_io_put(io);
cas_free_blk_data(data);
}
void block_dev_complete_bio_discard(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
ocf_io_put(io);
}
void block_dev_complete_rq(struct ocf_io *io, int error)
{
@ -507,23 +486,6 @@ static inline int _blkdev_can_hndl_bio(struct bio *bio)
return 0;
}
static inline bool _blkdev_is_flush_fua_bio(struct bio *bio)
{
if (CAS_IS_WRITE_FLUSH_FUA(CAS_BIO_OP_FLAGS(bio))) {
/* FLUSH and FUA */
return true;
} else if (CAS_IS_WRITE_FUA(CAS_BIO_OP_FLAGS(bio))) {
/* FUA */
return true;
} else if (CAS_IS_WRITE_FLUSH(CAS_BIO_OP_FLAGS(bio))) {
/* FLUSH */
return true;
}
return false;
}
void _blockdev_set_exported_object_flush_fua(ocf_core_t core)
{
#ifdef CAS_FLUSH_SUPPORTED
@ -670,10 +632,125 @@ static void _blockdev_pending_req_dec(struct casdsk_disk *dsk, void *private)
atomic64_dec(&bvol->pending_rqs);
}
/*
 * Context carrying a bio whose handling is deferred to process context.
 * Allocated in _blockdev_defer_bio(), freed in _blockdev_defer_bio_work().
 */
struct defer_bio_context {
	struct work_struct io_work;	/* work item queued on expobj_wq */
	void (*cb)(ocf_core_t core, struct bio *bio);	/* deferred handler */
	ocf_core_t core;
	struct bio *bio;
};
/*
 * Workqueue handler for a deferred bio: invoke the stored callback in
 * process context, then free the context allocated by _blockdev_defer_bio().
 */
static void _blockdev_defer_bio_work(struct work_struct *work)
{
struct defer_bio_context *context;
context = container_of(work, struct defer_bio_context, io_work);
context->cb(context->core, context->bio);
/* context was kmalloc'ed in _blockdev_defer_bio(); release it here */
kfree(context);
}
/*
 * Defer handling of @bio to process context by queueing it on the
 * exported object's workqueue (expobj_wq). On allocation failure the bio
 * is completed immediately with -ENOMEM.
 */
static void _blockdev_defer_bio(ocf_core_t core, struct bio *bio,
void (*cb)(ocf_core_t core, struct bio *bio))
{
struct defer_bio_context *context;
ocf_volume_t volume = ocf_core_get_volume(core);
struct bd_object *bvol = bd_object(volume);
BUG_ON(!bvol->expobj_wq);
/* GFP_ATOMIC: callers may invoke this from interrupt context */
context = kmalloc(sizeof(*context), GFP_ATOMIC);
if (!context) {
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio),
CAS_ERRNO_TO_BLK_STS(-ENOMEM));
return;
}
context->cb = cb;
context->bio = bio;
context->core = core;
INIT_WORK(&context->io_work, _blockdev_defer_bio_work);
queue_work(bvol->expobj_wq, &context->io_work);
}
/*
 * ocf_io completion callback for the data path: finish block-layer
 * accounting for the master bio, end the bio with the translated error
 * status, and release the io reference and its blk_data.
 */
static void block_dev_complete_data(struct ocf_io *io, int error)
{
struct blk_data *data = ocf_io_get_data(io);
struct bio *bio = data->master_io_req;
/* accounting was started in block_dev_start_bio() */
_blockdev_end_io_acct(bio, data->start_time);
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
ocf_io_put(io);
cas_free_blk_data(data);
}
/*
 * Handle a regular (non-flush, non-discard) bio: wrap it in an ocf_io
 * with an attached blk_data segment vector and submit it to the OCF core.
 * Errors at any stage complete the bio immediately with a suitable status.
 */
static void _blockdev_handle_data(ocf_core_t core, struct bio *bio)
{
ocf_cache_t cache;
struct cache_priv *cache_priv;
struct ocf_io *io;
struct blk_data *data;
uint64_t flags = CAS_BIO_OP_FLAGS(bio);
int ret;
cache = ocf_core_get_cache(core);
cache_priv = ocf_cache_get_priv(cache);
/* zero-sized data bios cannot be mapped to an ocf_io */
if (unlikely(CAS_BIO_BISIZE(bio) == 0)) {
CAS_PRINT_RL(KERN_ERR
"Not able to handle empty BIO, flags = "
CAS_BIO_OP_FLAGS_FORMAT "\n", CAS_BIO_OP_FLAGS(bio));
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-EINVAL));
return;
}
data = cas_alloc_blk_data(bio_segments(bio), GFP_NOIO);
if (!data) {
CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n");
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-ENOMEM));
return;
}
_blockdev_set_bio_data(data, bio);
/* keep the original bio so the completion path can end it */
data->master_io_req = bio;
/* flush flag is cleared: flush bios take _blkdev_handle_flush() instead */
io = ocf_core_new_io(core, cache_priv->io_queues[smp_processor_id()],
CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
CAS_BIO_BISIZE(bio), (bio_data_dir(bio) == READ) ?
OCF_READ : OCF_WRITE,
cas_cls_classify(cache, bio), CAS_CLEAR_FLUSH(flags));
if (!io) {
printk(KERN_CRIT "Out of memory. Ending IO processing.\n");
cas_free_blk_data(data);
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-ENOMEM));
return;
}
ret = ocf_io_set_data(io, data, 0);
if (ret < 0) {
ocf_io_put(io);
cas_free_blk_data(data);
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-EINVAL));
return;
}
ocf_io_set_cmpl(io, NULL, NULL, block_dev_complete_data);
ocf_io_set_start(io, block_dev_start_bio);
ocf_core_submit_io(io);
}
/*
 * ocf_io completion callback for discards: end the original bio (stored
 * in priv1) with the translated error status and drop the io reference.
 * No blk_data is attached on the discard path.
 */
static void block_dev_complete_discard(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
ocf_io_put(io);
}
static void _blockdev_handle_discard(ocf_core_t core, struct bio *bio)
{
ocf_core_t core = private;
ocf_cache_t cache = ocf_core_get_cache(core);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
struct ocf_io *io;
@ -689,105 +766,89 @@ static void _blockdev_make_request_discard(struct casdsk_disk *dsk,
return;
}
ocf_io_set_cmpl(io, bio, NULL, block_dev_complete_bio_discard);
ocf_io_set_cmpl(io, bio, NULL, block_dev_complete_discard);
ocf_core_submit_discard(io);
}
static int _blockdev_make_request_fast(struct casdsk_disk *dsk,
struct request_queue *q, struct bio *bio, void *private)
/*
 * Handle a bio that does not carry the flush flag: route discards to the
 * discard path and everything else to the data path.
 */
static void _blockdev_handle_bio_noflush(ocf_core_t core, struct bio *bio)
{
	if (CAS_IS_DISCARD(bio))
		_blockdev_handle_discard(core, bio);
	else
		_blockdev_handle_data(core, bio);
}
/*
 * ocf_io completion callback for the flush phase. If the flush failed or
 * the bio carried no data, the bio is ended here; otherwise the data part
 * of the bio is resubmitted through the non-flush path (deferred to
 * process context when completing from interrupt context).
 */
static void block_dev_complete_flush(struct ocf_io *io, int error)
{
	struct bio *bio = io->priv1;
	ocf_core_t core = io->priv2;

	ocf_io_put(io);

	if (CAS_BIO_BISIZE(bio) == 0 || error) {
		CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio),
				CAS_ERRNO_TO_BLK_STS(error));
		return;
	}

	if (in_interrupt())
		_blockdev_defer_bio(core, bio, _blockdev_handle_bio_noflush);
	else
		_blockdev_handle_bio_noflush(core, bio);
}
if (_blkdev_can_hndl_bio(bio))
return CASDSK_BIO_HANDLED;
if (_blkdev_is_flush_fua_bio(bio))
return CASDSK_BIO_NOT_HANDLED;
if (CAS_IS_DISCARD(bio)) {
_blockdev_make_request_discard(dsk, q, bio, private);
return CASDSK_BIO_HANDLED;
}
if (unlikely(CAS_BIO_BISIZE(bio) == 0)) {
CAS_PRINT_RL(KERN_ERR
"Not able to handle empty BIO, flags = "
CAS_BIO_OP_FLAGS_FORMAT "\n", CAS_BIO_OP_FLAGS(bio));
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-EINVAL));
return CASDSK_BIO_HANDLED;
}
data = cas_alloc_blk_data(bio_segments(bio), GFP_NOIO);
if (!data) {
CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n");
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-ENOMEM));
return CASDSK_BIO_HANDLED;
}
_blockdev_set_bio_data(data, bio);
data->master_io_req = bio;
/*
 * Handle a flush-flagged bio: submit a zero-length flush ocf_io to the
 * core. Any data carried by the bio is resubmitted afterwards by
 * block_dev_complete_flush().
 */
static void _blkdev_handle_flush(ocf_core_t core, struct bio *bio)
{
	struct ocf_io *io;
	ocf_cache_t cache = ocf_core_get_cache(core);
	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);

	/* address/size 0: a pure flush request with only the flush flag set */
	io = ocf_core_new_io(core, cache_priv->io_queues[smp_processor_id()],
			0, 0, OCF_WRITE, 0, CAS_SET_FLUSH(0));
	if (!io) {
		CAS_PRINT_RL(KERN_CRIT
				"Out of memory. Ending IO processing.\n");
		CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-ENOMEM));
		return;
	}

	ocf_io_set_cmpl(io, bio, core, block_dev_complete_flush);
	ocf_core_submit_flush(io);
}
/*
 * Top-level bio dispatch: flush-flagged bios take the flush path,
 * everything else the non-flush (discard/data) path.
 */
static void _blockdev_handle_bio(ocf_core_t core, struct bio *bio)
{
if (CAS_IS_SET_FLUSH(CAS_BIO_OP_FLAGS(bio)))
_blkdev_handle_flush(core, bio);
else
_blockdev_handle_bio_noflush(core, bio);
}
/*
 * cas_disk submit_bio callback for the exported object: reject bios the
 * driver cannot service, then handle the bio directly or defer it to
 * process context when called from interrupt context.
 */
static void _blockdev_submit_bio(struct casdsk_disk *dsk,
		struct bio *bio, void *private)
{
	ocf_core_t core = private;

	BUG_ON(!core);

	if (_blkdev_can_hndl_bio(bio)) {
		CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio),
				CAS_ERRNO_TO_BLK_STS(-ENOTSUPP));
		return;
	}

	if (in_interrupt())
		_blockdev_defer_bio(core, bio, _blockdev_handle_bio);
	else
		_blockdev_handle_bio(core, bio);
}
/* Exported object (top device) callbacks registered with cas_disk. */
static struct casdsk_exp_obj_ops _blockdev_exp_obj_ops = {
	.set_geometry = _blockdev_set_geometry,
	.submit_bio = _blockdev_submit_bio,
	.pending_rq_inc = _blockdev_pending_req_inc,
	.pending_rq_dec = _blockdev_pending_req_dec,
};
@ -866,10 +927,23 @@ int block_dev_create_exported_object(ocf_core_t core)
return 0;
}
bvol->expobj_wq = alloc_workqueue("expobj_wq%s-%s",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0,
get_cache_id_string(cache),
get_core_id_string(core));
if (!bvol->expobj_wq) {
result = -ENOMEM;
goto end;
}
result = casdisk_functions.casdsk_exp_obj_create(dsk, dev_name,
THIS_MODULE, &_blockdev_exp_obj_ops);
if (!result)
bvol->expobj_valid = true;
if (result) {
destroy_workqueue(bvol->expobj_wq);
goto end;
}
bvol->expobj_valid = true;
end:
if (result) {
@ -899,6 +973,8 @@ int block_dev_destroy_exported_object(ocf_core_t core)
if (!bvol->expobj_valid)
return 0;
destroy_workqueue(bvol->expobj_wq);
ret = casdisk_functions.casdsk_exp_obj_lock(bvol->dsk);
if (ret) {
if (-EBUSY == ret)
@ -907,11 +983,11 @@ int block_dev_destroy_exported_object(ocf_core_t core)
}
ret = casdisk_functions.casdsk_exp_obj_destroy(bvol->dsk);
casdisk_functions.casdsk_exp_obj_unlock(bvol->dsk);
if (!ret)
bvol->expobj_valid = false;
casdisk_functions.casdsk_exp_obj_unlock(bvol->dsk);
return ret;
}
@ -954,6 +1030,7 @@ static int _block_dev_stop_exported_object(ocf_core_t core, void *cntx)
{
struct bd_object *bvol = bd_object(
ocf_core_get_volume(core));
int ret;
if (bvol->expobj_valid) {
BUG_ON(!bvol->expobj_locked);
@ -961,8 +1038,9 @@ static int _block_dev_stop_exported_object(ocf_core_t core, void *cntx)
printk(KERN_INFO "Stopping device %s\n",
casdisk_functions.casdsk_exp_obj_get_gendisk(bvol->dsk)->disk_name);
casdisk_functions.casdsk_exp_obj_destroy(bvol->dsk);
bvol->expobj_valid = false;
ret = casdisk_functions.casdsk_exp_obj_destroy(bvol->dsk);
if (!ret)
bvol->expobj_valid = false;
}
if (bvol->expobj_locked) {

View File

@ -11,13 +11,10 @@
/**
* Version of cas_disk interface
*/
#define CASDSK_IFACE_VERSION 2
#define CASDSK_IFACE_VERSION 3
struct casdsk_disk;
#define CASDSK_BIO_NOT_HANDLED 0
#define CASDSK_BIO_HANDLED 1
struct casdsk_exp_obj_ops {
/**
@ -41,14 +38,11 @@ struct casdsk_exp_obj_ops {
int (*set_geometry)(struct casdsk_disk *dsk, void *private);
/**
* @brief make_request_fn of exported object (top) block device.
* @brief submit_bio of exported object (top) block device.
* Called by cas_disk when cas_disk device is in attached mode.
*
* @return casdsk_BIO_HANDLED when bio was handled.
* Otherwise casdsk_BIO_NOT_HANDLED. In this case bio will be submitted
* to I/O scheduler and should be handled by request_fn.
*/
int (*make_request_fn)(struct casdsk_disk *dsk, struct request_queue *q,
void (*submit_bio)(struct casdsk_disk *dsk,
struct bio *bio, void *private);
/**

View File

@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/blkdev.h>
struct casdsk_stored_config {
size_t n_blobs;

View File

@ -67,17 +67,9 @@ void casdsk_deinit_exp_objs(void)
}
/*
 * Attached mode: hand the bio straight to the exported object's
 * submit_bio callback (see struct casdsk_exp_obj_ops).
 */
static inline void _casdsk_exp_obj_handle_bio_att(struct casdsk_disk *dsk,
		struct bio *bio)
{
	dsk->exp_obj->ops->submit_bio(dsk, bio, dsk->private);
}
CAS_DECLARE_BLOCK_CALLBACK(_casdsk_exp_obj_bio_pt_io, struct bio *bio,
@ -102,7 +94,6 @@ CAS_DECLARE_BLOCK_CALLBACK(_casdsk_exp_obj_bio_pt_io, struct bio *bio,
}
static inline void _casdsk_exp_obj_handle_bio_pt(struct casdsk_disk *dsk,
struct request_queue *q,
struct bio *bio)
{
struct bio *cloned_bio;
@ -133,13 +124,12 @@ static inline void _casdsk_exp_obj_handle_bio_pt(struct casdsk_disk *dsk,
}
static inline void _casdsk_exp_obj_handle_bio(struct casdsk_disk *dsk,
struct request_queue *q,
struct bio *bio)
{
if (likely(casdsk_disk_is_attached(dsk)))
_casdsk_exp_obj_handle_bio_att(dsk, q, bio);
_casdsk_exp_obj_handle_bio_att(dsk, bio);
else if (casdsk_disk_is_pt(dsk))
_casdsk_exp_obj_handle_bio_pt(dsk, q, bio);
_casdsk_exp_obj_handle_bio_pt(dsk, bio);
else if (casdsk_disk_is_shutdown(dsk))
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-EIO));
else
@ -176,26 +166,29 @@ retry:
return cpu;
}
static MAKE_RQ_RET_TYPE _casdsk_exp_obj_make_rq_fn(struct request_queue *q,
struct bio *bio)
/*
 * Common bio entry point for the exported object's block device.
 * Resolves the casdsk_disk from the bio's gendisk private_data and
 * dispatches according to disk mode (attached / pass-through / shutdown).
 */
static MAKE_RQ_RET_TYPE _casdsk_exp_obj_submit_bio(struct bio *bio)
{
	struct casdsk_disk *dsk;
	unsigned int cpu;

	BUG_ON(!bio);

	/* gendisk private_data is set to the casdsk_disk at object creation */
	dsk = CAS_BIO_GET_GENDISK(bio)->private_data;

	cpu = _casdsk_exp_obj_begin_rq(dsk);

	_casdsk_exp_obj_handle_bio(dsk, bio);

	_casdsk_exp_obj_end_rq(dsk, cpu);

	KRETURN(0);
}
/*
 * make_request_fn wrapper for kernels without
 * block_device_operations.submit_bio; forwards to the common entry point.
 */
static MAKE_RQ_RET_TYPE _casdsk_exp_obj_make_rq_fn(struct request_queue *q,
struct bio *bio)
{
return _casdsk_exp_obj_submit_bio(bio);
}
static int _casdsk_get_next_part_no(struct block_device *bd)
{
int part_no = 0;
@ -369,6 +362,7 @@ static void _casdsk_exp_obj_clear_dev_t(struct casdsk_disk *dsk)
static const struct block_device_operations _casdsk_exp_obj_ops = {
.owner = THIS_MODULE,
CAS_SET_SUBMIT_BIO(_casdsk_exp_obj_submit_bio)
};
static int casdsk_exp_obj_alloc(struct casdsk_disk *dsk)
@ -611,7 +605,6 @@ int casdsk_exp_obj_create(struct casdsk_disk *dsk, const char *dev_name,
gd->private_data = dsk;
strlcpy(gd->disk_name, exp_obj->dev_name, sizeof(gd->disk_name));
dsk->exp_obj->mk_rq_fn = cas_get_default_mk_request_fn(queue);
cas_blk_queue_make_request(queue, _casdsk_exp_obj_make_rq_fn);
if (exp_obj->ops->set_geometry) {

View File

@ -28,8 +28,6 @@ struct casdsk_exp_obj {
struct casdsk_exp_obj_ops *ops;
make_request_fn *mk_rq_fn;
const char *dev_name;
struct kobject kobj;