Merge pull request #1522 from robertbaldyga/forward_io

Implement forward_io interface
Robert Baldyga 2024-09-20 18:42:35 +02:00 committed by GitHub
commit cf4a40e6c0
15 changed files with 190 additions and 533 deletions
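
For orientation: this change replaces the legacy submit_io/submit_flush/submit_discard volume callbacks, which kept per-IO state in an io_priv-allocated struct blkio, with OCF's forward_io/forward_flush/forward_discard callbacks, which operate on an opaque ocf_forward_token_t instead of a struct ocf_io. A minimal sketch of the handler shape, using only the OCF helpers that appear in the diff below (everything else is illustrative):

#include "ocf/ocf.h"	/* OCF types, as included by the Open CAS sources */

/* Sketch of a forward_io handler with the signature introduced in this
 * diff; error handling and actual bio submission are elided. */
static void sketch_forward_io(ocf_volume_t volume, ocf_forward_token_t token,
		int dir, uint64_t addr, uint64_t bytes, uint64_t offset)
{
	struct blk_data *data = ocf_forward_get_data(token);

	if (!data) {
		ocf_forward_end(token, -OCF_ERR_INVAL);
		return;
	}

	/* One ocf_forward_get() per child request sent down... */
	ocf_forward_get(token);
	/* ...submit the backing-device IO for (addr, bytes) here; its
	 * completion calls ocf_forward_end(token, err)... */

	/* ...and the final ocf_forward_end() pairs with the token's initial
	 * reference, completing the forwarded IO once all children finish. */
	ocf_forward_end(token, 0);
}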

View File

@@ -1,34 +0,0 @@
#!/bin/bash
#
# Copyright(c) 2012-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
. $(dirname $3)/conf_framework.sh
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "struct bio *b;blk_rq_append_bio(NULL, &b);" "linux/blkdev.h"
then
echo $cur_name "1" >> $config_file_path
else
echo $cur_name "2" >> $config_file_path
fi
}
apply() {
case "$1" in
"1")
add_define "cas_blk_rq_append_bio(rq, bounce_bio) \\
blk_rq_append_bio(rq, &bounce_bio)" ;;
"2")
add_define "cas_blk_rq_append_bio(rq, bounce_bio) \\
blk_rq_append_bio(rq, bounce_bio)" ;;
*)
exit 1
esac
}
conf_run $@
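
The conf_framework.sh scripts touched in this PR all follow the same probe pattern: check() test-compiles a snippet against the target kernel's headers and records a variant tag in the config file, and apply() emits a matching wrapper macro. For the script above, the two variants would expand roughly as follows (a sketch; the generated-header layout is an assumption):

/* Exactly one of these ends up in the generated config header. */

/* variant "1": blk_rq_append_bio() takes struct bio ** */
#define cas_blk_rq_append_bio(rq, bounce_bio) \
	blk_rq_append_bio(rq, &bounce_bio)

/* variant "2": blk_rq_append_bio() takes struct bio * directly */
#define cas_blk_rq_append_bio(rq, bounce_bio) \
	blk_rq_append_bio(rq, bounce_bio)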

View File

@@ -1,6 +1,7 @@
#!/bin/bash
#
# Copyright(c) 2012-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -25,15 +26,11 @@ apply() {
"1")
add_define "CAS_BIO_BISIZE(bio) \\
bio->bi_iter.bi_size"
add_define "CAS_BIO_BIIDX(bio) \\
bio->bi_iter.bi_idx"
add_define "CAS_BIO_BISECTOR(bio) \\
bio->bi_iter.bi_sector" ;;
"2")
add_define "CAS_BIO_BISIZE(bio) \\
bio->bi_size"
add_define "CAS_BIO_BIIDX(bio) \\
bio->bi_idx"
add_define "CAS_BIO_BISECTOR(bio) \\
bio->bi_sector" ;;
*)

View File

@@ -1,34 +0,0 @@
#!/bin/bash
#
# Copyright(c) 2012-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
. $(dirname $3)/conf_framework.sh
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "blk_mq_end_request(NULL, 0);" "linux/blk-mq.h"
then
echo $cur_name "1" >> $config_file_path
elif compile_module $cur_name "blk_end_request_all(NULL, 0);" "linux/blkdev.h"
then
echo $cur_name "2" >> $config_file_path
else
echo $cur_name "X" >> $config_file_path
fi
}
apply() {
case "$1" in
"1")
add_define "CAS_END_REQUEST_ALL blk_mq_end_request" ;;
"2")
add_define "CAS_END_REQUEST_ALL blk_end_request_all" ;;
*)
exit 1
esac
}
conf_run $@

View File

@@ -1,31 +0,0 @@
#!/bin/bash
#
# Copyright(c) 2012-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
. $(dirname $3)/conf_framework.sh
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "kallsyms_on_each_symbol(NULL, NULL);" "linux/fs.h"
then
echo $cur_name "1" >> $config_file_path
else
echo $cur_name "2" >> $config_file_path
fi
}
apply() {
case "$1" in
"1")
add_define "SYMBOL_LOOKUP_SUPPORTED 1" ;;
"2")
;;
*)
exit 1
esac
}
conf_run $@

View File

@@ -1,33 +0,0 @@
#!/bin/bash
#
# Copyright(c) 2012-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
. $(dirname $3)/conf_framework.sh
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "blk_queue_bounce(NULL, NULL);" "linux/blkdev.h"
then
echo $cur_name "1" >> $config_file_path
else
echo $cur_name "2" >> $config_file_path
fi
}
apply() {
case "$1" in
"1")
add_define "cas_blk_queue_bounce(q, bounce_bio) \\
blk_queue_bounce(q, bounce_bio)" ;;
"2")
add_define "cas_blk_queue_bounce(q, bounce_bio) \\
({})" ;;
*)
exit 1
esac
}
conf_run $@

View File

@@ -1,38 +0,0 @@
#!/bin/bash
#
# Copyright(c) 2012-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
. $(dirname $3)/conf_framework.sh
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "struct request_queue *q; spin_lock_irq(q->queue_lock);"\
"linux/blkdev.h"
then
echo $cur_name "1" >> $config_file_path
elif compile_module $cur_name "struct request_queue *q; spin_lock_irq(&q->queue_lock);"\
"linux/blkdev.h"
then
echo $cur_name "2" >> $config_file_path
else
echo $cur_name "X" >> $config_file_path
fi
}
apply() {
case "$1" in
"1")
add_define "CAS_QUEUE_SPIN_LOCK(q) spin_lock_irq(q->queue_lock)"
add_define "CAS_QUEUE_SPIN_UNLOCK(q) spin_unlock_irq(q->queue_lock)" ;;
"2")
add_define "CAS_QUEUE_SPIN_LOCK(q) spin_lock_irq(&q->queue_lock)"
add_define "CAS_QUEUE_SPIN_UNLOCK(q) spin_unlock_irq(&q->queue_lock)" ;;
*)
exit 1
esac
}
conf_run $@

View File

@@ -38,7 +38,7 @@ struct blk_data {
/**
* @brief CAS IO with which data is associated
*/
struct ocf_io *io;
ocf_io_t io;
/**
* @brief Timestamp of start processing request

View File

@@ -37,12 +37,9 @@ static inline void bd_release_from_disk(struct block_device *bdev,
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
#define KRETURN(x) ({ return (x); })
#define MAKE_RQ_RET_TYPE blk_qc_t
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
#else
#define KRETURN(x) return
#define MAKE_RQ_RET_TYPE void
#else
#define KRETURN(x) ({ return (x); })
#define MAKE_RQ_RET_TYPE int
#endif
/* For RHEL 9.x we assume backport from kernel 5.18+ */
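The KRETURN()/MAKE_RQ_RET_TYPE pair papers over the changing return type of the block layer's make_request entry point across kernel versions (int on pre-3.2 kernels, then void, then blk_qc_t); this hunk drops the pre-3.2 int variant. A hypothetical user, for illustration only:

/* Sketch: one make_request implementation compiles on all supported
 * kernels; the macros absorb the return-type differences. */
static MAKE_RQ_RET_TYPE cas_make_request(struct request_queue *q,
		struct bio *bio)
{
	/* ...route the bio into the CAS engine... */

	KRETURN(0);	/* expands to `return (0);` or a bare `return;` */
}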

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -47,23 +48,6 @@
#include <linux/slab_def.h>
#endif
#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 0, 0)
#include <generated/utsrelease.h>
#ifdef UTS_UBUNTU_RELEASE_ABI
#define CAS_UBUNTU
#endif
#endif
/*
* For an 8KB process kernel stack, check if the request is not contiguous
* and submit each bio as a separate request. This prevents the nvme driver
* from splitting requests.
* For large requests, nvme splitting causes stack overrun.
*/
#if THREAD_SIZE <= 8192
#define RQ_CHECK_CONTINOUS
#endif
#ifndef SHRT_MIN
#define SHRT_MIN ((s16)-32768)
#endif
@@ -74,16 +58,6 @@
#define ENOTSUP ENOTSUPP
#ifdef RHEL_RELEASE_VERSION
#if RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 3)
#define CAS_RHEL_73
#endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
#define CAS_GARBAGE_COLLECTOR
#endif
/* rate-limited printk */
#define CAS_PRINT_RL(...) \
if (printk_ratelimit()) \

View File

@@ -192,42 +192,6 @@ uint32_t cas_io_iter_zero(struct bio_vec_iter *dst, uint32_t bytes)
return zeroed;
}
/*
*
*/
int cas_blk_io_set_data(struct ocf_io *io,
ctx_data_t *ctx_data, uint32_t offset)
{
struct blkio *blkio = cas_io_to_blkio(io);
struct blk_data *data = ctx_data;
/* Set BIO vector (IO data) and initialize iterator */
blkio->data = data;
if (blkio->data) {
cas_io_iter_init(&blkio->iter, blkio->data->vec,
blkio->data->size);
/* Move into specified offset in BIO vector iterator */
if (offset != cas_io_iter_move(&blkio->iter, offset)) {
/* TODO Log message */
blkio->error = -ENOBUFS;
return -ENOBUFS;
}
}
return 0;
}
/*
*
*/
ctx_data_t *cas_blk_io_get_data(struct ocf_io *io)
{
struct blkio *blkio = cas_io_to_blkio(io);
return blkio->data;
}
int cas_blk_open_volume_by_bdev(ocf_volume_t *vol, struct block_device *bdev)
{
struct bd_object *bdobj;

View File

@@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,27 +10,6 @@
#include "obj_blk.h"
#include "context.h"
struct blkio {
int error;
atomic_t rq_remaning;
atomic_t ref_counter;
int32_t dir;
struct blk_data *data; /* IO data buffer */
/* BIO vector iterator for sending IO */
struct bio_vec_iter iter;
};
static inline struct blkio *cas_io_to_blkio(struct ocf_io *io)
{
return ocf_io_get_priv(io);
}
int cas_blk_io_set_data(struct ocf_io *io, ctx_data_t *data,
uint32_t offset);
ctx_data_t *cas_blk_io_get_data(struct ocf_io *io);
int cas_blk_open_volume_by_bdev(ocf_volume_t *vol,
struct block_device *bdev);
void cas_blk_close_volume(ocf_volume_t vol);

View File

@@ -86,15 +86,15 @@ static uint64_t block_dev_get_byte_length(ocf_volume_t vol)
*
*/
static inline struct bio *cas_bd_io_alloc_bio(struct block_device *bdev,
struct blkio *bdio)
struct bio_vec_iter *iter)
{
struct bio *bio
= cas_bio_alloc(bdev, GFP_NOIO, cas_io_iter_size_left(&bdio->iter));
= cas_bio_alloc(bdev, GFP_NOIO, cas_io_iter_size_left(iter));
if (bio)
return bio;
if (cas_io_iter_size_left(&bdio->iter) < MAX_LINES_PER_IO) {
if (cas_io_iter_size_left(iter) < MAX_LINES_PER_IO) {
/* BIO vector was small, so the allocation failure was
* a common memory problem - no RAM!
*/
@@ -105,211 +105,6 @@ static inline struct bio *cas_bd_io_alloc_bio(struct block_device *bdev,
return cas_bio_alloc(bdev, GFP_NOIO, MAX_LINES_PER_IO);
}
/*
*
*/
static void cas_bd_io_end(struct ocf_io *io, int error)
{
struct blkio *bdio = cas_io_to_blkio(io);
if (error)
bdio->error |= error;
if (atomic_dec_return(&bdio->rq_remaning))
return;
CAS_DEBUG_MSG("Completion");
/* Send completion to caller */
io->end(io, bdio->error);
}
/*
*
*/
CAS_DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio,
unsigned int bytes_done, int error)
{
struct ocf_io *io;
struct blkio *bdio;
struct bd_object *bdobj;
int err;
BUG_ON(!bio);
BUG_ON(!bio->bi_private);
CAS_BLOCK_CALLBACK_INIT(bio);
io = bio->bi_private;
bdobj = bd_object(ocf_io_get_volume(io));
BUG_ON(!bdobj);
err = CAS_BLOCK_CALLBACK_ERROR(bio, error);
bdio = cas_io_to_blkio(io);
BUG_ON(!bdio);
CAS_DEBUG_TRACE();
if (err == -EOPNOTSUPP && (CAS_BIO_OP_FLAGS(bio) & CAS_BIO_DISCARD))
err = 0;
cas_bd_io_end(io, err);
bio_put(bio);
CAS_BLOCK_CALLBACK_RETURN();
}
static void block_dev_submit_flush(struct ocf_io *io)
{
struct blkio *blkio = cas_io_to_blkio(io);
struct bd_object *bdobj = bd_object(ocf_io_get_volume(io));
struct block_device *bdev = bdobj->btm_bd;
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = NULL;
/* Prevent races of completing IO */
atomic_set(&blkio->rq_remaning, 1);
if (q == NULL) {
/* No queue, error */
blkio->error = -EINVAL;
goto out;
}
if (!CAS_CHECK_QUEUE_FLUSH(q)) {
/* This block device does not support flush; complete back to the caller */
goto out;
}
bio = cas_bio_alloc(bdev, GFP_NOIO, 0);
if (bio == NULL) {
CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for BIO\n");
blkio->error = -ENOMEM;
goto out;
}
blkio->dir = io->dir;
bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_io_end);
CAS_BIO_SET_DEV(bio, bdev);
bio->bi_private = io;
atomic_inc(&blkio->rq_remaning);
cas_submit_bio(CAS_SET_FLUSH(io->dir), bio);
out:
cas_bd_io_end(io, blkio->error);
}
static void block_dev_submit_discard(struct ocf_io *io)
{
struct blkio *blkio = cas_io_to_blkio(io);
struct bd_object *bdobj = bd_object(ocf_io_get_volume(io));
struct block_device *bd = bdobj->btm_bd;
struct request_queue *q = bdev_get_queue(bd);
struct bio *bio = NULL;
unsigned int max_discard_sectors, granularity, bio_sects;
int alignment;
sector_t sects, start, end, tmp;
/* Prevent races of completing IO */
atomic_set(&blkio->rq_remaning, 1);
if (!q) {
/* No queue, error */
blkio->error = -ENXIO;
goto out;
}
if (!cas_has_discard_support(bd)) {
/* Discard is not supported by bottom device, send completion
* to caller
*/
goto out;
}
granularity = max(q->limits.discard_granularity >> SECTOR_SHIFT, 1U);
alignment = (bdev_discard_alignment(bd) >> SECTOR_SHIFT) % granularity;
max_discard_sectors =
min(q->limits.max_discard_sectors, UINT_MAX >> SECTOR_SHIFT);
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors))
goto out;
sects = io->bytes >> SECTOR_SHIFT;
start = io->addr >> SECTOR_SHIFT;
while (sects) {
bio = cas_bio_alloc(bd, GFP_NOIO, 1);
if (!bio) {
CAS_PRINT_RL(CAS_KERN_ERR "Couldn't allocate memory for BIO\n");
blkio->error = -ENOMEM;
break;
}
bio_sects = min_t(sector_t, sects, max_discard_sectors);
end = start + bio_sects;
tmp = end;
if (bio_sects < sects &&
sector_div(tmp, granularity) != alignment) {
end = end - alignment;
sector_div(end, granularity);
end = end * granularity + alignment;
bio_sects = end - start;
}
CAS_BIO_SET_DEV(bio, bd);
CAS_BIO_BISECTOR(bio) = start;
CAS_BIO_BISIZE(bio) = bio_sects << SECTOR_SHIFT;
bio->bi_next = NULL;
bio->bi_private = io;
bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_io_end);
atomic_inc(&blkio->rq_remaning);
cas_submit_bio(CAS_BIO_DISCARD, bio);
sects -= bio_sects;
start = end;
cond_resched();
}
out:
cas_bd_io_end(io, blkio->error);
}
static inline bool cas_bd_io_prepare(int *dir, struct ocf_io *io)
{
struct blkio *bdio = cas_io_to_blkio(io);
/* Setup DIR */
bdio->dir = *dir;
/* Convert CAS direction into kernel values */
switch (bdio->dir) {
case OCF_READ:
*dir = READ;
break;
case OCF_WRITE:
*dir = WRITE;
break;
default:
bdio->error = -EINVAL;
break;
}
if (!io->bytes) {
/* Do not accept an empty request */
CAS_PRINT_RL(KERN_ERR "Invalid zero size IO\n");
bdio->error = -EINVAL;
}
if (bdio->error)
return false;
return true;
}
/*
* Returns only flags that are relevant to request's direction.
*/
@@ -328,44 +123,57 @@ static inline uint64_t filter_req_flags(int dir, uint64_t flags)
/*
*
*/
static void block_dev_submit_io(struct ocf_io *io)
CAS_DECLARE_BLOCK_CALLBACK(cas_bd_forward_end, struct bio *bio,
unsigned int bytes_done, int error)
{
struct blkio *bdio = cas_io_to_blkio(io);
struct bd_object *bdobj = bd_object(ocf_io_get_volume(io));
struct bio_vec_iter *iter = &bdio->iter;
uint64_t addr = io->addr;
uint32_t bytes = io->bytes;
int dir = io->dir;
struct blk_plug plug;
ocf_forward_token_t token;
int err;
if (CAS_IS_SET_FLUSH(io->flags)) {
CAS_DEBUG_MSG("Flush request");
/* It is a flush request, handle it */
block_dev_submit_flush(io);
return;
CAS_BLOCK_CALLBACK_INIT(bio);
token = (ocf_forward_token_t)bio->bi_private;
err = CAS_BLOCK_CALLBACK_ERROR(bio, error);
CAS_DEBUG_TRACE();
if (err == -EOPNOTSUPP && (CAS_BIO_OP_FLAGS(bio) & CAS_BIO_DISCARD))
err = 0;
ocf_forward_end(token, err);
bio_put(bio);
CAS_BLOCK_CALLBACK_RETURN();
}
static void block_dev_forward_io(ocf_volume_t volume,
ocf_forward_token_t token, int dir, uint64_t addr,
uint64_t bytes, uint64_t offset)
{
struct bd_object *bdobj = bd_object(volume);
struct blk_data *data = ocf_forward_get_data(token);
uint64_t flags = ocf_forward_get_flags(token);
int bio_dir = (dir == OCF_READ) ? READ : WRITE;
struct bio_vec_iter iter;
struct blk_plug plug;
int error = 0;
CAS_DEBUG_PARAM("Address = %llu, bytes = %u\n", addr, bytes);
/* Prevent races of completing IO */
atomic_set(&bdio->rq_remaning, 1);
if (!cas_bd_io_prepare(&dir, io)) {
CAS_DEBUG_MSG("Invalid request");
cas_bd_io_end(io, -EINVAL);
cas_io_iter_init(&iter, data->vec, data->size);
if (offset != cas_io_iter_move(&iter, offset)) {
ocf_forward_end(token, -OCF_ERR_INVAL);
return;
}
blk_start_plug(&plug);
while (cas_io_iter_is_next(iter) && bytes) {
while (cas_io_iter_is_next(&iter) && bytes) {
/* Still IO vectors to be sent */
/* Allocate BIO */
struct bio *bio = cas_bd_io_alloc_bio(bdobj->btm_bd, bdio);
struct bio *bio = cas_bd_io_alloc_bio(bdobj->btm_bd, &iter);
if (!bio) {
bdio->error = -ENOMEM;
error = -ENOMEM;
break;
}
@@ -373,15 +181,15 @@ static void block_dev_submit_io(struct ocf_io *io)
CAS_BIO_SET_DEV(bio, bdobj->btm_bd);
CAS_BIO_BISECTOR(bio) = addr / SECTOR_SIZE;
bio->bi_next = NULL;
bio->bi_private = io;
CAS_BIO_OP_FLAGS(bio) |= filter_req_flags(dir, io->flags);
bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_io_end);
bio->bi_private = (void *)token;
CAS_BIO_OP_FLAGS(bio) |= filter_req_flags(bio_dir, flags);
bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_forward_end);
/* Add pages */
while (cas_io_iter_is_next(iter) && bytes) {
struct page *page = cas_io_iter_current_page(iter);
uint32_t offset = cas_io_iter_current_offset(iter);
uint32_t length = cas_io_iter_current_length(iter);
while (cas_io_iter_is_next(&iter) && bytes) {
struct page *page = cas_io_iter_current_page(&iter);
uint32_t offset = cas_io_iter_current_offset(&iter);
uint32_t length = cas_io_iter_current_length(&iter);
int added;
if (length > bytes)
@@ -400,19 +208,19 @@ static void block_dev_submit_io(struct ocf_io *io)
addr += added;
/* Update BIO vector iterator */
if (added != cas_io_iter_move(iter, added)) {
bdio->error = -ENOBUFS;
if (added != cas_io_iter_move(&iter, added)) {
error = -ENOBUFS;
break;
}
}
if (bdio->error == 0) {
if (error == 0) {
/* Increase IO reference for sending this IO */
atomic_inc(&bdio->rq_remaning);
ocf_forward_get(token);
/* Send BIO */
CAS_DEBUG_MSG("Submit IO");
cas_submit_bio(dir, bio);
cas_submit_bio(bio_dir, bio);
bio = NULL;
} else {
if (bio) {
@@ -424,41 +232,146 @@ static void block_dev_submit_io(struct ocf_io *io)
break;
}
}
blk_finish_plug(&plug);
if (bytes && bdio->error == 0) {
if (bytes && error == 0) {
/* Not all bytes sent, mark error */
bdio->error = -ENOBUFS;
error = -ENOBUFS;
}
/* Prevent races of completing IO when
* there are still child IOs not being sent.
*/
cas_bd_io_end(io, 0);
ocf_forward_end(token, error);
}
static void block_dev_forward_flush(ocf_volume_t volume,
ocf_forward_token_t token)
{
struct bd_object *bdobj = bd_object(volume);
struct request_queue *q = bdev_get_queue(bdobj->btm_bd);
struct bio *bio;
if (!q) {
/* No queue, error */
ocf_forward_end(token, -OCF_ERR_INVAL);
return;
}
if (!CAS_CHECK_QUEUE_FLUSH(q)) {
/* This block device does not support flush; complete back to the caller */
ocf_forward_end(token, 0);
return;
}
bio = cas_bio_alloc(bdobj->btm_bd, GFP_NOIO, 0);
if (!bio) {
CAS_PRINT_RL(KERN_ERR "Couldn't allocate memory for BIO\n");
ocf_forward_end(token, -OCF_ERR_NO_MEM);
return;
}
CAS_BIO_SET_DEV(bio, bdobj->btm_bd);
bio->bi_private = (void *)token;
bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_forward_end);
cas_submit_bio(CAS_SET_FLUSH(0), bio);
}
static void block_dev_forward_discard(ocf_volume_t volume,
ocf_forward_token_t token, uint64_t addr, uint64_t bytes)
{
struct bd_object *bdobj = bd_object(volume);
struct request_queue *q = bdev_get_queue(bdobj->btm_bd);
struct bio *bio;
int error = 0;
unsigned int max_discard_sectors, granularity, bio_sects;
int alignment;
sector_t sects, start, end, tmp;
if (!q) {
/* No queue, error */
ocf_forward_end(token, -OCF_ERR_INVAL);
return;
}
if (!cas_has_discard_support(bdobj->btm_bd)) {
/* Discard is not supported by bottom device, send completion
* to caller
*/
ocf_forward_end(token, 0);
return;
}
granularity = max(q->limits.discard_granularity >> SECTOR_SHIFT, 1U);
alignment = (bdev_discard_alignment(bdobj->btm_bd) >> SECTOR_SHIFT)
% granularity;
max_discard_sectors =
min(q->limits.max_discard_sectors, UINT_MAX >> SECTOR_SHIFT);
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors)) {
ocf_forward_end(token, -OCF_ERR_INVAL);
return;
}
sects = bytes >> SECTOR_SHIFT;
start = addr >> SECTOR_SHIFT;
while (sects) {
bio = cas_bio_alloc(bdobj->btm_bd, GFP_NOIO, 1);
if (!bio) {
CAS_PRINT_RL(CAS_KERN_ERR "Couldn't allocate memory for BIO\n");
error = -OCF_ERR_NO_MEM;
break;
}
bio_sects = min_t(sector_t, sects, max_discard_sectors);
end = start + bio_sects;
tmp = end;
if (bio_sects < sects &&
sector_div(tmp, granularity) != alignment) {
end = end - alignment;
sector_div(end, granularity);
end = end * granularity + alignment;
bio_sects = end - start;
}
CAS_BIO_SET_DEV(bio, bdobj->btm_bd);
CAS_BIO_BISECTOR(bio) = start;
CAS_BIO_BISIZE(bio) = bio_sects << SECTOR_SHIFT;
bio->bi_next = NULL;
bio->bi_private = (void *)token;
bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_forward_end);
ocf_forward_get(token);
cas_submit_bio(CAS_BIO_DISCARD, bio);
sects -= bio_sects;
start = end;
cond_resched();
}
ocf_forward_end(token, error);
}
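
The loop above trims each child discard so that every split point, except possibly the final one, lands on a discard_granularity boundary offset by the device's discard alignment. A worked sketch of that rounding with illustrative numbers, runnable in userspace (sector_div() replaced by plain 64-bit division):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t granularity = 8;	/* discard granularity, in sectors */
	uint64_t alignment = 2;		/* discard alignment, in sectors */
	uint64_t start = 11, sects = 100, max_discard_sectors = 32;

	uint64_t bio_sects = sects < max_discard_sectors ? sects : max_discard_sectors;
	uint64_t end = start + bio_sects;		/* 11 + 32 = 43 */

	if (bio_sects < sects && (end % granularity) != alignment) {
		/* 43 % 8 = 3 != 2: round end down to the previous granularity
		 * boundary, then re-apply the alignment offset. */
		end = ((end - alignment) / granularity) * granularity + alignment;
		bio_sects = end - start;		/* end = 42, bio_sects = 31 */
	}

	/* The next child then starts exactly on a granularity+alignment
	 * boundary: 42 % 8 == 2 == alignment. */
	printf("end=%llu bio_sects=%llu\n",
			(unsigned long long)end, (unsigned long long)bio_sects);
	return 0;
}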
const struct ocf_volume_properties cas_object_blk_properties = {
.name = "Block_Device",
.io_priv_size = sizeof(struct blkio),
.volume_priv_size = sizeof(struct bd_object),
.caps = {
.atomic_writes = 0, /* Atomic writes not supported */
},
.ops = {
.submit_io = block_dev_submit_io,
.submit_flush = block_dev_submit_flush,
.submit_metadata = NULL,
.submit_discard = block_dev_submit_discard,
.forward_io = block_dev_forward_io,
.forward_flush = block_dev_forward_flush,
.forward_discard = block_dev_forward_discard,
.open = block_dev_open_object,
.close = block_dev_close_object,
.get_max_io_size = block_dev_get_max_io_size,
.get_length = block_dev_get_byte_length,
},
.io_ops = {
.set_data = cas_blk_io_set_data,
.get_data = cas_blk_io_get_data,
},
.deinit = NULL,
};

View File

@@ -6,7 +6,6 @@
#ifndef __VOL_BLOCK_DEV_BOTTOM_H__
#define __VOL_BLOCK_DEV_BOTTOM_H__
int block_dev_init(void);
#endif /* __VOL_BLOCK_DEV_BOTTOM_H__ */

View File

@@ -189,10 +189,11 @@ static void blkdev_complete_data_master(struct blk_data *master, int error)
cas_free_blk_data(master);
}
static void blkdev_complete_data(struct ocf_io *io, int error)
static void blkdev_complete_data(ocf_io_t io, void *priv1, void *priv2,
int error)
{
struct bio *bio = io->priv1;
struct blk_data *master = io->priv2;
struct bio *bio = priv1;
struct blk_data *master = priv2;
struct blk_data *data = ocf_io_get_data(io);
ocf_io_put(io);
@@ -217,7 +218,7 @@ static int blkdev_handle_data_single(struct bd_object *bvol, struct bio *bio,
ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
struct ocf_io *io;
ocf_io_t io;
struct blk_data *data;
uint64_t flags = CAS_BIO_OP_FLAGS(bio);
int ret;
@@ -317,9 +318,10 @@ err:
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
}
static void blkdev_complete_discard(struct ocf_io *io, int error)
static void blkdev_complete_discard(ocf_io_t io, void *priv1, void *priv2,
int error)
{
struct bio *bio = io->priv1;
struct bio *bio = priv1;
int result = map_cas_err_to_generic(error);
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(result));
@@ -331,7 +333,7 @@ static void blkdev_handle_discard(struct bd_object *bvol, struct bio *bio)
ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
struct ocf_io *io;
ocf_io_t io;
io = ocf_volume_new_io(bvol->front_volume, queue,
CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
@@ -356,10 +358,11 @@ static void blkdev_handle_bio_noflush(struct bd_object *bvol, struct bio *bio)
blkdev_handle_data(bvol, bio);
}
static void blkdev_complete_flush(struct ocf_io *io, int error)
static void blkdev_complete_flush(ocf_io_t io, void *priv1, void *priv2,
int error)
{
struct bio *bio = io->priv1;
struct bd_object *bvol = io->priv2;
struct bio *bio = priv1;
struct bd_object *bvol = priv2;
int result = map_cas_err_to_generic(error);
ocf_io_put(io);
@@ -378,7 +381,7 @@ static void blkdev_handle_flush(struct bd_object *bvol, struct bio *bio)
ocf_cache_t cache = ocf_volume_get_cache(bvol->front_volume);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
ocf_queue_t queue = cache_priv->io_queues[smp_processor_id()];
struct ocf_io *io;
ocf_io_t io;
io = ocf_volume_new_io(bvol->front_volume, queue, 0, 0, OCF_WRITE, 0,
CAS_SET_FLUSH(0));
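
The hunks above track an OCF API change: completion callbacks now receive their priv1/priv2 context as arguments instead of reading io->priv1/io->priv2. A sketch of how such a completion is wired up, assuming ocf_io_set_cmpl() keeps its (io, context, context2, callback) shape; names are illustrative:

static void sketch_complete(ocf_io_t io, void *priv1, void *priv2, int error)
{
	struct bio *bio = priv1;	/* was io->priv1 before this change */

	CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
	ocf_io_put(io);
}

static void sketch_submit(ocf_io_t io, struct bio *bio)
{
	ocf_io_set_cmpl(io, bio, NULL, sketch_complete);
	/* ...then submit the io through the usual OCF entry point... */
}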

View File

ocf (submodule)

@@ -1 +1 @@
Subproject commit 1fbb00de8f40bab42eb5625053a61536c49382cb
Subproject commit 6907abeba2359fc02b75b0301bcc9112ef717cd5