Merge pull request #829 from robertbaldyga/kernel-5.10

Add support for kernel up to 5.11
This commit is contained in:
Robert Baldyga 2021-06-16 10:38:02 +02:00 committed by GitHub
commit 3e914ee600
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
31 changed files with 616 additions and 1146 deletions

View File

@ -0,0 +1,74 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

# Pull in the shared configure framework (compile_module, add_function,
# conf_run, ...). $3 is the path to this script as passed by the caller.
# Quoted to survive paths containing whitespace.
. $(dirname "$3")/conf_framework

# Probe how the kernel represents disk partitions:
#   "1" - kernels >= 5.11: partitions are struct block_device (bd_partno)
#   "2" - older kernels:   partitions are struct hd_struct (partno)
#   "X" - neither compiles; unsupported kernel
check() {
	cur_name=$(basename "$2")
	config_file_path=$1
	if compile_module $cur_name "struct block_device *bd; bd->bd_partno;" "linux/blk_types.h"
	then
		echo $cur_name "1" >> "$config_file_path"
	elif compile_module $cur_name "struct hd_struct *hd; hd->partno;" "linux/genhd.h"
	then
		echo $cur_name "2" >> "$config_file_path"
	else
		echo $cur_name "X" >> "$config_file_path"
	fi
}

# Emit cas_bd_get_next_part() matching the detected kernel API.
# Both variants return the partition number of the first partition found
# while iterating the disk (0 when the disk has no partitions).
apply() {
	case "$1" in
	"1")
		add_function "
	static inline int cas_bd_get_next_part(struct block_device *bd)
	{
		int part_no = 0;
		struct gendisk *disk = bd->bd_disk;
		struct disk_part_iter piter;
		struct block_device *part;

		mutex_lock(&bd->bd_mutex);
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
		while ((part = disk_part_iter_next(&piter))) {
			part_no = part->bd_partno;
			break;
		}
		disk_part_iter_exit(&piter);
		mutex_unlock(&bd->bd_mutex);

		return part_no;
	}" ;;
	"2")
		add_function "
	static inline int cas_bd_get_next_part(struct block_device *bd)
	{
		int part_no = 0;
		struct gendisk *disk = bd->bd_disk;
		struct disk_part_iter piter;
		struct hd_struct *part;

		mutex_lock(&bd->bd_mutex);
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
		while ((part = disk_part_iter_next(&piter))) {
			part_no = part->partno;
			break;
		}
		disk_part_iter_exit(&piter);
		mutex_unlock(&bd->bd_mutex);

		return part_no;
	}" ;;
	*)
		exit 1
	esac
}

# Quote "$@" so arguments with spaces are forwarded intact.
conf_run "$@"

View File

@ -9,12 +9,15 @@
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "lookup_bdev(\"some_path\")" "linux/fs.h" "linux/blkdev.h"
if compile_module $cur_name "dev_t dev; lookup_bdev(\"some_path\", &dev)" "linux/blkdev.h"
then
echo $cur_name "1" >> $config_file_path
elif compile_module $cur_name "lookup_bdev(\"some_path\", 0)" "linux/fs.h" "linux/blkdev.h"
elif compile_module $cur_name "lookup_bdev(\"some_path\")" "linux/fs.h" "linux/blkdev.h"
then
echo $cur_name "2" >> $config_file_path
elif compile_module $cur_name "lookup_bdev(\"some_path\", 0)" "linux/fs.h" "linux/blkdev.h"
then
echo $cur_name "3" >> $config_file_path
else
echo $cur_name "X" >> $config_file_path
fi
@ -23,11 +26,77 @@ check() {
apply() {
case "$1" in
"1")
add_define "CAS_LOOKUP_BDEV(PATH) \\
lookup_bdev(PATH)" ;;
add_function "
static inline bool cas_bdev_exist(const char *path)
{
dev_t dev;
int result;
result = lookup_bdev(path, &dev);
return !result;
}"
add_function "
static inline bool cas_bdev_match(const char *path, struct block_device *bd)
{
dev_t dev;
int result;
result = lookup_bdev(path, &dev);
if (result)
return false;
return (bd->bd_dev == dev);
}" ;;
"2")
add_define "CAS_LOOKUP_BDEV(PATH) \\
lookup_bdev(PATH, 0)" ;;
add_function "
static inline bool cas_bdev_exist(const char *path)
{
struct block_device *bdev;
bdev = lookup_bdev(path);
if (IS_ERR(bdev))
return false;
bdput(bdev);
return true;
}"
add_function "
static inline bool cas_bdev_match(const char *path, struct block_device *bd)
{
struct block_device *bdev;
bool match = false;
bdev = lookup_bdev(path);
if (IS_ERR(bdev))
return false;
match = (bdev == bd);
bdput(bdev);
return match;
}" ;;
"3")
add_function "
static inline bool cas_bdev_exist(const char *path)
{
struct block_device *bdev;
bdev = lookup_bdev(path, 0);
if (IS_ERR(bdev))
return false;
bdput(bdev);
return true;
}"
add_function "
static inline bool cas_bdev_match(const char *path, struct block_device *bd)
{
struct block_device *bdev;
bool match = false;
bdev = lookup_bdev(path, 0);
if (IS_ERR(bdev))
return false;
match = (bdev == bd);
bdput(bdev);
return match;
}" ;;
*)
exit 1
esac

View File

@ -0,0 +1,37 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

# Shared configure framework; $3 is this script's path (quoted for safety).
. $(dirname "$3")/conf_framework

# Probe how to read a block device's size in sectors:
#   1 - kernel provides bdev_nr_sectors() helper (>= 5.11)
#   2 - legacy layout: size lives in bd->bd_part->nr_sects
#   X - neither compiles; unsupported kernel
check() {
	cur_name=$(basename "$2")
	config_file_path=$1
	if compile_module $cur_name "bdev_nr_sectors(NULL);" "linux/genhd.h"
	then
		echo $cur_name 1 >> "$config_file_path"
	elif compile_module $cur_name "struct block_device *bd; bd->bd_part->nr_sects;" "linux/blk_types.h"
	then
		echo $cur_name 2 >> "$config_file_path"
	else
		echo $cur_name X >> "$config_file_path"
	fi
}

apply() {
	case "$1" in
	"1")
		add_define "cas_bdev_nr_sectors(bd) \\
			bdev_nr_sectors(bd)" ;;
	"2")
		# Parenthesize the macro argument so expressions such as
		# "flag ? a : b" expand correctly at every call site.
		add_define "cas_bdev_nr_sectors(bd) \\
			((bd)->bd_part->nr_sects)" ;;
	*)
		exit 1
	esac
}

conf_run "$@"

View File

@ -0,0 +1,37 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

# Shared configure framework; $3 is this script's path (quoted for safety).
. $(dirname "$3")/conf_framework

# Probe how to obtain the whole-disk block device for a partition:
#   1 - kernel provides bdev_whole() helper (>= 5.11)
#   2 - legacy field: bd->bd_contains points at the whole device
#   X - neither compiles; unsupported kernel
check() {
	cur_name=$(basename "$2")
	config_file_path=$1
	if compile_module $cur_name "struct block_device *bd; bdev_whole(bd);" "linux/blk_types.h" "linux/genhd.h"
	then
		echo $cur_name 1 >> "$config_file_path"
	elif compile_module $cur_name "struct block_device *bd; bd->bd_contains;" "linux/blk_types.h"
	then
		echo $cur_name 2 >> "$config_file_path"
	else
		echo $cur_name X >> "$config_file_path"
	fi
}

apply() {
	case "$1" in
	"1")
		add_define "cas_bdev_whole(bd) \\
			bdev_whole(bd)" ;;
	"2")
		# Parenthesize the macro argument so any expression passed as
		# "bd" expands with the intended precedence.
		add_define "cas_bdev_whole(bd) \\
			((bd)->bd_contains)" ;;
	*)
		exit 1
	esac
}

conf_run "$@"

View File

@ -0,0 +1,43 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

# Shared configure framework; $3 is this script's path (quoted for safety).
. $(dirname "$3")/conf_framework

# Probe how to take a reference on a gendisk's whole-device bdev:
#   1 - kernel provides bdget_disk() (pre-5.11)
#   2 - bdget_disk() removed; use bdgrab() on gd->part0 (>= 5.11,
#       where part0 is itself a struct block_device)
#   X - neither compiles; unsupported kernel
check() {
	cur_name=$(basename "$2")
	config_file_path=$1
	if compile_module $cur_name "bdget_disk(NULL, 0);" "linux/genhd.h"
	then
		echo $cur_name 1 >> "$config_file_path"
	elif compile_module $cur_name "bdgrab(NULL);" "linux/blkdev.h"
	then
		echo $cur_name 2 >> "$config_file_path"
	else
		echo $cur_name X >> "$config_file_path"
	fi
}

# Emit cas_bdget_disk() returning the whole-device block_device
# (partition index 0) for the given gendisk.
apply() {
	case "$1" in
	"1")
		add_function "
	static inline struct block_device *cas_bdget_disk(struct gendisk *gd)
	{
		return bdget_disk(gd, 0);
	}" ;;
	"2")
		add_function "
	static inline struct block_device *cas_bdget_disk(struct gendisk *gd)
	{
		return bdgrab(gd->part0);
	}" ;;
	*)
		exit 1
	esac
}

conf_run "$@"

View File

@ -0,0 +1,34 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

# Shared configure framework; $3 is this script's path (quoted for safety).
. $(dirname "$3")/conf_framework

# Probe how a struct bio references its disk:
#   "1" - bio->bi_disk points directly at the gendisk (5.0 .. 5.10)
#   "2" - bio->bi_bdev is back (>= 5.11); reach the gendisk via bd_disk
#   "X" - neither compiles; unsupported kernel
check() {
	cur_name=$(basename "$2")
	config_file_path=$1
	if compile_module $cur_name "struct bio *bio; bio->bi_disk;" "linux/blk_types.h"
	then
		echo $cur_name "1" >> "$config_file_path"
	elif compile_module $cur_name "struct bio *bio; bio->bi_bdev;" "linux/blk_types.h"
	then
		echo $cur_name "2" >> "$config_file_path"
	else
		echo $cur_name "X" >> "$config_file_path"
	fi
}

apply() {
	case "$1" in
	"1")
		# Parenthesize the macro argument so expressions passed as
		# "bio" expand with the intended precedence.
		add_define "CAS_BIO_GET_GENDISK(bio) ((bio)->bi_disk)" ;;
	"2")
		add_define "CAS_BIO_GET_GENDISK(bio) ((bio)->bi_bdev->bd_disk)" ;;
	*)
		exit 1
	esac
}

conf_run "$@"

View File

@ -21,10 +21,10 @@ apply() {
case "$1" in
"1")
add_define "CAS_BLK_STATUS_T blk_status_t"
add_define "CAS_BLK_STS_OK BLK_STS_OK" ;;
add_define "CAS_BLK_STS_NOTSUPP BLK_STS_NOTSUPP" ;;
"2")
add_define "CAS_BLK_STATUS_T int"
add_define "CAS_BLK_STS_OK 0" ;;
add_define "CAS_BLK_STS_NOTSUPP -ENOTSUPP" ;;
*)
exit 1

View File

@ -1,54 +0,0 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
. $(dirname $3)/conf_framework
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "blk_mq_make_request(NULL, NULL);" "linux/blk-mq.h"
then
echo $cur_name "1" >> $config_file_path
else
echo $cur_name "2" >> $config_file_path
fi
}
apply() {
case "$1" in
"1")
add_function "
static inline void *cas_get_default_mk_request_fn(struct request_queue *q)
{
if (q->make_request_fn)
return q->make_request_fn;
return blk_mq_make_request;
}"
add_function "
static inline void cas_call_default_mk_request_fn(make_request_fn *fn,
struct request_queue *q, struct bio *bio)
{
percpu_ref_get(&q->q_usage_counter);
fn(q, bio);
}" ;;
"2")
add_function "
static inline void *cas_get_default_mk_request_fn(struct request_queue *q)
{
return q->make_request_fn;
}"
add_function "
static inline void cas_call_default_mk_request_fn(make_request_fn *fn,
struct request_queue *q, struct bio *bio)
{
fn(q, bio);
}" ;;
*)
exit 1
esac
}
conf_run $@

View File

@ -9,12 +9,15 @@
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "REQ_PREFLUSH" "linux/blk_types.h"
if compile_module $cur_name "BIO_FLUSH" "linux/bio.h"
then
echo $cur_name "1" >> $config_file_path
elif compile_module $cur_name "REQ_FLUSH" "linux/blk_types.h"
then
echo $cur_name "2" >> $config_file_path
elif compile_module $cur_name "REQ_PREFLUSH" "linux/blk_types.h"
then
echo $cur_name "3" >> $config_file_path
else
echo $cur_name "X" >> $config_file_path
fi
@ -23,15 +26,26 @@ check() {
apply() {
case "$1" in
"1")
add_define "CAS_REQ_FLUSH \\
REQ_PREFLUSH"
add_define "CAS_FLUSH_SUPPORTED \\
1" ;;
add_define "CAS_IS_SET_FLUSH(flags) \\
((flags) & BIO_FLUSH)"
add_define "CAS_SET_FLUSH(flags) \\
((flags) | BIO_FLUSH)"
add_define "CAS_CLEAR_FLUSH(flags) \\
((flags) & ~BIO_FLUSH)" ;;
"2")
add_define "CAS_REQ_FLUSH \\
REQ_FLUSH"
add_define "CAS_FLUSH_SUPPORTED \\
1" ;;
add_define "CAS_IS_SET_FLUSH(flags) \\
((flags) & REQ_FLUSH)"
add_define "CAS_SET_FLUSH(flags) \\
((flags) | REQ_FLUSH)"
add_define "CAS_CLEAR_FLUSH(flags) \\
((flags) & ~REQ_FLUSH)" ;;
"3")
add_define "CAS_IS_SET_FLUSH(flags) \\
((flags) & REQ_PREFLUSH)"
add_define "CAS_SET_FLUSH(flags) \\
((flags) | REQ_PREFLUSH)"
add_define "CAS_CLEAR_FLUSH(flags) \\
((flags) & ~REQ_PREFLUSH)" ;;
*)
exit 1
esac

View File

@ -12,8 +12,11 @@ check() {
if compile_module $cur_name "blk_queue_make_request" "linux/blkdev.h"
then
echo $cur_name "1" >> $config_file_path
else
elif compile_module $cur_name "struct request_queue *q; q->make_request_fn;" "linux/blkdev.h"
then
echo $cur_name "2" >> $config_file_path
else
echo $cur_name "3" >> $config_file_path
fi
}
@ -33,6 +36,13 @@ apply() {
{
q->make_request_fn = mfn;
}" ;;
"3")
add_define "make_request_fn void"
add_function "
static inline void cas_blk_queue_make_request(struct request_queue *q,
make_request_fn *mfn)
{
}" ;;
*)
exit 1
esac

View File

@ -1,47 +0,0 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
. $(dirname $3)/conf_framework
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "REQ_TYPE_FS" "linux/blkdev.h"
then
echo $cur_name "1" >> $config_file_path
else
echo $cur_name "2" >> $config_file_path
fi
}
apply() {
case "$1" in
"1")
add_function "
static inline int cas_is_rq_type_fs(struct request *rq)
{
return rq->cmd_type == REQ_TYPE_FS;
}" ;;
"2")
add_function "
static inline int cas_is_rq_type_fs(struct request *rq)
{
switch (req_op(rq)){
case REQ_OP_READ:
case REQ_OP_WRITE:
case REQ_OP_FLUSH:
case REQ_OP_DISCARD:
return true;
default:
return false;
}
}" ;;
*)
exit 1
esac
}
conf_run $@

View File

@ -9,7 +9,7 @@
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "blk_bidi_rq(rq);" "linux/blkdev.h"
if compile_module $cur_name "struct block_device_operations *ops; ops->submit_bio;" "linux/blkdev.h"
then
echo $cur_name "1" >> $config_file_path
else
@ -20,11 +20,9 @@ check() {
apply() {
case "$1" in
"1")
add_define "CAS_BLK_BIDI_RQ(rq) \\
blk_bidi_rq(rq)" ;;
add_define "CAS_SET_SUBMIT_BIO(_fn) .submit_bio = _fn," ;;
"2")
add_define "CAS_BLK_BIDI_RQ(rq) \\
false" ;;
add_define "CAS_SET_SUBMIT_BIO(_fn)" ;;
*)
exit 1
esac

View File

@ -1,55 +0,0 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
. $(dirname $3)/conf_framework
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "WRITE_FUA" "linux/fs.h"
then
if compile_module $cur_name "BIO_FUA" "linux/bio.h"
then
echo $cur_name "1" >> $config_file_path
else
echo $cur_name "2" >> $config_file_path
fi
elif compile_module $cur_name "REQ_FUA" "linux/blk_types.h"
then
echo $cur_name "3" >> $config_file_path
else
echo $cur_name "4" >> $config_file_path
fi
}
apply() {
case "$1" in
"1")
add_define "CAS_WRITE_FUA \\
WRITE_FUA"
add_define "CAS_IS_WRITE_FUA(flags) \\
((flags) & BIO_FUA)" ;;
"2")
add_define "CAS_WRITE_FUA \\
WRITE_FUA"
add_define "CAS_IS_WRITE_FUA(flags) \\
((flags) & REQ_FUA)" ;;
"3")
add_define "CAS_IS_WRITE_FUA(flags) \\
((flags) & REQ_FUA)"
add_define "CAS_WRITE_FUA \\
REQ_FUA" ;;
"4")
add_define "CAS_IS_WRITE_FUA(flags) \\
0"
add_define "CAS_WRITE_FUA \\
WRITE_BARRIER" ;;
*)
exit 1
esac
}
conf_run $@

View File

@ -1,55 +0,0 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
. $(dirname $3)/conf_framework
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "WRITE_FLUSH_FUA" "linux/fs.h"
then
if compile_module $cur_name "BIO_FUA" "linux/bio.h"
then
echo $cur_name "1" >> $config_file_path
else
echo $cur_name "2" >> $config_file_path
fi
elif compile_module $cur_name "REQ_PREFLUSH" "linux/blk_types.h"
then
echo $cur_name "3" >> $config_file_path
else
echo $cur_name "4" >> $config_file_path
fi
}
apply() {
case "$1" in
"1")
add_define "CAS_WRITE_FLUSH_FUA \\
WRITE_FLUSH_FUA"
add_define "CAS_IS_WRITE_FLUSH_FUA(flags) \\
((BIO_FUA | BIO_FLUSH) == ((flags) & (BIO_FUA | BIO_FLUSH)))" ;;
"2")
add_define "CAS_WRITE_FLUSH_FUA \\
WRITE_FLUSH_FUA"
add_define "CAS_IS_WRITE_FLUSH_FUA(flags) \\
((REQ_FUA | CAS_REQ_FLUSH) == ((flags) & (REQ_FUA | CAS_REQ_FLUSH)))" ;;
"3")
add_define "CAS_IS_WRITE_FLUSH_FUA(flags) \\
((REQ_PREFLUSH | REQ_FUA) == ((flags) & (REQ_PREFLUSH |REQ_FUA)))"
add_define "CAS_WRITE_FLUSH_FUA \\
(REQ_PREFLUSH | REQ_FUA)" ;;
"4")
add_define "CAS_IS_WRITE_FLUSH_FUA(flags) \\
0"
add_define "CAS_WRITE_FLUSH_FUA \\
WRITE_BARRIER" ;;
*)
exit 1
esac
}
conf_run $@

View File

@ -34,56 +34,62 @@ apply() {
"1")
add_function "
static inline unsigned long long cas_generic_start_io_acct(
struct request_queue *q, struct bio *bio,
struct hd_struct *part)
struct bio *bio)
{
return bio_start_io_acct(bio);
}"
add_function "
static inline void cas_generic_end_io_acct(struct request_queue *q,
struct bio *bio, struct hd_struct *part,
unsigned long start_time)
static inline void cas_generic_end_io_acct(
struct bio *bio, unsigned long start_time)
{
bio_end_io_acct(bio, start_time);
}" ;;
"2")
add_function "
static inline unsigned long long cas_generic_start_io_acct(
struct request_queue *q, struct bio *bio,
struct hd_struct *part)
struct bio *bio)
{
generic_start_io_acct(q, bio_data_dir(bio), bio_sectors(bio), part);
struct gendisk *gd = CAS_BIO_GET_DEV(bio);
generic_start_io_acct(gd->queue, bio_data_dir(bio),
bio_sectors(bio), &gd->part0);
return jiffies;
}"
add_function "
static inline void cas_generic_end_io_acct(struct request_queue *q,
struct bio *bio, struct hd_struct *part,
unsigned long start_time)
static inline void cas_generic_end_io_acct(
struct bio *bio, unsigned long start_time)
{
generic_end_io_acct(q, bio_data_dir(bio), part, start_time);
struct gendisk *gd = CAS_BIO_GET_DEV(bio);
generic_end_io_acct(gd->queue, bio_data_dir(bio),
&gd->part0, start_time);
}" ;;
"3")
add_function "
static inline unsigned long long cas_generic_start_io_acct(
struct request_queue *q, struct bio *bio,
struct hd_struct *part)
struct bio *bio)
{
generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio), part);
struct gendisk *gd = CAS_BIO_GET_DEV(bio);
generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio),
&gd->part0);
return jiffies;
}"
add_function "
static inline void cas_generic_end_io_acct(struct request_queue *q,
struct bio *bio, struct hd_struct *part,
unsigned long start_time)
static inline void cas_generic_end_io_acct(
struct bio *bio, unsigned long start_time)
{
generic_end_io_acct(bio_data_dir(bio), part, start_time);
struct gendisk *gd = CAS_BIO_GET_DEV(bio);
generic_end_io_acct(bio_data_dir(bio), &gd->part0, start_time);
}" ;;
"4")
add_function "
static inline unsigned long long cas_generic_start_io_acct(
struct request_queue *q, struct bio *bio,
struct hd_struct *part)
struct bio *bio)
{
struct gendisk *gd = CAS_BIO_GET_DEV(bio);
int rw = bio_data_dir(bio);
int cpu = part_stat_lock();
part_round_stats(cpu, part);
@ -94,44 +100,47 @@ apply() {
return jiffies;
}"
add_function "
static inline void cas_generic_end_io_acct(struct request_queue *q,
struct bio *bio, struct hd_struct *part,
unsigned long start_time)
static inline void cas_generic_end_io_acct(
struct bio *bio, unsigned long start_time)
{
struct gendisk *gd = CAS_BIO_GET_DEV(bio);
int rw = bio_data_dir(bio);
unsigned long duration = jiffies - start_time;
int cpu = part_stat_lock();
part_stat_add(cpu, part, ticks[rw], duration);
part_round_stats(cpu, part);
part_dec_in_flight(part, rw);
part_stat_add(cpu, &gd->part0, ticks[rw], duration);
part_round_stats(cpu, &gd->part0);
part_dec_in_flight(&gd->part0, rw);
part_stat_unlock();
}" ;;
"5")
add_function "
static inline unsigned long long cas_generic_start_io_acct(
struct request_queue *q, struct bio *bio,
struct hd_struct *part)
struct bio *bio)
{
struct gendisk *gd = CAS_BIO_GET_DEV(bio);
int rw = bio_data_dir(bio);
int cpu = part_stat_lock();
part_round_stats(NULL, cpu, part);
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, sectors[rw], bio_sectors(bio));
part_inc_in_flight(NULL, part, rw);
part_round_stats(NULL, cpu, &gd->part0);
part_stat_inc(cpu, &gd->part0, ios[rw]);
part_stat_add(cpu, &gd->part0, sectors[rw], bio_sectors(bio));
part_inc_in_flight(NULL, &gd->part0, rw);
part_stat_unlock();
return jiffies;
}"
add_function "
static inline void cas_generic_end_io_acct(struct request_queue *q,
struct bio *bio, struct hd_struct *part,
unsigned long start_time)
static inline void cas_generic_end_io_acct(
struct bio *bio, unsigned long start_time)
{
struct gendisk *gd = CAS_BIO_GET_DEV(bio);
int rw = bio_data_dir(bio);
unsigned long duration = jiffies - start_time;
int cpu = part_stat_lock();
part_stat_add(cpu, part, ticks[rw], duration);
part_round_stats(NULL, cpu, part);
part_dec_in_flight(NULL, part, rw);
part_stat_add(cpu, &gd->part0, ticks[rw], duration);
part_round_stats(NULL, cpu, &gd->part0);
part_dec_in_flight(NULL, &gd->part0, rw);
part_stat_unlock();
}" ;;
*)

View File

@ -35,7 +35,7 @@ apply() {
}" ;;
"2")
add_define "CAS_CHECK_QUEUE_FLUSH(q) \\
((q)->flush_flags & CAS_REQ_FLUSH)"
CAS_IS_SET_FLUSH((q)->flush_flags)"
add_define "CAS_CHECK_QUEUE_FUA(q) \\
((q)->flush_flags & REQ_FUA)"
add_function "static inline void cas_set_queue_flush_fua(struct request_queue *q,
@ -43,7 +43,7 @@ apply() {
{
unsigned int flags = 0;
if (flush)
flags |= CAS_REQ_FLUSH;
flags = CAS_SET_FLUSH(flags);
if (fua)
flags |= REQ_FUA;
if (flags)

View File

@ -1,63 +0,0 @@
#!/bin/bash
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
. $(dirname $3)/conf_framework
check() {
cur_name=$(basename $2)
config_file_path=$1
if compile_module $cur_name "WRITE_FLUSH" "linux/fs.h"
then
if compile_module $cur_name "BIO_FLUSH" "linux/bio.h"
then
echo $cur_name "1" >> $config_file_path
else
echo $cur_name "2" >> $config_file_path
fi
elif compile_module $cur_name "REQ_PREFLUSH" "linux/blk_types.h"
then
echo $cur_name "3" >> $config_file_path
else
echo $cur_name "4" >> $config_file_path
fi
}
apply() {
case "$1" in
"1")
add_define "CAS_RQ_IS_FLUSH(rq) \\
((rq)->cmd_flags & CAS_REQ_FLUSH)"
add_define "CAS_WRITE_FLUSH \\
WRITE_FLUSH"
add_define "CAS_IS_WRITE_FLUSH(flags) \\
((flags) & BIO_FLUSH)" ;;
"2")
add_define "CAS_RQ_IS_FLUSH(rq) \\
((rq)->cmd_flags & CAS_REQ_FLUSH)"
add_define "CAS_WRITE_FLUSH \\
WRITE_FLUSH"
add_define "CAS_IS_WRITE_FLUSH(flags) \\
((flags) & CAS_REQ_FLUSH)" ;;
"3")
add_define "CAS_RQ_IS_FLUSH(rq) \\
((rq)->cmd_flags & REQ_PREFLUSH)"
add_define "CAS_WRITE_FLUSH \\
(REQ_OP_WRITE | REQ_PREFLUSH)"
add_define "CAS_IS_WRITE_FLUSH(flags) \\
(CAS_WRITE_FLUSH == ((flags) & CAS_WRITE_FLUSH))" ;;
"4")
add_define "CAS_RQ_IS_FLUSH(rq) \\
0"
add_define "CAS_IS_WRITE_FLUSH(flags) \\
(WRITE_BARRIER == ((flags) & WRITE_BARRIER))"
add_define "CAS_WRITE_FLUSH \\
WRITE_BARRIER" ;;
*)
exit 1
esac
}
conf_run $@

View File

@ -52,6 +52,7 @@ compile_module(){
return 0;
}
void cleanup_module(void) {};
MODULE_LICENSE("GPL");
EOF
#######################################

View File

@ -1068,7 +1068,6 @@ out_bdev:
int cache_mngt_prepare_core_cfg(struct ocf_mngt_core_config *cfg,
struct kcas_insert_core *cmd_info)
{
struct block_device *bdev;
char core_name[OCF_CORE_NAME_SIZE] = {};
ocf_cache_t cache;
uint16_t core_id;
@ -1109,10 +1108,8 @@ int cache_mngt_prepare_core_cfg(struct ocf_mngt_core_config *cfg,
return 0;
}
bdev = CAS_LOOKUP_BDEV(cfg->uuid.data);
if (IS_ERR(bdev))
if (!cas_bdev_exist(cfg->uuid.data))
return -OCF_ERR_INVAL_VOLUME_TYPE;
bdput(bdev);
if (cmd_info->update_path)
return 0;
@ -1135,9 +1132,7 @@ static int cache_mngt_update_core_uuid(ocf_cache_t cache, const char *core_name,
{
ocf_core_t core;
ocf_volume_t vol;
struct block_device *bdev;
struct bd_object *bdvol;
bool match;
int result;
if (ocf_core_get_by_name(cache, core_name, name_len, &core)) {
@ -1154,19 +1149,7 @@ static int cache_mngt_update_core_uuid(ocf_cache_t cache, const char *core_name,
vol = ocf_core_get_volume(core);
bdvol = bd_object(vol);
/* lookup block device object for device pointed by uuid */
bdev = CAS_LOOKUP_BDEV(uuid->data);
if (IS_ERR(bdev)) {
printk(KERN_ERR "failed to lookup bdev%s\n", (char*)uuid->data);
return -ENODEV;
}
/* check whether both core id and uuid point to the same block device */
match = (bdvol->btm_bd == bdev);
bdput(bdev);
if (!match) {
if (!cas_bdev_match(uuid->data, bdvol->btm_bd)) {
printk(KERN_ERR "UUID provided does not match target core device\n");
return -ENODEV;
}
@ -1766,7 +1749,7 @@ int cache_mngt_prepare_cache_cfg(struct ocf_mngt_cache_config *cfg,
-OCF_ERR_INVAL_VOLUME_TYPE;
}
is_part = (bdev->bd_contains != bdev);
is_part = (cas_bdev_whole(bdev) != bdev);
part_count = cas_blk_get_part_count(bdev);
blkdev_put(bdev, (FMODE_EXCL|FMODE_READ));
@ -1872,7 +1855,7 @@ static void init_instance_complete(struct _cache_mngt_attach_context *ctx,
bdev = bd_cache_obj->btm_bd;
/* If we deal with whole device, reread partitions */
if (bdev->bd_contains == bdev)
if (cas_bdev_whole(bdev) == bdev)
cas_reread_partitions(bdev);
/* Set other back information */

View File

@ -31,8 +31,11 @@ struct bd_object {
atomic64_t pending_rqs;
/*!< This fields describes in flight IO requests */
struct workqueue_struct *workqueue;
/*< Workqueue for internally trigerred I/O */
struct workqueue_struct *btm_wq;
/*< Workqueue for I/O internally trigerred in bottom vol */
struct workqueue_struct *expobj_wq;
/*< Workqueue for I/O handled by top vol */
};
static inline struct bd_object *bd_object(ocf_volume_t vol)

View File

@ -832,7 +832,6 @@ out:
void cas_atomic_submit_flush(struct ocf_io *io)
{
#ifdef CAS_FLUSH_SUPPORTED
struct bd_object *bdobj = bd_object(ocf_io_get_volume(io));
struct block_device *bdev = bdobj->btm_bd;
struct request_queue *q = bdev_get_queue(bdev);
@ -885,26 +884,13 @@ void cas_atomic_submit_flush(struct ocf_io *io)
out:
cas_atomic_end_atom(atom, blkio->error);
#else
/* Running operating system without support for REQ_FLUSH
* (i.e. SLES 11 SP 1) CAS cannot use flushing requests to handle
* power-fail safe Write-Back
*/
struct blkio *bdio = cas_io_to_blkio(io);
io->end(io, -EINVAL);
/* on SLES 11 SP 1 powerfail safety can only be achieved through
* disabling volatile write cache of disk itself.
*/
#endif
}
void cas_atomic_submit_io(struct ocf_io *io)
{
CAS_DEBUG_TRACE();
if (!CAS_IS_WRITE_FLUSH_FUA(io->flags) &&
CAS_IS_WRITE_FLUSH(io->flags)) {
if (CAS_IS_SET_FLUSH(io->flags)) {
/* FLUSH */
cas_atomic_submit_flush(io);
return;
@ -953,8 +939,8 @@ void cas_atomic_close_object(ocf_volume_t volume)
{
struct bd_object *bdobj = bd_object(volume);
if(bdobj->workqueue)
destroy_workqueue(bdobj->workqueue);
if(bdobj->btm_wq)
destroy_workqueue(bdobj->btm_wq);
block_dev_close_object(volume);
}
@ -976,8 +962,8 @@ int cas_atomic_open_object(ocf_volume_t volume, void *volume_params)
memcpy(&bdobj->atomic_params, volume_params,
sizeof(bdobj->atomic_params));
bdobj->workqueue = create_workqueue("CAS_AT_ZER");
if (!bdobj->workqueue) {
bdobj->btm_wq = create_workqueue("CAS_AT_ZER");
if (!bdobj->btm_wq) {
cas_atomic_close_object(volume);
result = -ENOMEM;
goto end;
@ -1036,7 +1022,7 @@ static void _cas_atomic_write_zeroes_step_cmpl(struct ocf_io *io, int error)
_cas_atomic_write_zeroes_end(ctx, error);
} else {
/* submit next IO from work context */
queue_work(bdobj->workqueue, &ctx->cmpl_work);
queue_work(bdobj->btm_wq, &ctx->cmpl_work);
}
}

View File

@ -303,7 +303,7 @@ int cas_blk_identify_type_by_bdev(struct block_device *bdev,
atomic_params_int.is_mode_optimal = 1;
break;
#else
if (bdev == bdev->bd_contains) {
if (bdev == cas_bdev_whole(bdev)) {
/*
* Entire device - format isn't optimal
*/

View File

@ -9,17 +9,6 @@
#include "obj_blk.h"
#include "context.h"
static inline bool cas_blk_is_flush_io(unsigned long flags)
{
if ((flags & CAS_WRITE_FLUSH) == CAS_WRITE_FLUSH)
return true;
if ((flags & CAS_WRITE_FLUSH_FUA) == CAS_WRITE_FLUSH_FUA)
return true;
return false;
}
struct blkio {
int error;
atomic_t rq_remaning;

View File

@ -84,9 +84,9 @@ uint64_t block_dev_get_byte_length(ocf_volume_t vol)
struct block_device *bd = bdobj->btm_bd;
uint64_t sector_length;
sector_length = (bd->bd_contains == bd) ?
sector_length = (cas_bdev_whole(bd) == bd) ?
get_capacity(bd->bd_disk) :
bd->bd_part->nr_sects;
cas_bdev_nr_sectors(bd);
return sector_length << SECTOR_SHIFT;
}
@ -229,7 +229,6 @@ CAS_DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio,
static void block_dev_submit_flush(struct ocf_io *io)
{
#ifdef CAS_FLUSH_SUPPORTED
struct blkio *blkio = cas_io_to_blkio(io);
struct bd_object *bdobj = bd_object(ocf_io_get_volume(io));
struct block_device *bdev = bdobj->btm_bd;
@ -267,22 +266,10 @@ static void block_dev_submit_flush(struct ocf_io *io)
bio->bi_private = io;
atomic_inc(&blkio->rq_remaning);
cas_submit_bio(CAS_WRITE_FLUSH, bio);
cas_submit_bio(CAS_SET_FLUSH(0), bio);
out:
cas_bd_io_end(io, blkio->error);
#else
/* Running operating system without support for REQ_FLUSH
* (i.e. SLES 11 SP 1) CAS cannot use flushing requests to
* handle power-fail safe Write-Back
*/
io->end(io, -ENOTSUPP);
/* on SLES 11 SP 1 powerfail safety can only be achieved
* through disabling volatile write cache of disk itself.
*/
#endif
}
void block_dev_submit_discard(struct ocf_io *io)
@ -412,8 +399,7 @@ static void block_dev_submit_io(struct ocf_io *io)
uint32_t bytes = io->bytes;
int dir = io->dir;
if (!CAS_IS_WRITE_FLUSH_FUA(io->flags) &&
CAS_IS_WRITE_FLUSH(io->flags)) {
if (CAS_IS_SET_FLUSH(io->flags)) {
CAS_DEBUG_MSG("Flush request");
/* It is flush requests handle it */
block_dev_submit_flush(io);

View File

@ -6,34 +6,6 @@
#include "cas_cache.h"
#include "utils/cas_err.h"
#define BLK_RQ_POS(rq) (CAS_BIO_BISECTOR((rq)->bio))
#define BLK_RQ_BYTES(rq) blk_rq_bytes(rq)
static inline void _blockdev_end_request_all(struct request *rq, int error)
{
CAS_END_REQUEST_ALL(rq, CAS_ERRNO_TO_BLK_STS(
map_cas_err_to_generic(error)));
}
static inline bool _blockdev_can_handle_rq(struct request *rq)
{
int error = 0;
if (unlikely(!cas_is_rq_type_fs(rq)))
error = __LINE__;
if (unlikely(CAS_BLK_BIDI_RQ(rq)))
error = __LINE__;
if (error != 0) {
CAS_PRINT_RL(KERN_ERR "%s cannot handle request (ERROR %d)\n",
rq->rq_disk->disk_name, error);
return false;
}
return true;
}
static void _blockdev_set_bio_data(struct blk_data *data, struct bio *bio)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
@ -58,443 +30,6 @@ static void _blockdev_set_bio_data(struct blk_data *data, struct bio *bio)
#endif
}
static inline unsigned long long _blockdev_start_io_acct(struct bio *bio)
{
struct gendisk *gd = CAS_BIO_GET_DEV(bio);
return cas_generic_start_io_acct(gd->queue, bio, &gd->part0);
}
static inline void _blockdev_end_io_acct(struct bio *bio,
unsigned long start_time)
{
struct gendisk *gd = CAS_BIO_GET_DEV(bio);
cas_generic_end_io_acct(gd->queue, bio, &gd->part0, start_time);
}
void block_dev_start_bio_fast(struct ocf_io *io)
{
struct blk_data *data = ocf_io_get_data(io);
struct bio *bio = data->master_io_req;
data->start_time = _blockdev_start_io_acct(bio);
}
void block_dev_complete_bio_fast(struct ocf_io *io, int error)
{
struct blk_data *data = ocf_io_get_data(io);
struct bio *bio = data->master_io_req;
_blockdev_end_io_acct(bio, data->start_time);
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
ocf_io_put(io);
cas_free_blk_data(data);
}
void block_dev_complete_bio_discard(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
ocf_io_put(io);
}
void block_dev_complete_rq(struct ocf_io *io, int error)
{
struct blk_data *data = ocf_io_get_data(io);
struct request *rq = data->master_io_req;
_blockdev_end_request_all(rq, error);
ocf_io_put(io);
cas_free_blk_data(data);
}
void block_dev_complete_sub_rq(struct ocf_io *io, int error)
{
struct blk_data *data = ocf_io_get_data(io);
struct ocf_io *master = data->master_io_req;
struct blk_data *master_data = ocf_io_get_data(master);
if (error)
master_data->error = error;
if (atomic_dec_return(&master_data->master_remaining) == 0) {
_blockdev_end_request_all(master_data->master_io_req,
master_data->error);
cas_free_blk_data(master_data);
ocf_io_put(master);
}
ocf_io_put(io);
cas_free_blk_data(data);
}
void block_dev_complete_flush(struct ocf_io *io, int error)
{
struct request *rq = io->priv1;
_blockdev_end_request_all(rq, error);
ocf_io_put(io);
}
bool _blockdev_is_request_barier(struct request *rq)
{
struct bio *i_bio = rq->bio;
for_each_bio(i_bio) {
if (CAS_CHECK_BARRIER(i_bio))
return true;
}
return false;
}
static int _blockdev_alloc_many_requests(ocf_core_t core,
struct list_head *list, struct request *rq,
struct ocf_io *master)
{
ocf_cache_t cache = ocf_core_get_cache(core);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
int error = 0;
int flags = 0;
struct bio *bio;
struct ocf_io *sub_io;
struct blk_data *master_data = ocf_io_get_data(master);
struct blk_data *data;
INIT_LIST_HEAD(list);
/* Go over requests and allocate sub requests */
bio = rq->bio;
for_each_bio(bio) {
/* Setup BIO flags */
if (CAS_IS_WRITE_FLUSH_FUA(CAS_BIO_OP_FLAGS(bio))) {
/* FLUSH and FUA */
flags = CAS_WRITE_FLUSH_FUA;
} else if (CAS_IS_WRITE_FUA(CAS_BIO_OP_FLAGS(bio))) {
/* FUA */
flags = CAS_WRITE_FUA;
} else if (CAS_IS_WRITE_FLUSH(CAS_BIO_OP_FLAGS(bio))) {
/* FLUSH - It shall be handled in request handler */
error = -EINVAL;
break;
} else {
flags = 0;
}
data = cas_alloc_blk_data(bio_segments(bio), GFP_NOIO);
if (!data) {
CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n");
error = -ENOMEM;
break;
}
_blockdev_set_bio_data(data, bio);
data->master_io_req = master;
sub_io = ocf_core_new_io(core,
cache_priv->io_queues[smp_processor_id()],
CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
CAS_BIO_BISIZE(bio), (bio_data_dir(bio) == READ) ?
OCF_READ : OCF_WRITE,
cas_cls_classify(cache, bio), flags);
if (!sub_io) {
cas_free_blk_data(data);
error = -ENOMEM;
break;
}
data->io = sub_io;
error = ocf_io_set_data(sub_io, data, 0);
if (error) {
ocf_io_put(sub_io);
cas_free_blk_data(data);
break;
}
ocf_io_set_cmpl(sub_io, NULL, NULL, block_dev_complete_sub_rq);
list_add_tail(&data->list, list);
atomic_inc(&master_data->master_remaining);
}
if (error) {
CAS_PRINT_RL(KERN_ERR "Cannot handle request (ERROR %d)\n", error);
/* Go over list and free all */
while (!list_empty(list)) {
data = list_first_entry(list, struct blk_data, list);
list_del(&data->list);
sub_io = data->io;
ocf_io_put(sub_io);
cas_free_blk_data(data);
}
}
return error;
}
/*
 * Copy all bio_vec segments of @rq into the blk_data segment vector.
 *
 * BUG()s if the request carries more segments than @data was allocated
 * for (data->size), since the vector size was computed beforehand by
 * _blkdev_scan_request().
 */
static void _blockdev_set_request_data(struct blk_data *data, struct request *rq)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
/* Pre-3.14: rq_for_each_segment() yields a struct bio_vec pointer */
struct req_iterator iter;
struct bio_vec *bvec;
uint32_t i = 0;
rq_for_each_segment(bvec, rq, iter) {
BUG_ON(i >= data->size);
data->vec[i] = *bvec;
i++;
}
#else
/* 3.14+: rq_for_each_segment() yields a struct bio_vec by value */
struct req_iterator iter;
struct bio_vec bvec;
uint32_t i = 0;
rq_for_each_segment(bvec, rq, iter) {
BUG_ON(i >= data->size);
data->vec[i] = bvec;
i++;
}
#endif
}
/**
 * @brief Submit a flush IO to the OCF engine for the given core device.
 *
 * Allocates a zero-length OCF write IO carrying the FLUSH flag and
 * submits it on the current CPU's IO queue. The original request @rq is
 * stashed as completion context and finished in block_dev_complete_flush().
 */
static int _blkdev_handle_flush_request(struct request *rq, ocf_core_t core)
{
	ocf_cache_t cache = ocf_core_get_cache(core);
	struct cache_priv *priv = ocf_cache_get_priv(cache);
	struct ocf_io *flush_io;

	flush_io = ocf_core_new_io(core, priv->io_queues[smp_processor_id()],
			0, 0, OCF_WRITE, 0, CAS_WRITE_FLUSH);
	if (flush_io == NULL)
		return -ENOMEM;

	ocf_io_set_cmpl(flush_io, rq, NULL, block_dev_complete_flush);
	ocf_core_submit_flush(flush_io);

	return 0;
}
#ifdef RQ_CHECK_CONTINOUS
/*
 * Check whether two consecutive bio_vec segments form a contiguous
 * range. A NULL previous segment (nothing seen yet) is treated as
 * mergeable. Otherwise the pair merges when it is physically
 * mergeable, or when the previous segment ends exactly on a page
 * boundary and the current one starts at offset zero.
 */
static inline bool _bvec_is_mergeable(struct bio_vec *prev, struct bio_vec *curr)
{
	bool prev_ends_on_page;

	if (!prev || BIOVEC_PHYS_MERGEABLE(prev, curr))
		return true;

	prev_ends_on_page = ((prev->bv_offset + prev->bv_len) % PAGE_SIZE) == 0;
	return curr->bv_offset == 0 && prev_ends_on_page;
}
#endif
/*
 * Pre-scan all segments of @rq before submission.
 *
 * Returns the total number of bio_vec segments in the request.
 * Sets *single_io to false when the request cannot be submitted as one
 * OCF IO: either its bios map to different IO classes than @io, or
 * (with RQ_CHECK_CONTINOUS) its segments are not physically contiguous.
 */
static uint32_t _blkdev_scan_request(ocf_cache_t cache, struct request *rq,
struct ocf_io *io, bool *single_io)
{
uint32_t size = 0;
struct req_iterator iter;
struct bio *bio_prev = NULL;
uint32_t io_class;
/* rq_for_each_segment() iteration variable type differs per kernel */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
struct bio_vec bvec;
#ifdef RQ_CHECK_CONTINOUS
struct bio_vec bvec_prev = { NULL, };
#endif
#else
struct bio_vec *bvec;
#ifdef RQ_CHECK_CONTINOUS
struct bio_vec *bvec_prev = NULL;
#endif
#endif
*single_io = true;
/* Scan BIOs in the request to:
* 1. Count the segments number
* 2. Check if requests contains many IO classes
* 3. Check if request is continuous (when process kernel stack is 8KB)
*/
rq_for_each_segment(bvec, rq, iter) {
/* Increase BIO data vector counter */
size++;
if (*single_io == false) {
/* Already detected complex request */
continue;
}
#ifdef RQ_CHECK_CONTINOUS
/*
* If request is not continous submit each bio as separate
* request, and prevent nvme driver from splitting requests.
* For large requests, nvme splitting causes stack overrun.
*/
if (!_bvec_is_mergeable(CAS_SEGMENT_BVEC(bvec_prev),
CAS_SEGMENT_BVEC(bvec))) {
*single_io = false;
continue;
}
bvec_prev = bvec;
#endif
/* Classify each bio only once, on its first segment */
if (bio_prev == iter.bio)
continue;
bio_prev = iter.bio;
/* Get class ID for given BIO */
io_class = cas_cls_classify(cache, iter.bio);
if (io->io_class != io_class) {
/*
* Request contains BIO with different IO classes and
* need to handle BIO separately
*/
*single_io = false;
}
}
return size;
}
/*
 * Handle one block request in attached (request-queue) mode.
 *
 * Rejects unsupported requests, routes pure FLUSH requests to the
 * dedicated flush path, then either submits the whole request as a
 * single OCF IO or - when _blkdev_scan_request() detects mixed IO
 * classes / non-contiguous segments - splits it into one sub-IO per
 * bio via _blockdev_alloc_many_requests().
 *
 * Returns 0 on successful submission, negative errno otherwise. On
 * error the caller is responsible for completing @rq.
 */
static int __block_dev_queue_rq(struct request *rq, ocf_core_t core)
{
ocf_cache_t cache = ocf_core_get_cache(core);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
struct ocf_io *io;
struct blk_data *data;
int master_flags = 0;
bool single_io;
uint32_t size;
int ret;
if (_blockdev_is_request_barier(rq) || !_blockdev_can_handle_rq(rq)) {
CAS_PRINT_RL(KERN_WARNING
"special bio was sent,not supported!\n");
return -ENOTSUPP;
}
/* Translate request flush/FUA flags into CAS master IO flags */
if ((rq->cmd_flags & REQ_FUA) && CAS_RQ_IS_FLUSH(rq)) {
/* FLUSH and FUA */
master_flags = CAS_WRITE_FLUSH_FUA;
} else if (rq->cmd_flags & REQ_FUA) {
/* FUA */
master_flags = CAS_WRITE_FUA;
} else if (CAS_RQ_IS_FLUSH(rq)) {
/* FLUSH */
return _blkdev_handle_flush_request(rq, core);
}
/* Master IO spanning the whole request, classified by its first bio */
io = ocf_core_new_io(core, cache_priv->io_queues[smp_processor_id()],
BLK_RQ_POS(rq) << SECTOR_SHIFT, BLK_RQ_BYTES(rq),
(rq_data_dir(rq) == CAS_RQ_DATA_DIR_WR) ?
OCF_WRITE : OCF_READ,
cas_cls_classify(cache, rq->bio), master_flags);
if (!io) {
CAS_PRINT_RL(KERN_CRIT "Out of memory. Ending IO processing.\n");
return -ENOMEM;
}
size = _blkdev_scan_request(cache, rq, io, &single_io);
if (unlikely(size == 0)) {
CAS_PRINT_RL(KERN_ERR "Empty IO request\n");
ocf_io_put(io);
return -EINVAL;
}
if (single_io) {
/* Whole request submitted as one IO with a full segment vector */
data = cas_alloc_blk_data(size, GFP_NOIO);
if (data == NULL) {
CAS_PRINT_RL(KERN_CRIT
"Out of memory. Ending IO processing.\n");
ocf_io_put(io);
return -ENOMEM;
}
_blockdev_set_request_data(data, rq);
data->master_io_req = rq;
ret = ocf_io_set_data(io, data, 0);
if (ret) {
ocf_io_put(io);
cas_free_blk_data(data);
return -EINVAL;
}
ocf_io_set_cmpl(io, NULL, NULL, block_dev_complete_rq);
ocf_core_submit_io(io);
} else {
/* Complex request: master IO tracks per-bio sub-IOs on a list */
struct list_head list = LIST_HEAD_INIT(list);
data = cas_alloc_blk_data(0, GFP_NOIO);
if (data == NULL) {
printk(KERN_CRIT
"Out of memory. Ending IO processing.\n");
ocf_io_put(io);
return -ENOMEM;
}
data->master_io_req = rq;
if (ocf_io_set_data(io, data, 0)) {
ocf_io_put(io);
cas_free_blk_data(data);
return -EINVAL;
}
/* Allocate setup and setup */
ret = _blockdev_alloc_many_requests(core, &list, rq, io);
if (ret < 0) {
printk(KERN_CRIT
"Out of memory. Ending IO processing.\n");
cas_free_blk_data(data);
ocf_io_put(io);
return -ENOMEM;
}
BUG_ON(list_empty(&list));
/* Go over list and push request to the engine */
while (!list_empty(&list)) {
struct ocf_io *sub_io;
data = list_first_entry(&list, struct blk_data, list);
list_del(&data->list);
sub_io = data->io;
ocf_core_submit_io(sub_io);
}
}
return ret;
}
/*
 * casdsk queue_rq callback: route the request to the OCF core and, on
 * failure, complete it immediately with the error. The errno is always
 * translated into a block-layer status code for the caller.
 */
static CAS_BLK_STATUS_T _block_dev_queue_request(struct casdsk_disk *dsk, struct request *rq, void *private)
{
	ocf_core_t core = private;
	int err;

	err = __block_dev_queue_rq(rq, core);
	if (err != 0)
		_blockdev_end_request_all(rq, err);

	return CAS_ERRNO_TO_BLK_STS(err);
}
static inline int _blkdev_can_hndl_bio(struct bio *bio)
{
if (CAS_CHECK_BARRIER(bio)) {
@ -507,26 +42,8 @@ static inline int _blkdev_can_hndl_bio(struct bio *bio)
return 0;
}
static inline bool _blkdev_is_flush_fua_bio(struct bio *bio)
{
if (CAS_IS_WRITE_FLUSH_FUA(CAS_BIO_OP_FLAGS(bio))) {
/* FLUSH and FUA */
return true;
} else if (CAS_IS_WRITE_FUA(CAS_BIO_OP_FLAGS(bio))) {
/* FUA */
return true;
} else if (CAS_IS_WRITE_FLUSH(CAS_BIO_OP_FLAGS(bio))) {
/* FLUSH */
return true;
}
return false;
}
void _blockdev_set_exported_object_flush_fua(ocf_core_t core)
{
#ifdef CAS_FLUSH_SUPPORTED
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_volume_t core_vol = ocf_core_get_volume(core);
ocf_volume_t cache_vol = ocf_cache_get_volume(cache);
@ -547,7 +64,6 @@ void _blockdev_set_exported_object_flush_fua(ocf_core_t core)
fua = (CAS_CHECK_QUEUE_FUA(core_q) || CAS_CHECK_QUEUE_FUA(cache_q));
cas_set_queue_flush_fua(exp_q, flush, fua);
#endif
}
static void _blockdev_set_discard_properties(ocf_cache_t cache,
@ -609,7 +125,7 @@ static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
cache_bd = casdisk_functions.casdsk_disk_get_blkdev(bd_cache_vol->dsk);
BUG_ON(!cache_bd);
core_q = core_bd->bd_contains->bd_disk->queue;
core_q = cas_bdev_whole(core_bd)->bd_disk->queue;
cache_q = cache_bd->bd_disk->queue;
exp_q = casdisk_functions.casdsk_exp_obj_get_queue(dsk);
@ -627,7 +143,7 @@ static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
return -KCAS_ERR_UNALIGNED;
}
blk_queue_stack_limits(exp_q, core_q);
blk_stack_limits(&exp_q->limits, &core_q->limits, 0);
/* We don't want to receive splitted requests*/
CAS_SET_QUEUE_CHUNK_SECTORS(exp_q, 0);
@ -640,102 +156,82 @@ static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
return 0;
}
static void _blockdev_pending_req_inc(struct casdsk_disk *dsk, void *private)
{
struct defer_bio_context {
struct work_struct io_work;
void (*cb)(ocf_core_t core, struct bio *bio);
ocf_core_t core;
ocf_volume_t obj;
struct bd_object *bvol;
struct bio *bio;
};
BUG_ON(!private);
core = private;
obj = ocf_core_get_volume(core);
bvol = bd_object(obj);
BUG_ON(!bvol);
static void _blockdev_defer_bio_work(struct work_struct *work)
{
struct defer_bio_context *context;
atomic64_inc(&bvol->pending_rqs);
context = container_of(work, struct defer_bio_context, io_work);
context->cb(context->core, context->bio);
kfree(context);
}
static void _blockdev_pending_req_dec(struct casdsk_disk *dsk, void *private)
static void _blockdev_defer_bio(ocf_core_t core, struct bio *bio,
void (*cb)(ocf_core_t core, struct bio *bio))
{
ocf_core_t core;
ocf_volume_t obj;
struct bd_object *bvol;
struct defer_bio_context *context;
ocf_volume_t volume = ocf_core_get_volume(core);
struct bd_object *bvol = bd_object(volume);
BUG_ON(!private);
core = private;
obj = ocf_core_get_volume(core);
bvol = bd_object(obj);
BUG_ON(!bvol);
BUG_ON(!bvol->expobj_wq);
atomic64_dec(&bvol->pending_rqs);
}
static void _blockdev_make_request_discard(struct casdsk_disk *dsk,
struct request_queue *q, struct bio *bio, void *private)
{
ocf_core_t core = private;
ocf_cache_t cache = ocf_core_get_cache(core);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
struct ocf_io *io;
io = ocf_core_new_io(core, cache_priv->io_queues[smp_processor_id()],
CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
CAS_BIO_BISIZE(bio), OCF_WRITE, 0, 0);
if (!io) {
CAS_PRINT_RL(KERN_CRIT
"Out of memory. Ending IO processing.\n");
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-ENOMEM));
context = kmalloc(sizeof(*context), GFP_ATOMIC);
if (!context) {
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio),
CAS_ERRNO_TO_BLK_STS(-ENOMEM));
return;
}
ocf_io_set_cmpl(io, bio, NULL, block_dev_complete_bio_discard);
ocf_core_submit_discard(io);
context->cb = cb;
context->bio = bio;
context->core = core;
INIT_WORK(&context->io_work, _blockdev_defer_bio_work);
queue_work(bvol->expobj_wq, &context->io_work);
}
static int _blockdev_make_request_fast(struct casdsk_disk *dsk,
struct request_queue *q, struct bio *bio, void *private)
static void block_dev_complete_data(struct ocf_io *io, int error)
{
struct blk_data *data = ocf_io_get_data(io);
struct bio *bio = data->master_io_req;
cas_generic_end_io_acct(bio, data->start_time);
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
ocf_io_put(io);
cas_free_blk_data(data);
}
static void _blockdev_handle_data(ocf_core_t core, struct bio *bio)
{
ocf_core_t core;
ocf_cache_t cache;
struct cache_priv *cache_priv;
struct ocf_io *io;
struct blk_data *data;
uint64_t flags = CAS_BIO_OP_FLAGS(bio);
int ret;
BUG_ON(!private);
core = private;
cache = ocf_core_get_cache(core);
cache_priv = ocf_cache_get_priv(cache);
if (in_interrupt())
return CASDSK_BIO_NOT_HANDLED;
if (_blkdev_can_hndl_bio(bio))
return CASDSK_BIO_HANDLED;
if (_blkdev_is_flush_fua_bio(bio))
return CASDSK_BIO_NOT_HANDLED;
if (CAS_IS_DISCARD(bio)) {
_blockdev_make_request_discard(dsk, q, bio, private);
return CASDSK_BIO_HANDLED;
}
if (unlikely(CAS_BIO_BISIZE(bio) == 0)) {
CAS_PRINT_RL(KERN_ERR
"Not able to handle empty BIO, flags = "
CAS_BIO_OP_FLAGS_FORMAT "\n", CAS_BIO_OP_FLAGS(bio));
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-EINVAL));
return CASDSK_BIO_HANDLED;
return;
}
data = cas_alloc_blk_data(bio_segments(bio), GFP_NOIO);
if (!data) {
CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n");
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-ENOMEM));
return CASDSK_BIO_HANDLED;
return;
}
_blockdev_set_bio_data(data, bio);
@ -746,13 +242,13 @@ static int _blockdev_make_request_fast(struct casdsk_disk *dsk,
CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
CAS_BIO_BISIZE(bio), (bio_data_dir(bio) == READ) ?
OCF_READ : OCF_WRITE,
cas_cls_classify(cache, bio), 0);
cas_cls_classify(cache, bio), CAS_CLEAR_FLUSH(flags));
if (!io) {
printk(KERN_CRIT "Out of memory. Ending IO processing.\n");
cas_free_blk_data(data);
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-ENOMEM));
return CASDSK_BIO_HANDLED;
return;
}
ret = ocf_io_set_data(io, data, 0);
@ -760,36 +256,123 @@ static int _blockdev_make_request_fast(struct casdsk_disk *dsk,
ocf_io_put(io);
cas_free_blk_data(data);
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-EINVAL));
return CASDSK_BIO_HANDLED;
return;
}
ocf_io_set_cmpl(io, NULL, NULL, block_dev_complete_bio_fast);
ocf_io_set_start(io, block_dev_start_bio_fast);
ocf_io_set_cmpl(io, NULL, NULL, block_dev_complete_data);
data->start_time = cas_generic_start_io_acct(bio);
ret = ocf_core_submit_io_fast(io);
if (ret < 0)
goto err;
ocf_core_submit_io(io);
}
return CASDSK_BIO_HANDLED;
static void block_dev_complete_discard(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
err:
/*
* - Not able to processed fast path for this BIO,
* - Cleanup current request
* - Put it to the IO scheduler
*/
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(error));
ocf_io_put(io);
cas_free_blk_data(data);
}
return CASDSK_BIO_NOT_HANDLED;
/*
 * Translate a discard bio into an OCF discard IO submitted on the
 * current CPU's queue. The FLUSH flag is stripped from the bio op
 * flags; completion goes through block_dev_complete_discard(). On
 * allocation failure the bio is ended with -ENOMEM.
 */
static void _blockdev_handle_discard(ocf_core_t core, struct bio *bio)
{
	ocf_cache_t cache = ocf_core_get_cache(core);
	struct cache_priv *priv = ocf_cache_get_priv(cache);
	struct ocf_io *discard_io;

	discard_io = ocf_core_new_io(core,
			priv->io_queues[smp_processor_id()],
			CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
			CAS_BIO_BISIZE(bio), OCF_WRITE, 0,
			CAS_CLEAR_FLUSH(CAS_BIO_OP_FLAGS(bio)));
	if (discard_io == NULL) {
		CAS_PRINT_RL(KERN_CRIT
				"Out of memory. Ending IO processing.\n");
		CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio),
				CAS_ERRNO_TO_BLK_STS(-ENOMEM));
		return;
	}

	ocf_io_set_cmpl(discard_io, bio, NULL, block_dev_complete_discard);
	ocf_core_submit_discard(discard_io);
}
/*
 * Dispatch a bio that carries no FLUSH flag: discards go to the discard
 * handler, everything else to the data handler.
 */
static void _blockdev_handle_bio_noflush(ocf_core_t core, struct bio *bio)
{
	if (!CAS_IS_DISCARD(bio))
		_blockdev_handle_data(core, bio);
	else
		_blockdev_handle_discard(core, bio);
}
/*
 * Completion callback for the flush IO issued from _blkdev_handle_flush().
 *
 * On error, or for a pure (zero-size) flush, the bio is completed right
 * away. Otherwise the bio still carries data, which is now pushed down
 * the regular no-flush path - deferred to a workqueue when running in
 * interrupt context.
 */
static void block_dev_complete_flush(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
ocf_core_t core = io->priv2;
/* The flush IO itself is no longer needed, whatever the outcome */
ocf_io_put(io);
if (CAS_BIO_BISIZE(bio) == 0 || error) {
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio),
CAS_ERRNO_TO_BLK_STS(error));
return;
}
if (in_interrupt())
_blockdev_defer_bio(core, bio, _blockdev_handle_bio_noflush);
else
_blockdev_handle_bio_noflush(core, bio);
}
/*
 * Issue an OCF flush for a bio carrying the FLUSH flag. The bio and the
 * core are stashed in the IO (priv1/priv2) so block_dev_complete_flush()
 * can continue with any data payload once the flush finishes. On
 * allocation failure the bio is ended with -ENOMEM.
 */
static void _blkdev_handle_flush(ocf_core_t core, struct bio *bio)
{
	ocf_cache_t cache = ocf_core_get_cache(core);
	struct cache_priv *priv = ocf_cache_get_priv(cache);
	struct ocf_io *flush_io;

	flush_io = ocf_core_new_io(core, priv->io_queues[smp_processor_id()],
			0, 0, OCF_WRITE, 0, CAS_SET_FLUSH(0));
	if (flush_io == NULL) {
		CAS_PRINT_RL(KERN_CRIT
				"Out of memory. Ending IO processing.\n");
		CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio),
				CAS_ERRNO_TO_BLK_STS(-ENOMEM));
		return;
	}

	ocf_io_set_cmpl(flush_io, bio, core, block_dev_complete_flush);
	ocf_core_submit_flush(flush_io);
}
/*
 * Entry point for a single bio: flush-flagged bios take the flush path,
 * all others go straight to the no-flush dispatcher.
 */
static void _blockdev_handle_bio(ocf_core_t core, struct bio *bio)
{
	uint64_t op_flags = CAS_BIO_OP_FLAGS(bio);

	if (!CAS_IS_SET_FLUSH(op_flags))
		_blockdev_handle_bio_noflush(core, bio);
	else
		_blkdev_handle_flush(core, bio);
}
/*
 * casdsk submit_bio callback for the exported object. Rejects bios the
 * driver cannot handle, then processes the bio either directly or -
 * when called from interrupt context - deferred to the expobj workqueue.
 */
static void _blockdev_submit_bio(struct casdsk_disk *dsk,
struct bio *bio, void *private)
{
ocf_core_t core = private;
BUG_ON(!core);
if (_blkdev_can_hndl_bio(bio)) {
/* Unsupported (e.g. barrier) bio - fail it immediately */
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio),
CAS_ERRNO_TO_BLK_STS(-ENOTSUPP));
return;
}
if (in_interrupt())
_blockdev_defer_bio(core, bio, _blockdev_handle_bio);
else
_blockdev_handle_bio(core, bio);
}
static struct casdsk_exp_obj_ops _blockdev_exp_obj_ops = {
.set_geometry = _blockdev_set_geometry,
.make_request_fn = _blockdev_make_request_fast,
.queue_rq_fn = _block_dev_queue_request,
.pending_rq_inc = _blockdev_pending_req_inc,
.pending_rq_dec = _blockdev_pending_req_dec,
.submit_bio = _blockdev_submit_bio,
};
/**
@ -820,17 +403,6 @@ int block_dev_activate_exported_object(ocf_core_t core)
return ret;
}
/* ocf_core_visit() adapter for block_dev_activate_exported_object() */
static int _block_dev_activate_exported_object(ocf_core_t core, void *cntx)
{
return block_dev_activate_exported_object(core);
}
/*
 * Activate the exported block device of every core in @cache by
 * visiting each core with the activation adapter.
 */
int block_dev_activate_all_exported_objects(ocf_cache_t cache)
{
return ocf_core_visit(cache, _block_dev_activate_exported_object, NULL,
true);
}
static const char *get_cache_id_string(ocf_cache_t cache)
{
return ocf_cache_get_name(cache) + sizeof("cache") - 1;
@ -866,10 +438,23 @@ int block_dev_create_exported_object(ocf_core_t core)
return 0;
}
bvol->expobj_wq = alloc_workqueue("expobj_wq%s-%s",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0,
get_cache_id_string(cache),
get_core_id_string(core));
if (!bvol->expobj_wq) {
result = -ENOMEM;
goto end;
}
result = casdisk_functions.casdsk_exp_obj_create(dsk, dev_name,
THIS_MODULE, &_blockdev_exp_obj_ops);
if (!result)
bvol->expobj_valid = true;
if (result) {
destroy_workqueue(bvol->expobj_wq);
goto end;
}
bvol->expobj_valid = true;
end:
if (result) {
@ -879,17 +464,6 @@ end:
return result;
}
/* ocf_core_visit() adapter for block_dev_create_exported_object() */
static int _block_dev_create_exported_object_visitor(ocf_core_t core, void *cntx)
{
return block_dev_create_exported_object(core);
}
/*
 * Create the exported block device of every core in @cache by visiting
 * each core with the creation adapter.
 */
int block_dev_create_all_exported_objects(ocf_cache_t cache)
{
return ocf_core_visit(cache, _block_dev_create_exported_object_visitor, NULL,
true);
}
int block_dev_destroy_exported_object(ocf_core_t core)
{
int ret = 0;
@ -899,6 +473,8 @@ int block_dev_destroy_exported_object(ocf_core_t core)
if (!bvol->expobj_valid)
return 0;
destroy_workqueue(bvol->expobj_wq);
ret = casdisk_functions.casdsk_exp_obj_lock(bvol->dsk);
if (ret) {
if (-EBUSY == ret)
@ -907,11 +483,11 @@ int block_dev_destroy_exported_object(ocf_core_t core)
}
ret = casdisk_functions.casdsk_exp_obj_destroy(bvol->dsk);
casdisk_functions.casdsk_exp_obj_unlock(bvol->dsk);
if (!ret)
bvol->expobj_valid = false;
casdisk_functions.casdsk_exp_obj_unlock(bvol->dsk);
return ret;
}
@ -954,6 +530,7 @@ static int _block_dev_stop_exported_object(ocf_core_t core, void *cntx)
{
struct bd_object *bvol = bd_object(
ocf_core_get_volume(core));
int ret;
if (bvol->expobj_valid) {
BUG_ON(!bvol->expobj_locked);
@ -961,8 +538,9 @@ static int _block_dev_stop_exported_object(ocf_core_t core, void *cntx)
printk(KERN_INFO "Stopping device %s\n",
casdisk_functions.casdsk_exp_obj_get_gendisk(bvol->dsk)->disk_name);
casdisk_functions.casdsk_exp_obj_destroy(bvol->dsk);
bvol->expobj_valid = false;
ret = casdisk_functions.casdsk_exp_obj_destroy(bvol->dsk);
if (!ret)
bvol->expobj_valid = false;
}
if (bvol->expobj_locked) {
@ -973,6 +551,15 @@ static int _block_dev_stop_exported_object(ocf_core_t core, void *cntx)
return 0;
}
/* ocf_core_visit() adapter: free the casdsk disk object of one core */
static int _block_dev_free_exported_object(ocf_core_t core, void *cntx)
{
struct bd_object *bvol = bd_object(
ocf_core_get_volume(core));
casdisk_functions.casdsk_exp_obj_free(bvol->dsk);
return 0;
}
int block_dev_destroy_all_exported_objects(ocf_cache_t cache)
{
int result;
@ -989,21 +576,6 @@ int block_dev_destroy_all_exported_objects(ocf_cache_t cache)
ocf_core_visit(cache, _block_dev_stop_exported_object, NULL, true);
block_dev_free_all_exported_objects(cache);
return 0;
}
/* ocf_core_visit() adapter: free the casdsk disk object of one core */
static int _block_dev_free_exported_object(ocf_core_t core, void *cntx)
{
struct bd_object *bvol = bd_object(
ocf_core_get_volume(core));
casdisk_functions.casdsk_exp_obj_free(bvol->dsk);
return 0;
}
/*
 * Free the casdsk disk object of every core in @cache by visiting each
 * core with the free adapter.
 */
int block_dev_free_all_exported_objects(ocf_cache_t cache)
{
return ocf_core_visit(cache, _block_dev_free_exported_object, NULL,
true);
}

View File

@ -6,15 +6,11 @@
#ifndef __VOL_BLOCK_DEV_TOP_H__
#define __VOL_BLOCK_DEV_TOP_H__
int block_dev_activate_all_exported_objects(ocf_cache_t cache);
int block_dev_activate_exported_object(ocf_core_t core);
int block_dev_create_all_exported_objects(ocf_cache_t cache);
int block_dev_create_exported_object(ocf_core_t core);
int block_dev_destroy_all_exported_objects(ocf_cache_t cache);
int block_dev_destroy_exported_object(ocf_core_t core);
int block_dev_free_all_exported_objects(ocf_cache_t cache);
#endif /* __VOL_BLOCK_DEV_TOP_H__ */

View File

@ -11,29 +11,11 @@
/**
* Version of cas_disk interface
*/
#define CASDSK_IFACE_VERSION 2
#define CASDSK_IFACE_VERSION 3
struct casdsk_disk;
#define CASDSK_BIO_NOT_HANDLED 0
#define CASDSK_BIO_HANDLED 1
struct casdsk_exp_obj_ops {
/**
* @brief Prepare request queue of exported object (top) block device.
* Could be NULL.
*/
int (*prepare_queue)(struct casdsk_disk *dsk, struct request_queue *q,
void *private);
/**
* @brief Cleanup request queue of exported object (top) block device.
* Could be NULL.
*/
void (*cleanup_queue)(struct casdsk_disk *dsk, struct request_queue *q,
void *private);
/**
* @brief Set geometry of exported object (top) block device.
* Could be NULL.
@ -41,39 +23,12 @@ struct casdsk_exp_obj_ops {
int (*set_geometry)(struct casdsk_disk *dsk, void *private);
/**
* @brief make_request_fn of exported object (top) block device.
* @brief submit_bio of exported object (top) block device.
* Called by cas_disk when cas_disk device is in attached mode.
*
* @return casdsk_BIO_HANDLED when bio was handled.
* Otherwise casdsk_BIO_NOT_HANDLED. In this case bio will be submitted
* to I/O scheduler and should be handled by request_fn.
*/
int (*make_request_fn)(struct casdsk_disk *dsk, struct request_queue *q,
void (*submit_bio)(struct casdsk_disk *dsk,
struct bio *bio, void *private);
/**
* @brief queue_rq_fn of exported object (top) block device.
* Called by cas_disk when cas_disk device is in attached mode.
*/
CAS_BLK_STATUS_T (*queue_rq_fn)(struct casdsk_disk *dsk, struct request *rq,
void *private);
/**
* @brief Increment exported object pending request counter.
*/
void (*pending_rq_inc)(struct casdsk_disk *dsk, void *private);
/**
* @brief Decrement exported object pending request counter.
*/
void (*pending_rq_dec)(struct casdsk_disk *dsk, void *private);
/**
* @brief ioctl handler of exported object (top) block device.
* Called by cas_disk when cas_disk device is in attached mode.
*/
int (*ioctl)(struct casdsk_disk *dsk, unsigned int cmd, unsigned long arg,
void *private);
};
/**

View File

@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/blkdev.h>
struct casdsk_stored_config {
size_t n_blobs;

View File

@ -281,9 +281,7 @@ struct request_queue *casdsk_disk_get_queue(struct casdsk_disk *dsk)
{
BUG_ON(!dsk);
BUG_ON(!dsk->bd);
BUG_ON(!dsk->bd->bd_contains);
BUG_ON(!dsk->bd->bd_contains->bd_disk);
return dsk->bd->bd_contains->bd_disk->queue;
return cas_bdev_whole(dsk->bd)->bd_disk->queue;
}
EXPORT_SYMBOL(casdsk_disk_get_queue);

View File

@ -67,17 +67,9 @@ void casdsk_deinit_exp_objs(void)
}
static inline void _casdsk_exp_obj_handle_bio_att(struct casdsk_disk *dsk,
struct request_queue *q,
struct bio *bio)
{
int status = CASDSK_BIO_NOT_HANDLED;
if (likely(dsk->exp_obj->ops->make_request_fn))
status = dsk->exp_obj->ops->
make_request_fn(dsk, q, bio, dsk->private);
if (status == CASDSK_BIO_NOT_HANDLED)
cas_call_default_mk_request_fn(dsk->exp_obj->mk_rq_fn, q, bio);
dsk->exp_obj->ops->submit_bio(dsk, bio, dsk->private);
}
CAS_DECLARE_BLOCK_CALLBACK(_casdsk_exp_obj_bio_pt_io, struct bio *bio,
@ -102,7 +94,6 @@ CAS_DECLARE_BLOCK_CALLBACK(_casdsk_exp_obj_bio_pt_io, struct bio *bio,
}
static inline void _casdsk_exp_obj_handle_bio_pt(struct casdsk_disk *dsk,
struct request_queue *q,
struct bio *bio)
{
struct bio *cloned_bio;
@ -133,13 +124,12 @@ static inline void _casdsk_exp_obj_handle_bio_pt(struct casdsk_disk *dsk,
}
static inline void _casdsk_exp_obj_handle_bio(struct casdsk_disk *dsk,
struct request_queue *q,
struct bio *bio)
{
if (likely(casdsk_disk_is_attached(dsk)))
_casdsk_exp_obj_handle_bio_att(dsk, q, bio);
_casdsk_exp_obj_handle_bio_att(dsk, bio);
else if (casdsk_disk_is_pt(dsk))
_casdsk_exp_obj_handle_bio_pt(dsk, q, bio);
_casdsk_exp_obj_handle_bio_pt(dsk, bio);
else if (casdsk_disk_is_shutdown(dsk))
CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), CAS_ERRNO_TO_BLK_STS(-EIO));
else
@ -176,45 +166,27 @@ retry:
return cpu;
}
static MAKE_RQ_RET_TYPE _casdsk_exp_obj_make_rq_fn(struct request_queue *q,
struct bio *bio)
static MAKE_RQ_RET_TYPE _casdsk_exp_obj_submit_bio(struct bio *bio)
{
struct casdsk_disk *dsk;
unsigned int cpu;
BUG_ON(!bio);
BUG_ON(!q);
BUG_ON(!q->queuedata);
dsk = q->queuedata;
dsk = CAS_BIO_GET_GENDISK(bio)->private_data;
cpu = _casdsk_exp_obj_begin_rq(dsk);
_casdsk_exp_obj_handle_bio(dsk, q, bio);
_casdsk_exp_obj_handle_bio(dsk, bio);
_casdsk_exp_obj_end_rq(dsk, cpu);
KRETURN(0);
}
static int _casdsk_get_next_part_no(struct block_device *bd)
static MAKE_RQ_RET_TYPE _casdsk_exp_obj_make_rq_fn(struct request_queue *q,
struct bio *bio)
{
int part_no = 0;
struct gendisk *disk = bd->bd_disk;
struct disk_part_iter piter;
struct hd_struct *part;
mutex_lock(&bd->bd_mutex);
disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
while ((part = disk_part_iter_next(&piter))) {
part_no = part->partno;
break;
}
disk_part_iter_exit(&piter);
mutex_unlock(&bd->bd_mutex);
return part_no;
return _casdsk_exp_obj_submit_bio(bio);
}
static int _casdsk_del_partitions(struct casdsk_disk *dsk)
@ -256,7 +228,7 @@ static int _casdsk_del_partitions(struct casdsk_disk *dsk)
goto out_copy;
}
while ((part_no = _casdsk_get_next_part_no(bd))) {
while ((part_no = cas_bd_get_next_part(bd))) {
bpart.pno = part_no;
result = copy_to_user((void __user *)usr_bpart, &bpart,
sizeof(bpart));
@ -295,7 +267,7 @@ static int _casdsk_exp_obj_hide_parts(struct casdsk_disk *dsk)
struct block_device *bd = casdsk_disk_get_blkdev(dsk);
struct gendisk *gdsk = casdsk_disk_get_gendisk(dsk);
if (bd != bd->bd_contains)
if (bd != cas_bdev_whole(bd))
/* It is partition, no more job required */
return 0;
@ -332,7 +304,7 @@ static int _casdsk_exp_obj_set_dev_t(struct casdsk_disk *dsk, struct gendisk *gd
bdev = casdsk_disk_get_blkdev(dsk);
BUG_ON(!bdev);
if (bdev->bd_contains != bdev) {
if (cas_bdev_whole(bdev) != bdev) {
minors = 1;
flags = 0;
} else {
@ -359,7 +331,7 @@ static void _casdsk_exp_obj_clear_dev_t(struct casdsk_disk *dsk)
struct block_device *bdev = casdsk_disk_get_blkdev(dsk);
struct gendisk *gdsk = casdsk_disk_get_gendisk(dsk);
if (bdev->bd_contains == bdev) {
if (cas_bdev_whole(bdev) == bdev) {
/* Restore previous configuration of bottom disk */
gdsk->minors = dsk->gd_minors;
gdsk->flags |= dsk->gd_flags;
@ -369,6 +341,7 @@ static void _casdsk_exp_obj_clear_dev_t(struct casdsk_disk *dsk)
static const struct block_device_operations _casdsk_exp_obj_ops = {
.owner = THIS_MODULE,
CAS_SET_SUBMIT_BIO(_casdsk_exp_obj_submit_bio)
};
static int casdsk_exp_obj_alloc(struct casdsk_disk *dsk)
@ -467,30 +440,9 @@ static int _casdsk_exp_obj_init_kobject(struct casdsk_disk *dsk)
}
static CAS_BLK_STATUS_T _casdsk_exp_obj_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
const struct blk_mq_queue_data *bd)
{
struct casdsk_disk *dsk = hctx->driver_data;
struct casdsk_exp_obj *exp_obj = dsk->exp_obj;
struct request *rq = bd->rq;
CAS_BLK_STATUS_T result = CAS_BLK_STS_OK;
if (likely(exp_obj->ops && exp_obj->ops->queue_rq_fn)) {
exp_obj->ops->pending_rq_inc(dsk, dsk->private);
result = exp_obj->ops->queue_rq_fn(dsk, rq, dsk->private);
exp_obj->ops->pending_rq_dec(dsk, dsk->private);
} else {
/*
* queue_rq_fn() is required, as we can't do any default
* action in attached mode. In PT mode we handle all bios
* directly in make_request_fn(), so queue_rq_fn() will not
* be called.
*/
BUG_ON(rq);
}
return result;
return CAS_BLK_STS_NOTSUPP;
}
static struct blk_mq_ops casdsk_mq_ops = {
@ -611,7 +563,6 @@ int casdsk_exp_obj_create(struct casdsk_disk *dsk, const char *dev_name,
gd->private_data = dsk;
strlcpy(gd->disk_name, exp_obj->dev_name, sizeof(gd->disk_name));
dsk->exp_obj->mk_rq_fn = cas_get_default_mk_request_fn(queue);
cas_blk_queue_make_request(queue, _casdsk_exp_obj_make_rq_fn);
if (exp_obj->ops->set_geometry) {
@ -746,7 +697,7 @@ int casdsk_exp_obj_lock(struct casdsk_disk *dsk)
exp_obj = dsk->exp_obj;
exp_obj->locked_bd = bdget_disk(exp_obj->gd, 0);
exp_obj->locked_bd = cas_bdget_disk(exp_obj->gd);
if (!exp_obj->locked_bd)
return -ENAVAIL;

View File

@ -28,8 +28,6 @@ struct casdsk_exp_obj {
struct casdsk_exp_obj_ops *ops;
make_request_fn *mk_rq_fn;
const char *dev_name;
struct kobject kobj;