Merge pull request #83 from micrakow/blk-mq

Switch CAS to multi-queue block API
This commit is contained in:
Adam Rutkowski 2019-08-28 15:39:50 +02:00 committed by GitHub
commit 93ee265827
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 255 additions and 175 deletions

View File

@ -0,0 +1,31 @@
#!/bin/bash
#
# Copyright(c) 2012-2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

# Source the shared configure framework; the caller passes the framework
# path as the third positional argument. Quoted to survive paths with
# spaces (ShellCheck SC2086/SC2046).
. "$(dirname "$3")/conf_framework"

# check <config_file> <self_path>
# Probe whether the kernel still provides the single-queue
# blk_end_request_all(); record "1" if so, otherwise "2" (multi-queue).
check() {
	cur_name=$(basename "$2")
	config_file_path="$1"
	if compile_module "$cur_name" "blk_end_request_all(NULL, 0)" "linux/blkdev.h"
	then
		echo "$cur_name" "1" >> "$config_file_path"
	else
		echo "$cur_name" "2" >> "$config_file_path"
	fi
}

# apply <result>
# Emit the CAS_END_REQUEST_ALL alias matching the probed kernel API.
apply() {
	case "$1" in
	"1")
		add_define "CAS_END_REQUEST_ALL blk_end_request_all" ;;
	"2")
		add_define "CAS_END_REQUEST_ALL blk_mq_end_request" ;;
	*)
		# Unknown probe result — abort configuration.
		exit 1
	esac
}

conf_run "$@"

View File

@ -0,0 +1,31 @@
#!/bin/bash
#
# Copyright(c) 2012-2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

# Source the shared configure framework; the caller passes the framework
# path as the third positional argument. Quoted to survive paths with
# spaces (ShellCheck SC2086/SC2046).
. "$(dirname "$3")/conf_framework"

# check <config_file> <self_path>
# Probe whether the kernel declares blk_status_t (introduced for the
# blk-mq status API); record "1" if so, otherwise "2".
check() {
	cur_name=$(basename "$2")
	config_file_path="$1"
	if compile_module "$cur_name" "blk_status_t t" "linux/blk_types.h"
	then
		echo "$cur_name" "1" >> "$config_file_path"
	else
		echo "$cur_name" "2" >> "$config_file_path"
	fi
}

# apply <result>
# Map CAS_BLK_STATUS_T to blk_status_t on new kernels, plain int on old.
apply() {
	case "$1" in
	"1")
		add_define "CAS_BLK_STATUS_T blk_status_t" ;;
	"2")
		add_define "CAS_BLK_STATUS_T int" ;;
	*)
		# Unknown probe result — abort configuration.
		exit 1
	esac
}

conf_run "$@"

View File

@ -9,13 +9,17 @@
check() { check() {
cur_name=$(basename $2) cur_name=$(basename $2)
config_file_path=$1 config_file_path=$1
if compile_module $cur_name "vm_munmap(0, 0)" "linux/mm.h" if compile_module $cur_name "vm_munmap(0, 0); MAP_PRIVATE;" "linux/mm.h"
then then
echo $cur_name "1" >> $config_file_path echo $cur_name "1" >> $config_file_path
elif compile_module $cur_name "do_munmap(NULL, 0)" "linux/mm.h" elif compile_module $cur_name "do_munmap(NULL, 0)" "linux/mm.h"
then then
echo $cur_name "2" >> $config_file_path echo $cur_name "2" >> $config_file_path
else elif compile_module $cur_name "vm_munmap(0, 0); MAP_PRIVATE;" "linux/mm.h"\
"uapi/linux/mman.h"
then
echo $cur_name "3" >> $config_file_path
else
echo $cur_name "X" >> $config_file_path echo $cur_name "X" >> $config_file_path
fi fi
} }
@ -50,6 +54,21 @@ apply() {
{ {
return do_munmap(current->mm, start, len); return do_munmap(current->mm, start, len);
}" ;; }" ;;
"3")
add_function "
#include <uapi/asm-generic/mman-common.h>
#include <uapi/linux/mman.h>
static inline unsigned long cas_vm_mmap(struct file *file,
unsigned long addr, unsigned long len)
{
return vm_mmap(file, addr, len, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
}"
add_function "
static inline int cas_vm_munmap(unsigned long start, size_t len)
{
return vm_munmap(start, len);
}" ;;
*) *)
exit 1 exit 1
esac esac

View File

@ -20,11 +20,11 @@ check() {
apply() { apply() {
case "$1" in case "$1" in
"1") "1")
add_define "cas_queue_flag_set_unlocked(flag, request_queue) \\ add_define "CAS_QUEUE_FLAG_SET(flag, request_queue) \\
blk_queue_flag_set(flag, request_queue)" ;; blk_queue_flag_set(flag, request_queue)" ;;
"2") "2")
add_define "cas_queue_flag_set_unlocked(flag, request_queue) \\ add_define "CAS_QUEUE_FLAG_SET(flag, request_queue) \\
queue_flag_set_unlocked(flag, request_queue)" ;; queue_flag_set(flag, request_queue)" ;;
*) *)
exit 1 exit 1
esac esac

View File

@ -0,0 +1,38 @@
#!/bin/bash
#
# Copyright(c) 2012-2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

# Source the shared configure framework; the caller passes the framework
# path as the third positional argument. Quoted to survive paths with
# spaces (ShellCheck SC2086/SC2046).
. "$(dirname "$3")/conf_framework"

# check <config_file> <self_path>
# Probe whether struct request_queue embeds queue_lock as a pointer
# (pre-5.0: spin_lock_irq(q->queue_lock)) or as a member
# (5.0+: spin_lock_irq(&q->queue_lock)); record "1" or "2", "X" if neither.
check() {
	cur_name=$(basename "$2")
	config_file_path="$1"
	if compile_module "$cur_name" "struct request_queue *q; spin_lock_irq(q->queue_lock);" \
		"linux/blkdev.h"
	then
		echo "$cur_name" "1" >> "$config_file_path"
	elif compile_module "$cur_name" "struct request_queue *q; spin_lock_irq(&q->queue_lock);" \
		"linux/blkdev.h"
	then
		echo "$cur_name" "2" >> "$config_file_path"
	else
		echo "$cur_name" "X" >> "$config_file_path"
	fi
}

# apply <result>
# Emit CAS_QUEUE_SPIN_LOCK/UNLOCK wrappers matching the probed layout.
apply() {
	case "$1" in
	"1")
		add_define "CAS_QUEUE_SPIN_LOCK(q) spin_lock_irq(q->queue_lock)"
		add_define "CAS_QUEUE_SPIN_UNLOCK(q) spin_unlock_irq(q->queue_lock)" ;;
	"2")
		add_define "CAS_QUEUE_SPIN_LOCK(q) spin_lock_irq(&q->queue_lock)"
		add_define "CAS_QUEUE_SPIN_UNLOCK(q) spin_unlock_irq(&q->queue_lock)" ;;
	*)
		# Unknown probe result — abort configuration.
		exit 1
	esac
}

conf_run "$@"

View File

@ -23,7 +23,11 @@ add_function() {
compile_module(){ compile_module(){
if [ $# -gt 2 ] if [ $# -gt 2 ]
then then
INCLUDE="#include <$3>" i=3
while [ "$i" -le "$#" ]; do
INCLUDE+=$(echo -e "\n#include <${!i}>\\n")
i=$((i + 1))
done
else else
INCLUDE="" INCLUDE=""
fi fi

View File

@ -9,16 +9,9 @@
#define BLK_RQ_POS(rq) (CAS_BIO_BISECTOR((rq)->bio)) #define BLK_RQ_POS(rq) (CAS_BIO_BISECTOR((rq)->bio))
#define BLK_RQ_BYTES(rq) blk_rq_bytes(rq) #define BLK_RQ_BYTES(rq) blk_rq_bytes(rq)
extern u32 use_io_scheduler;
static inline void __blockdev_end_request_all(struct request *rq, int error)
{
__blk_end_request_all(rq, map_cas_err_to_generic(error));
}
static inline void _blockdev_end_request_all(struct request *rq, int error) static inline void _blockdev_end_request_all(struct request *rq, int error)
{ {
blk_end_request_all(rq, map_cas_err_to_generic(error)); CAS_END_REQUEST_ALL(rq, map_cas_err_to_generic(error));
} }
static inline bool _blockdev_can_handle_rq(struct request *rq) static inline bool _blockdev_can_handle_rq(struct request *rq)
@ -28,8 +21,10 @@ static inline bool _blockdev_can_handle_rq(struct request *rq)
if (unlikely(!cas_is_rq_type_fs(rq))) if (unlikely(!cas_is_rq_type_fs(rq)))
error = __LINE__; error = __LINE__;
if (unlikely(rq->next_rq)) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
if (unlikely(blk_bidi_rq(rq)))
error = __LINE__; error = __LINE__;
#endif
if (error != 0) { if (error != 0) {
CAS_PRINT_RL(KERN_ERR "%s cannot handle request (ERROR %d)\n", CAS_PRINT_RL(KERN_ERR "%s cannot handle request (ERROR %d)\n",
@ -40,16 +35,6 @@ static inline bool _blockdev_can_handle_rq(struct request *rq)
return true; return true;
} }
static inline struct request *_blockdev_peek_request(struct request_queue *q)
{
return blk_peek_request(q);
}
static inline void _blockdev_start_request(struct request *rq)
{
blk_start_request(rq);
}
static void _blockdev_set_bio_data(struct blk_data *data, struct bio *bio) static void _blockdev_set_bio_data(struct blk_data *data, struct bio *bio)
{ {
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
@ -386,7 +371,7 @@ static uint32_t _blkdev_scan_request(ocf_cache_t cache, struct request *rq,
return size; return size;
} }
static int _blkdev_handle_request(struct request *rq, ocf_core_t core) static int __block_dev_queue_rq(struct request *rq, ocf_core_t core)
{ {
ocf_cache_t cache = ocf_core_get_cache(core); ocf_cache_t cache = ocf_core_get_cache(core);
struct cache_priv *cache_priv = ocf_cache_get_priv(cache); struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
@ -397,7 +382,7 @@ static int _blkdev_handle_request(struct request *rq, ocf_core_t core)
uint32_t size; uint32_t size;
int ret; int ret;
if (_blockdev_is_request_barier(rq)) { if (_blockdev_is_request_barier(rq) || !_blockdev_can_handle_rq(rq)) {
CAS_PRINT_RL(KERN_WARNING CAS_PRINT_RL(KERN_WARNING
"special bio was sent,not supported!\n"); "special bio was sent,not supported!\n");
return -ENOTSUPP; return -ENOTSUPP;
@ -499,7 +484,19 @@ static int _blkdev_handle_request(struct request *rq, ocf_core_t core)
} }
} }
return 0; return ret;
}
static int _block_dev_queue_request(struct casdsk_disk *dsk, struct request *rq, void *private)
{
ocf_core_t core = private;
int ret;
ret = __block_dev_queue_rq(rq, core);
if (ret)
_blockdev_end_request_all(rq, ret);
return ret;
} }
static inline int _blkdev_can_hndl_bio(struct bio *bio) static inline int _blkdev_can_hndl_bio(struct bio *bio)
@ -585,7 +582,7 @@ static void _blockdev_set_discard_properties(ocf_cache_t cache,
core_q = bdev_get_queue(core_bd); core_q = bdev_get_queue(core_bd);
cache_q = bdev_get_queue(cache_bd); cache_q = bdev_get_queue(cache_bd);
cas_queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, exp_q); CAS_QUEUE_FLAG_SET(QUEUE_FLAG_DISCARD, exp_q);
CAS_SET_DISCARD_ZEROES_DATA(exp_q->limits, 0); CAS_SET_DISCARD_ZEROES_DATA(exp_q->limits, 0);
if (core_q && blk_queue_discard(core_q)) { if (core_q && blk_queue_discard(core_q)) {
@ -666,13 +663,7 @@ static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
return 0; return 0;
} }
static inline bool _blockdev_is_elevator_inited(struct request_queue *q) static void _blockdev_pending_req_inc(struct casdsk_disk *dsk, void *private)
{
return !!block_dev_get_elevator_name(q);
}
static int _blockdev_prep_rq_fn(struct casdsk_disk *dsk, struct request_queue *q,
struct request *rq, void *private)
{ {
ocf_core_t core; ocf_core_t core;
ocf_volume_t obj; ocf_volume_t obj;
@ -685,17 +676,21 @@ static int _blockdev_prep_rq_fn(struct casdsk_disk *dsk, struct request_queue *q
BUG_ON(!bvol); BUG_ON(!bvol);
atomic64_inc(&bvol->pending_rqs); atomic64_inc(&bvol->pending_rqs);
return BLKPREP_OK;
} }
static int _blockdev_prepare_queue(struct casdsk_disk *dsk, static void _blockdev_pending_req_dec(struct casdsk_disk *dsk, void *private)
struct request_queue *q, void *private)
{ {
if (!_blockdev_is_elevator_inited(q)) ocf_core_t core;
return -EINVAL; ocf_volume_t obj;
struct bd_object *bvol;
return 0; BUG_ON(!private);
core = private;
obj = ocf_core_get_volume(core);
bvol = bd_object(obj);
BUG_ON(!bvol);
atomic64_dec(&bvol->pending_rqs);
} }
static void _blockdev_make_request_discard(struct casdsk_disk *dsk, static void _blockdev_make_request_discard(struct casdsk_disk *dsk,
@ -813,51 +808,12 @@ err:
return CASDSK_BIO_NOT_HANDLED; return CASDSK_BIO_NOT_HANDLED;
} }
static void _blockdev_request_fn(struct casdsk_disk *dsk, struct request_queue *q,
void *private)
{
ocf_core_t core;
ocf_volume_t obj;
struct bd_object *bvol;
struct request *rq;
int result;
BUG_ON(!private);
core = private;
obj = ocf_core_get_volume(core);
bvol = bd_object(obj);
while (true) {
rq = _blockdev_peek_request(q);
if (rq == NULL)
break;
_blockdev_start_request(rq);
if (!_blockdev_can_handle_rq(rq)) {
__blockdev_end_request_all(rq, -EIO);
continue;
}
spin_unlock_irq(q->queue_lock);
result = _blkdev_handle_request(rq, core);
spin_lock_irq(q->queue_lock);
if (result)
__blockdev_end_request_all(rq, result);
atomic64_dec(&bvol->pending_rqs);
}
}
static struct casdsk_exp_obj_ops _blockdev_exp_obj_ops = { static struct casdsk_exp_obj_ops _blockdev_exp_obj_ops = {
.prepare_queue = _blockdev_prepare_queue,
.set_geometry = _blockdev_set_geometry, .set_geometry = _blockdev_set_geometry,
.make_request_fn = _blockdev_make_request_fast, .make_request_fn = _blockdev_make_request_fast,
.request_fn = _blockdev_request_fn, .queue_rq_fn = _block_dev_queue_request,
.prep_rq_fn = _blockdev_prep_rq_fn, .pending_rq_inc = _blockdev_pending_req_inc,
.pending_rq_dec = _blockdev_pending_req_dec,
}; };
/** /**

View File

@ -51,18 +51,21 @@ struct casdsk_exp_obj_ops {
struct bio *bio, void *private); struct bio *bio, void *private);
/** /**
* @brief request_fn of exported object (top) block device. * @brief queue_rq_fn of exported object (top) block device.
* Called by cas_disk when cas_disk device is in attached mode. * Called by cas_disk when cas_disk device is in attached mode.
*/ */
void (*request_fn)(struct casdsk_disk *dsk, struct request_queue *q, int (*queue_rq_fn)(struct casdsk_disk *dsk, struct request *rq,
void *private); void *private);
/** /**
* @brief prep_rq_fn of exported object (top) block device. * @brief Increment exported object pending request counter.
* Called by cas_disk when cas_disk device is in attached mode.
*/ */
int (*prep_rq_fn)(struct casdsk_disk *dsk, struct request_queue *q, void (*pending_rq_inc)(struct casdsk_disk *dsk, void *private);
struct request *rq, void *private);
/**
* @brief Decrement exported object pending request counter.
*/
void (*pending_rq_dec)(struct casdsk_disk *dsk, void *private);
/** /**
* @brief ioctl handler of exported object (top) block device. * @brief ioctl handler of exported object (top) block device.

View File

@ -9,6 +9,7 @@
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/blk-mq.h>
struct casdsk_exp_obj; struct casdsk_exp_obj;
@ -35,6 +36,7 @@ struct casdsk_disk {
int gd_flags; int gd_flags;
int gd_minors; int gd_minors;
struct blk_mq_tag_set tag_set;
struct casdsk_exp_obj *exp_obj; struct casdsk_exp_obj *exp_obj;
struct kobject kobj; struct kobject kobj;

View File

@ -8,6 +8,7 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/blkpg.h> #include <linux/blkpg.h>
#include <linux/elevator.h> #include <linux/elevator.h>
#include <linux/blk-mq.h>
#include "cas_disk_defs.h" #include "cas_disk_defs.h"
#include "cas_disk.h" #include "cas_disk.h"
@ -65,47 +66,6 @@ void casdsk_deinit_exp_objs(void)
kmem_cache_destroy(casdsk_module->exp_obj_cache); kmem_cache_destroy(casdsk_module->exp_obj_cache);
} }
static int _casdsk_exp_obj_prep_rq_fn(struct request_queue *q, struct request *rq)
{
struct casdsk_disk *dsk;;
BUG_ON(!q);
BUG_ON(!q->queuedata);
dsk = q->queuedata;
BUG_ON(!dsk->exp_obj);
if (likely(dsk->exp_obj->ops && dsk->exp_obj->ops->prep_rq_fn))
return dsk->exp_obj->ops->prep_rq_fn(dsk, q, rq, dsk->private);
else
return BLKPREP_OK;
}
static void _casdsk_exp_obj_request_fn(struct request_queue *q)
{
struct casdsk_disk *dsk;
struct request *rq;
BUG_ON(!q);
BUG_ON(!q->queuedata);
dsk = q->queuedata;
BUG_ON(!dsk);
BUG_ON(!dsk->exp_obj);
if (likely(dsk->exp_obj->ops && dsk->exp_obj->ops->request_fn)) {
dsk->exp_obj->ops->request_fn(dsk, q, dsk->private);
} else {
/*
* request_fn() is required, as we can't do any default
* action in attached mode. In PT mode we handle all bios
* directly in make_request_fn(), so request_fn() will not
* be called.
*/
rq = blk_peek_request(q);
BUG_ON(rq);
}
}
static inline void _casdsk_exp_obj_handle_bio_att(struct casdsk_disk *dsk, static inline void _casdsk_exp_obj_handle_bio_att(struct casdsk_disk *dsk,
struct request_queue *q, struct request_queue *q,
struct bio *bio) struct bio *bio)
@ -468,6 +428,70 @@ static int _casdsk_exp_obj_init_kobject(struct casdsk_disk *dsk)
return result; return result;
} }
static CAS_BLK_STATUS_T _casdsk_exp_obj_queue_qr(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct casdsk_disk *dsk = hctx->driver_data;
struct casdsk_exp_obj *exp_obj = dsk->exp_obj;
struct request *rq = bd->rq;
int result = 0;
if (likely(exp_obj->ops && exp_obj->ops->queue_rq_fn)) {
exp_obj->ops->pending_rq_inc(dsk, dsk->private);
result = exp_obj->ops->queue_rq_fn(dsk, rq, dsk->private);
exp_obj->ops->pending_rq_dec(dsk, dsk->private);
} else {
/*
* queue_rq_fn() is required, as we can't do any default
* action in attached mode. In PT mode we handle all bios
* directly in make_request_fn(), so queue_rq_fn() will not
* be called.
*/
BUG_ON(rq);
}
return result;
}
static struct blk_mq_ops casdsk_mq_ops = {
.queue_rq = _casdsk_exp_obj_queue_qr,
};
static void _casdsk_init_queues(struct casdsk_disk *dsk)
{
struct request_queue *q = dsk->exp_obj->queue;
struct blk_mq_hw_ctx *hctx;
int i;
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx->nr_ctx || !hctx->tags)
continue;
hctx->driver_data = dsk;
}
}
static int _casdsk_init_tag_set(struct casdsk_disk *dsk, struct blk_mq_tag_set *set)
{
BUG_ON(!dsk);
BUG_ON(!set);
set->ops = &casdsk_mq_ops;
set->nr_hw_queues = num_online_cpus();
set->numa_node = NUMA_NO_NODE;
/*TODO: Should we inherit qd from core device? */
set->queue_depth = BLKDEV_MAX_RQ;
set->cmd_size = 0;
set->flags = BLK_MQ_F_SHOULD_MERGE;
set->driver_data = dsk;
return blk_mq_alloc_tag_set(set);
}
int casdsk_exp_obj_create(struct casdsk_disk *dsk, const char *dev_name, int casdsk_exp_obj_create(struct casdsk_disk *dsk, const char *dev_name,
struct module *owner, struct casdsk_exp_obj_ops *ops) struct module *owner, struct casdsk_exp_obj_ops *ops)
{ {
@ -524,30 +548,28 @@ int casdsk_exp_obj_create(struct casdsk_disk *dsk, const char *dev_name,
if (result) if (result)
goto error_dev_t; goto error_dev_t;
spin_lock_init(&exp_obj->rq_lock); result = _casdsk_init_tag_set(dsk, &dsk->tag_set);
if (result) {
goto error_init_tag_set;
}
queue = blk_init_queue(_casdsk_exp_obj_request_fn, &exp_obj->rq_lock); queue = blk_mq_init_queue(&dsk->tag_set);
if (!queue) { if (!queue) {
result = -ENOMEM; result = -ENOMEM;
goto error_init_queue; goto error_init_queue;
} }
BUG_ON(queue->queuedata); BUG_ON(queue->queuedata);
queue->queuedata = dsk; queue->queuedata = dsk;
exp_obj->queue = queue; exp_obj->queue = queue;
_casdsk_init_queues(dsk);
gd->fops = &_casdsk_exp_obj_ops; gd->fops = &_casdsk_exp_obj_ops;
gd->queue = queue; gd->queue = queue;
gd->private_data = dsk; gd->private_data = dsk;
strlcpy(gd->disk_name, exp_obj->dev_name, sizeof(gd->disk_name)); strlcpy(gd->disk_name, exp_obj->dev_name, sizeof(gd->disk_name));
if (exp_obj->ops->prepare_queue) {
result = exp_obj->ops->prepare_queue(dsk, queue, dsk->private);
if (result)
goto error_prepare_queue;
}
blk_queue_prep_rq(queue, _casdsk_exp_obj_prep_rq_fn);
dsk->exp_obj->mk_rq_fn = queue->make_request_fn; dsk->exp_obj->mk_rq_fn = queue->make_request_fn;
blk_queue_make_request(queue, _casdsk_exp_obj_make_rq_fn); blk_queue_make_request(queue, _casdsk_exp_obj_make_rq_fn);
@ -562,9 +584,9 @@ int casdsk_exp_obj_create(struct casdsk_disk *dsk, const char *dev_name,
error_set_geometry: error_set_geometry:
if (exp_obj->ops->cleanup_queue) if (exp_obj->ops->cleanup_queue)
exp_obj->ops->cleanup_queue(dsk, queue, dsk->private); exp_obj->ops->cleanup_queue(dsk, queue, dsk->private);
error_prepare_queue:
blk_cleanup_queue(queue);
error_init_queue: error_init_queue:
blk_mq_free_tag_set(&dsk->tag_set);
error_init_tag_set:
_casdsk_exp_obj_clear_dev_t(dsk); _casdsk_exp_obj_clear_dev_t(dsk);
error_dev_t: error_dev_t:
put_disk(gd); put_disk(gd);
@ -744,6 +766,8 @@ int casdsk_exp_obj_destroy(struct casdsk_disk *dsk)
if (exp_obj->queue) if (exp_obj->queue)
blk_cleanup_queue(exp_obj->queue); blk_cleanup_queue(exp_obj->queue);
blk_mq_free_tag_set(&dsk->tag_set);
atomic_set(&dsk->mode, CASDSK_MODE_UNKNOWN); atomic_set(&dsk->mode, CASDSK_MODE_UNKNOWN);
put_disk(exp_obj->gd); put_disk(exp_obj->gd);
@ -786,40 +810,12 @@ static void _casdsk_exp_obj_wait_for_pending_rqs(struct casdsk_disk *dsk)
schedule(); schedule();
} }
#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 0)
static void _casdsk_exp_obj_drain_elevator(struct request_queue *q)
{
if (q->elevator && q->elevator->elevator_type)
while (q->elevator->elevator_type->ops.
elevator_dispatch_fn(q, 1))
;
}
#elif LINUX_VERSION_CODE <= KERNEL_VERSION(4, 10, 0)
static void _casdsk_exp_obj_drain_elevator(struct request_queue *q)
{
if (q->elevator && q->elevator->type)
while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
;
}
#else
static void _casdsk_exp_obj_drain_elevator(struct request_queue *q)
{
if (q->elevator && q->elevator->type)
while (q->elevator->type->ops.sq.elevator_dispatch_fn(q, 1))
;
}
#endif
static void _casdsk_exp_obj_flush_queue(struct casdsk_disk *dsk) static void _casdsk_exp_obj_flush_queue(struct casdsk_disk *dsk)
{ {
struct casdsk_exp_obj *exp_obj = dsk->exp_obj; struct casdsk_exp_obj *exp_obj = dsk->exp_obj;
struct request_queue *q = exp_obj->queue; struct request_queue *q = exp_obj->queue;
spin_lock_irq(q->queue_lock); blk_mq_run_hw_queues(q, false);
_casdsk_exp_obj_drain_elevator(q);
spin_unlock_irq(q->queue_lock);
blk_run_queue(q);
blk_sync_queue(q); blk_sync_queue(q);
} }

View File

@ -19,7 +19,6 @@ struct casdsk_exp_obj {
struct gendisk *gd; struct gendisk *gd;
struct request_queue *queue; struct request_queue *queue;
spinlock_t rq_lock;
struct block_device *locked_bd; struct block_device *locked_bd;

View File

@ -37,7 +37,8 @@ CACHE_LINE_SIZES="4 8 16 32 64"
for mode in $CACHE_MODES; do for mode in $CACHE_MODES; do
for line_size in $CACHE_LINE_SIZES; do for line_size in $CACHE_LINE_SIZES; do
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" \ CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" \
CACHE_MODE_OPTION="$mode" CACHE_LINE_SIZE="$line_size" start_cache CACHE_MODE_OPTION="$mode" CACHE_LINE_SIZE="$line_size" \
CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
for engine in $IO_ENGINES; do for engine in $IO_ENGINES; do