Extending 'configure' script

Functions and macros that depend on the kernel version are now generated
before compilation, based on the capabilities of the current kernel, instead
of being hardcoded for specific kernel versions.

Signed-off-by: Michal Mielewczyk <michal.mielewczyk@intel.com>
Michal Mielewczyk
2019-05-24 09:23:13 -04:00
parent f88d78f603
commit 1e5355eba1
50 changed files with 1079 additions and 742 deletions
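The CAS_-prefixed wrappers that the hunks below switch to (CAS_BIO_BISECTOR, CAS_BIO_BISIZE, CAS_BIO_ENDIO, and so on) are the kind of compatibility macros the configure script now generates per kernel. A minimal sketch in C of what such a generated header could look like; the CAS_KERNEL_HAS_BI_ITER flag and the exact macro bodies are illustrative assumptions, not the script's actual output:

/* Hypothetical excerpt of a configure-generated compatibility header. */
#include <linux/bio.h>

/* Kernels >= 3.14 keep the starting sector and size in bio->bi_iter;
 * older kernels expose them directly as bi_sector/bi_size. A configure
 * test would probe the kernel headers and emit a flag such as this one. */
#ifdef CAS_KERNEL_HAS_BI_ITER /* assumed flag emitted by the configure test */
#define CAS_BIO_BISECTOR(bio) ((bio)->bi_iter.bi_sector)
#define CAS_BIO_BISIZE(bio)   ((bio)->bi_iter.bi_size)
#else
#define CAS_BIO_BISECTOR(bio) ((bio)->bi_sector)
#define CAS_BIO_BISIZE(bio)   ((bio)->bi_size)
#endif

The diff hunks themselves then only rename call sites from the old unprefixed macros to the CAS_-prefixed ones.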

View File

@@ -434,7 +434,7 @@ static void cas_atomic_end_atom(struct cas_atomic_io *atom, int error)
ocf_io_put(io);
}
-static DECLARE_BLOCK_CALLBACK(cas_atomic_fire_atom, struct bio *bio,
+static CAS_DECLARE_BLOCK_CALLBACK(cas_atomic_fire_atom, struct bio *bio,
unsigned int bytes, int error)
{
int err;
@@ -443,12 +443,12 @@ static DECLARE_BLOCK_CALLBACK(cas_atomic_fire_atom, struct bio *bio,
BUG_ON(!bio);
BUG_ON(!bio->bi_private);
-err = BLOCK_CALLBACK_ERROR(bio, error);
+err = CAS_BLOCK_CALLBACK_ERROR(bio, error);
atom = bio->bi_private;
BUG_ON(!atom->master);
bdobj = bd_object(atom->volume);
CAS_DEBUG_PARAM("BIO result = %d", BLOCK_CALLBACK_ERROR(bio, error));
CAS_DEBUG_PARAM("BIO result = %d", CAS_BLOCK_CALLBACK_ERROR(bio, error));
if (err != 0)
goto out;
@@ -504,7 +504,7 @@ static void _cas_atomic_setup_cmd(
cmd->rw.opcode = (dir == OCF_WRITE) ? nvme_cmd_write : nvme_cmd_read;
cmd->rw.nsid = cpu_to_le32(ns_id);
-cmd->rw.slba = cpu_to_le64(BIO_BISECTOR(bio));
+cmd->rw.slba = cpu_to_le64(CAS_BIO_BISECTOR(bio));
cmd->rw.length = cpu_to_le16((bytes / SECTOR_SIZE) - 1);
cmd->rw.control = cpu_to_le16(NVME_RW_LR);
@@ -562,11 +562,11 @@ static void cas_atomic_fire_atom(int dir, struct ocf_io *io,
/* Setup BIO */
bio->bi_bdev = bdev;
-BIO_BISECTOR(bio) = atom->addr / SECTOR_SIZE;
+CAS_BIO_BISECTOR(bio) = atom->addr / SECTOR_SIZE;
bio->bi_next = NULL;
bio->bi_private = atom;
-BIO_OP_FLAGS(bio) |= io->flags;
-bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_atomic_fire_atom);
+CAS_BIO_OP_FLAGS(bio) |= io->flags;
+bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_atomic_fire_atom);
/* Add pages to the BIO */
bvec = atom->data->vec;
@@ -726,8 +726,8 @@ static int cas_atomic_submit_discard_bio(struct cas_atomic_io *atom)
}
nvm_discard->cattr = cpu_to_le32(0);
-nvm_discard->nlb = cpu_to_le32(BIO_BISIZE(atom->bio) >> SECTOR_SHIFT);
-nvm_discard->slba = cpu_to_le64(BIO_BISECTOR(atom->bio));
+nvm_discard->nlb = cpu_to_le32(CAS_BIO_BISIZE(atom->bio) >> SECTOR_SHIFT);
+nvm_discard->slba = cpu_to_le64(CAS_BIO_BISECTOR(atom->bio));
cmd->dsm.opcode = nvme_cmd_dsm;
cmd->dsm.nsid = cpu_to_le32(ns_id);
@@ -739,8 +739,8 @@ static int cas_atomic_submit_discard_bio(struct cas_atomic_io *atom)
offset = offset_in_page(nvm_discard);
blk_add_request_payload(req, page, offset, sizeof(*nvm_discard));
-req->__sector = BIO_BISECTOR(atom->bio);
-req->__data_len = BIO_BISIZE(atom->bio);
+req->__sector = CAS_BIO_BISECTOR(atom->bio);
+req->__data_len = CAS_BIO_BISIZE(atom->bio);
req->ioprio = bio_prio(atom->bio);
req->timeout = ADMIN_TIMEOUT;
@@ -782,7 +782,7 @@ static int cas_atomic_special_req_prepare(struct cas_atomic_io *atom,
return -ENOMEM;
}
-atom->bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_atomic_fire_atom);
+atom->bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_atomic_fire_atom);
atom->bio->bi_bdev = bdev;
atom->bio->bi_private = atom;
@@ -826,9 +826,9 @@ void cas_atomic_submit_discard(struct ocf_io *io)
/* Set up specific field */
atom->discard = true;
-BIO_OP_FLAGS(atom->bio) = CAS_BIO_DISCARD;
-BIO_BISECTOR(atom->bio) = io->addr / SECTOR_SIZE;
-BIO_BISIZE(atom->bio) = io->bytes;
+CAS_BIO_OP_FLAGS(atom->bio) = CAS_BIO_DISCARD;
+CAS_BIO_BISECTOR(atom->bio) = io->addr / SECTOR_SIZE;
+CAS_BIO_BISIZE(atom->bio) = io->bytes;
atom->request = cas_blk_make_request(q, atom->bio, GFP_NOIO);
if (IS_ERR(atom->request)) {
@@ -872,7 +872,7 @@ void cas_atomic_submit_flush(struct ocf_io *io)
return;
}
-if (!CHECK_QUEUE_FLUSH(q)) {
+if (!CAS_CHECK_QUEUE_FLUSH(q)) {
/* This block device does not support flush */
atomic_sub(blkio->dirty, &bdobj->potentially_dirty);
io->end(io, 0);

View File

@@ -358,12 +358,12 @@ static inline int _cas_detect_blk_type(const char *path, uint8_t *type,
struct block_device *bdev;
char holder[] = "CAS DETECT\n";
-bdev = OPEN_BDEV_EXCLUSIVE(path, FMODE_READ, holder);
+bdev = blkdev_get_by_path(path, (FMODE_EXCL|FMODE_READ), holder);
if (IS_ERR(bdev))
return -OCF_ERR_NOT_OPEN_EXC;
ret = cas_blk_identify_type_by_bdev(bdev, type, atomic_params);
-CLOSE_BDEV_EXCLUSIVE(bdev, FMODE_READ);
+blkdev_put(bdev, (FMODE_EXCL|FMODE_READ));
return ret;
}
@@ -437,9 +437,9 @@ int _cas_blk_identify_type(const char *path, uint8_t *type,
if (IS_ERR(file))
return -OCF_ERR_INVAL_VOLUME_TYPE;
-if (S_ISBLK(FILE_INODE(file)->i_mode))
+if (S_ISBLK(CAS_FILE_INODE(file)->i_mode))
*type = BLOCK_DEVICE_VOLUME;
-else if (S_ISCHR(FILE_INODE(file)->i_mode))
+else if (S_ISCHR(CAS_FILE_INODE(file)->i_mode))
*type = NVME_CONTROLLER;
else
result = -OCF_ERR_INVAL_VOLUME_TYPE;

View File

@@ -11,10 +11,10 @@
static inline bool cas_blk_is_flush_io(unsigned long flags)
{
-if ((flags & OCF_WRITE_FLUSH) == OCF_WRITE_FLUSH)
+if ((flags & CAS_WRITE_FLUSH) == CAS_WRITE_FLUSH)
return true;
-if ((flags & OCF_WRITE_FLUSH_FUA) == OCF_WRITE_FLUSH_FUA)
+if ((flags & CAS_WRITE_FLUSH_FUA) == CAS_WRITE_FLUSH_FUA)
return true;
return false;

View File

@@ -198,7 +198,7 @@ static void cas_bd_io_end(struct ocf_io *io, int error)
/*
*
*/
-DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio,
+CAS_DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio,
unsigned int bytes_done, int error)
{
struct ocf_io *io;
@@ -208,11 +208,11 @@ DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio,
BUG_ON(!bio);
BUG_ON(!bio->bi_private);
-BLOCK_CALLBACK_INIT(bio);
+CAS_BLOCK_CALLBACK_INIT(bio);
io = bio->bi_private;
bdobj = bd_object(io->volume);
BUG_ON(!bdobj);
-err = BLOCK_CALLBACK_ERROR(bio, error);
+err = CAS_BLOCK_CALLBACK_ERROR(bio, error);
bdio = cas_io_to_blkio(io);
BUG_ON(!bdio);
@@ -235,13 +235,13 @@ DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio,
}
}
out:
-if (err == -EOPNOTSUPP && (BIO_OP_FLAGS(bio) & CAS_BIO_DISCARD))
+if (err == -EOPNOTSUPP && (CAS_BIO_OP_FLAGS(bio) & CAS_BIO_DISCARD))
err = 0;
cas_bd_io_end(io, err);
bio_put(bio);
-BLOCK_CALLBACK_RETURN();
+CAS_BLOCK_CALLBACK_RETURN();
}
static void block_dev_submit_flush(struct ocf_io *io)
@@ -274,7 +274,7 @@ static void block_dev_submit_flush(struct ocf_io *io)
goto out;
}
-if (!CHECK_QUEUE_FLUSH(q)) {
+if (!CAS_CHECK_QUEUE_FLUSH(q)) {
/* This block device does not support flush, call back */
atomic_sub(blkio->dirty, &bdobj->potentially_dirty);
goto out;
@@ -289,12 +289,12 @@ static void block_dev_submit_flush(struct ocf_io *io)
blkio->dir = io->dir;
-bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_bd_io_end);
+bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_io_end);
CAS_BIO_SET_DEV(bio, bdev);
bio->bi_private = io;
atomic_inc(&blkio->rq_remaning);
-cas_submit_bio(OCF_WRITE_FLUSH, bio);
+cas_submit_bio(CAS_WRITE_FLUSH, bio);
out:
cas_bd_io_end(io, blkio->error);
@@ -374,11 +374,11 @@ void block_dev_submit_discard(struct ocf_io *io)
}
CAS_BIO_SET_DEV(bio, bd);
-BIO_BISECTOR(bio) = start;
-BIO_BISIZE(bio) = bio_sects << SECTOR_SHIFT;
+CAS_BIO_BISECTOR(bio) = start;
+CAS_BIO_BISIZE(bio) = bio_sects << SECTOR_SHIFT;
bio->bi_next = NULL;
bio->bi_private = io;
-bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_bd_io_end);
+bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_io_end);
atomic_inc(&blkio->rq_remaning);
cas_submit_bio(CAS_BIO_DISCARD, bio);
@@ -479,12 +479,11 @@ static void block_dev_submit_io(struct ocf_io *io)
/* Setup BIO */
CAS_BIO_SET_DEV(bio, bdobj->btm_bd);
-BIO_BISECTOR(bio) = addr / SECTOR_SIZE;
+CAS_BIO_BISECTOR(bio) = addr / SECTOR_SIZE;
bio->bi_next = NULL;
bio->bi_private = io;
-BIO_OP_FLAGS(bio) |= io->flags;
-BIO_SET_RW_FLAGS(bio);
-bio->bi_end_io = REFER_BLOCK_CALLBACK(cas_bd_io_end);
+CAS_BIO_OP_FLAGS(bio) |= io->flags;
+bio->bi_end_io = CAS_REFER_BLOCK_CALLBACK(cas_bd_io_end);
/* Add pages */
while (cas_io_iter_is_next(iter) && bytes) {
@@ -588,7 +587,7 @@ int block_dev_try_get_io_class(struct bio *bio, int *io_class)
{
struct ocf_io *io;
-if (bio->bi_end_io != REFER_BLOCK_CALLBACK(cas_bd_io_end))
+if (bio->bi_end_io != CAS_REFER_BLOCK_CALLBACK(cas_bd_io_end))
return -1;
io = bio->bi_private;

View File

@@ -5,7 +5,7 @@
#include "cas_cache.h"
-#define BLK_RQ_POS(rq) (BIO_BISECTOR((rq)->bio))
+#define BLK_RQ_POS(rq) (CAS_BIO_BISECTOR((rq)->bio))
#define BLK_RQ_BYTES(rq) blk_rq_bytes(rq)
extern u32 use_io_scheduler;
@@ -24,7 +24,7 @@ static inline bool _blockdev_can_handle_rq(struct request *rq)
{
int error = 0;
-if (unlikely(!is_rq_type_fs(rq)))
+if (unlikely(!cas_is_rq_type_fs(rq)))
error = __LINE__;
if (unlikely(rq->next_rq))
@@ -104,7 +104,7 @@ void block_dev_complete_bio_fast(struct ocf_io *io, int error)
_blockdev_end_io_acct(bio, data->start_time);
-BIO_ENDIO(bio, BIO_BISIZE(bio), error);
+CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), error);
ocf_io_put(io);
cas_free_blk_data(data);
}
@@ -113,7 +113,7 @@ void block_dev_complete_bio_discard(struct ocf_io *io, int error)
{
struct bio *bio = io->priv1;
-BIO_ENDIO(bio, BIO_BISIZE(bio), error);
+CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), error);
ocf_io_put(io);
}
@@ -161,7 +161,7 @@ bool _blockdev_is_request_barier(struct request *rq)
struct bio *i_bio = rq->bio;
for_each_bio(i_bio) {
-if (CHECK_BARRIER(i_bio))
+if (CAS_CHECK_BARRIER(i_bio))
return true;
}
return false;
@@ -186,13 +186,13 @@ static int _blockdev_alloc_many_requests(ocf_core_t core,
bio = rq->bio;
for_each_bio(bio) {
/* Setup BIO flags */
-if (CAS_IS_WRITE_FLUSH_FUA(BIO_OP_FLAGS(bio))) {
+if (CAS_IS_WRITE_FLUSH_FUA(CAS_BIO_OP_FLAGS(bio))) {
/* FLUSH and FUA */
-flags = OCF_WRITE_FLUSH_FUA;
-} else if (CAS_IS_WRITE_FUA(BIO_OP_FLAGS(bio))) {
+flags = CAS_WRITE_FLUSH_FUA;
+} else if (CAS_IS_WRITE_FUA(CAS_BIO_OP_FLAGS(bio))) {
/* FUA */
-flags = OCF_WRITE_FUA;
-} else if (CAS_IS_WRITE_FLUSH(BIO_OP_FLAGS(bio))) {
+flags = CAS_WRITE_FUA;
+} else if (CAS_IS_WRITE_FLUSH(CAS_BIO_OP_FLAGS(bio))) {
/* FLUSH - It shall be handled in request handler */
error = -EINVAL;
break;
@@ -220,8 +220,8 @@ static int _blockdev_alloc_many_requests(ocf_core_t core,
data->io = sub_io;
-ocf_io_configure(sub_io, BIO_BISECTOR(bio) << SECTOR_SHIFT,
-BIO_BISIZE(bio), (bio_data_dir(bio) == READ) ?
+ocf_io_configure(sub_io, CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT,
+CAS_BIO_BISIZE(bio), (bio_data_dir(bio) == READ) ?
OCF_READ : OCF_WRITE,
cas_cls_classify(cache, bio), flags);
@@ -294,7 +294,7 @@ static int _blkdev_handle_flush_request(struct request *rq, ocf_core_t core)
if (!io)
return -ENOMEM;
-ocf_io_configure(io, 0, 0, OCF_WRITE, 0, OCF_WRITE_FLUSH);
+ocf_io_configure(io, 0, 0, OCF_WRITE, 0, CAS_WRITE_FLUSH);
ocf_io_set_queue(io, cache_priv->io_queues[smp_processor_id()]);
ocf_io_set_cmpl(io, rq, NULL, block_dev_complete_flush);
@@ -359,8 +359,8 @@ static uint32_t _blkdev_scan_request(ocf_cache_t cache, struct request *rq,
* request, and prevent nvme driver from splitting requests.
* For large requests, nvme splitting causes stack overrun.
*/
-if (!_bvec_is_mergeable(SEGMENT_BVEC(bvec_prev),
-SEGMENT_BVEC(bvec))) {
+if (!_bvec_is_mergeable(CAS_SEGMENT_BVEC(bvec_prev),
+CAS_SEGMENT_BVEC(bvec))) {
*single_io = false;
continue;
}
@@ -404,13 +404,13 @@ static int _blkdev_handle_request(struct request *rq, ocf_core_t core)
return -ENOTSUPP;
}
-if ((rq->cmd_flags & REQ_FUA) && RQ_IS_FLUSH(rq)) {
+if ((rq->cmd_flags & REQ_FUA) && CAS_RQ_IS_FLUSH(rq)) {
/* FLUSH and FUA */
-master_flags = OCF_WRITE_FLUSH_FUA;
+master_flags = CAS_WRITE_FLUSH_FUA;
} else if (rq->cmd_flags & REQ_FUA) {
/* FUA */
-master_flags = OCF_WRITE_FUA;
-} else if (RQ_IS_FLUSH(rq)) {
+master_flags = CAS_WRITE_FUA;
+} else if (CAS_RQ_IS_FLUSH(rq)) {
/* FLUSH */
return _blkdev_handle_flush_request(rq, core);
}
@@ -422,7 +422,7 @@ static int _blkdev_handle_request(struct request *rq, ocf_core_t core)
}
ocf_io_configure(io, BLK_RQ_POS(rq) << SECTOR_SHIFT, BLK_RQ_BYTES(rq),
-(RQ_DATA_DIR(rq) == RQ_DATA_DIR_WR) ?
+(rq_data_dir(rq) == CAS_RQ_DATA_DIR_WR) ?
OCF_WRITE : OCF_READ,
cas_cls_classify(cache, rq->bio), master_flags);
@@ -506,10 +506,10 @@ static int _blkdev_handle_request(struct request *rq, ocf_core_t core)
static inline int _blkdev_can_hndl_bio(struct bio *bio)
{
-if (CHECK_BARRIER(bio)) {
+if (CAS_CHECK_BARRIER(bio)) {
CAS_PRINT_RL(KERN_WARNING
"special bio was sent, not supported!\n");
-BIO_ENDIO(bio, BIO_BISIZE(bio), -EOPNOTSUPP);
+CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), -EOPNOTSUPP);
return -ENOTSUPP;
}
@@ -518,13 +518,13 @@ static inline int _blkdev_can_hndl_bio(struct bio *bio)
static inline bool _blkdev_is_flush_fua_bio(struct bio *bio)
{
-if (CAS_IS_WRITE_FLUSH_FUA(BIO_OP_FLAGS(bio))) {
+if (CAS_IS_WRITE_FLUSH_FUA(CAS_BIO_OP_FLAGS(bio))) {
/* FLUSH and FUA */
return true;
-} else if (CAS_IS_WRITE_FUA(BIO_OP_FLAGS(bio))) {
+} else if (CAS_IS_WRITE_FUA(CAS_BIO_OP_FLAGS(bio))) {
/* FUA */
return true;
-} else if (CAS_IS_WRITE_FLUSH(BIO_OP_FLAGS(bio))) {
+} else if (CAS_IS_WRITE_FLUSH(CAS_BIO_OP_FLAGS(bio))) {
/* FLUSH */
return true;
@@ -552,8 +552,8 @@ void _blockdev_set_exported_object_flush_fua(ocf_core_t core)
exp_q = casdisk_functions.casdsk_exp_obj_get_queue(bd_core_vol->dsk);
cache_q = casdisk_functions.casdsk_disk_get_queue(bd_cache_vol->dsk);
-flush = (CHECK_QUEUE_FLUSH(core_q) || CHECK_QUEUE_FLUSH(cache_q));
-fua = (CHECK_QUEUE_FUA(core_q) || CHECK_QUEUE_FUA(cache_q));
+flush = (CAS_CHECK_QUEUE_FLUSH(core_q) || CAS_CHECK_QUEUE_FLUSH(cache_q));
+fua = (CAS_CHECK_QUEUE_FUA(core_q) || CAS_CHECK_QUEUE_FUA(cache_q));
cas_set_queue_flush_fua(exp_q, flush, fua);
#endif
@@ -658,7 +658,7 @@ static int _blockdev_set_geometry(struct casdsk_disk *dsk, void *private)
blk_queue_stack_limits(exp_q, core_q);
/* We don't want to receive splitted requests*/
-SET_QUEUE_CHUNK_SECTORS(exp_q, 0);
+CAS_SET_QUEUE_CHUNK_SECTORS(exp_q, 0);
_blockdev_set_exported_object_flush_fua(core);
@@ -712,11 +712,11 @@ static void _blockdev_make_request_discard(struct casdsk_disk *dsk,
if (!io) {
CAS_PRINT_RL(KERN_CRIT
"Out of memory. Ending IO processing.\n");
-BIO_ENDIO(bio, BIO_BISIZE(bio), -ENOMEM);
+CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), -ENOMEM);
return;
}
-ocf_io_configure(io, BIO_BISECTOR(bio) << SECTOR_SHIFT, BIO_BISIZE(bio),
+ocf_io_configure(io, CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT, CAS_BIO_BISIZE(bio),
0, 0, 0);
ocf_io_set_queue(io, cache_priv->io_queues[smp_processor_id()]);
@@ -754,18 +754,18 @@ static int _blockdev_make_request_fast(struct casdsk_disk *dsk,
return CASDSK_BIO_HANDLED;
}
-if (unlikely(BIO_BISIZE(bio) == 0)) {
+if (unlikely(CAS_BIO_BISIZE(bio) == 0)) {
CAS_PRINT_RL(KERN_ERR
"Not able to handle empty BIO, flags = "
-BIO_OP_FLAGS_FORMAT "\n", BIO_OP_FLAGS(bio));
-BIO_ENDIO(bio, BIO_BISIZE(bio), -EINVAL);
+CAS_BIO_OP_FLAGS_FORMAT "\n", CAS_BIO_OP_FLAGS(bio));
+CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), -EINVAL);
return CASDSK_BIO_HANDLED;
}
data = cas_alloc_blk_data(bio_segments(bio), GFP_ATOMIC);
if (!data) {
CAS_PRINT_RL(KERN_CRIT "BIO data vector allocation error\n");
-BIO_ENDIO(bio, BIO_BISIZE(bio), -ENOMEM);
+CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), -ENOMEM);
return CASDSK_BIO_HANDLED;
}
@@ -778,11 +778,11 @@ static int _blockdev_make_request_fast(struct casdsk_disk *dsk,
if (!io) {
printk(KERN_CRIT "Out of memory. Ending IO processing.\n");
cas_free_blk_data(data);
-BIO_ENDIO(bio, BIO_BISIZE(bio), -ENOMEM);
+CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), -ENOMEM);
return CASDSK_BIO_HANDLED;
}
-ocf_io_configure(io, BIO_BISECTOR(bio) << SECTOR_SHIFT, BIO_BISIZE(bio),
+ocf_io_configure(io, CAS_BIO_BISECTOR(bio) << SECTOR_SHIFT, CAS_BIO_BISIZE(bio),
(bio_data_dir(bio) == READ) ? OCF_READ : OCF_WRITE,
cas_cls_classify(cache, bio), 0);
@@ -790,7 +790,7 @@ static int _blockdev_make_request_fast(struct casdsk_disk *dsk,
if (ret < 0) {
ocf_io_put(io);
cas_free_blk_data(data);
-BIO_ENDIO(bio, BIO_BISIZE(bio), -EINVAL);
+CAS_BIO_ENDIO(bio, CAS_BIO_BISIZE(bio), -EINVAL);
return CASDSK_BIO_HANDLED;
}