Remove potentially_dirty counter from bottom volume

This counter is not accurate (it is missing the memory barriers
required to avoid unwanted behavior due to processor optimizations)
and the performance gain is not clear - in general, global
atomic variables are something we would like to avoid
going forward.
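
To illustrate the race (a minimal sketch, not code from this tree;
complete_write() and submit_flush() are hypothetical helpers, while
potentially_dirty mirrors the counter removed below): in the Linux
kernel memory model atomic_inc() implies no memory barrier and
atomic_read() is a plain load, so nothing orders the counter update
against the flush path's check of it:

	/* CPU A: write completion path */
	complete_write();			/* hypothetical: data write finishes */
	atomic_inc(&potentially_dirty);		/* no implied barrier */

	/* CPU B: flush submission path */
	if (atomic_read(&potentially_dirty) == 0)	/* plain, unordered load */
		return;			/* flush skipped even though CPU A's
					 * write has already completed */
	submit_flush();			/* hypothetical helper */

Pairing both sides with smp_mb() / smp_mb__after_atomic() could make
the check reliable, but removing the counter and always passing the
flush down is simpler and avoids the shared atomic entirely.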

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski
Date:   2020-09-28 14:14:22 +02:00
Parent: 3ea056eb30
Commit: 7905ca79fa

4 changed files with 3 additions and 73 deletions

@@ -14,18 +14,8 @@ struct casdsk_disk;
 struct bd_object {
 	struct casdsk_disk *dsk;
 	struct block_device *btm_bd;
-	/**
-	 * This denotes state of volatile write cache of the device.
-	 * This is set to true when:
-	 * - opening the device
-	 * - when writing to a device without FUA/FLUSH flags
-	 * This is set to false when:
-	 * - FLUSH request is completed on device.
-	 * When it is false
-	 * - FLUSH requests from upper layer are NOT passed to the device.
-	 */
-	atomic_t potentially_dirty;
 	uint32_t expobj_valid : 1;
 		/*!< Bit indicates that exported object was created */

@@ -35,7 +35,6 @@ struct cas_atomic_io {
 	struct cas_atomic_io *master;
 	atomic_t req_remaining;
-	atomic_t potential_dirty;
 	uint32_t count;
 	uint64_t addr;
@@ -463,23 +462,8 @@ static CAS_DECLARE_BLOCK_CALLBACK(cas_atomic_fire_atom, struct bio *bio,
 		goto out;
 	}
-	switch (atom->dir) {
-	case OCF_READ:
-		if (cas_atomic_rd_complete(atom))
-			atom->master->error = -EIO;
-		break;
-	case OCF_WRITE:
-		if (!cas_blk_is_flush_io(atom->flags)) {
-			atomic_inc(&bdobj->potentially_dirty);
-		} else {
-			/* IO flush finished, update potential
-			 * dirty state
-			 */
-			atomic_sub(atomic_read(&atom->potential_dirty),
-					&bdobj->potentially_dirty);
-		}
-		break;
-	}
+	if (atom->dir == OCF_READ && cas_atomic_rd_complete(atom))
+		atom->master->error = -EIO;
 
 out:
 	/* Free BIO, no needed any more */
@@ -858,16 +842,6 @@ void cas_atomic_submit_flush(struct ocf_io *io)
 	CAS_DEBUG_TRACE();
 
-	blkio->dirty = atomic_read(&bdobj->potentially_dirty);
-
-	if (!blkio->dirty) {
-		/* Didn't write anything to underlying disk;
-		 * no need to send req_flush
-		 */
-		io->end(io, 0);
-		return;
-	}
 
 	if (!q) {
 		io->end(io, -EINVAL);
 		return;
@@ -875,7 +849,6 @@ void cas_atomic_submit_flush(struct ocf_io *io)
 	if (!CAS_CHECK_QUEUE_FLUSH(q)) {
 		/* This block device does not support flush */
-		atomic_sub(blkio->dirty, &bdobj->potentially_dirty);
 		io->end(io, 0);
 		return;
 	}
@@ -900,7 +873,6 @@ void cas_atomic_submit_flush(struct ocf_io *io)
 	/* Set up specific field */
 	atom->dir = OCF_WRITE;
-	atomic_set(&atom->potential_dirty, blkio->dirty);
 	atom->request = cas_blk_make_request(q, atom->bio, GFP_NOIO);
 	if (IS_ERR(atom->request)) {

@@ -24,7 +24,6 @@ struct blkio {
 	int error;
 	atomic_t rq_remaning;
 	atomic_t ref_counter;
-	int32_t dirty;
 	int32_t dir;
 	struct blk_data *data; /* IO data buffer */

@@ -218,23 +218,6 @@ CAS_DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio,
 	CAS_DEBUG_TRACE();
 
-	if (err)
-		goto out;
-
-	if (bdio->dir == OCF_WRITE) {
-		/* IO was a write */
-		if (!cas_blk_is_flush_io(io->flags)) {
-			/* Device cache is dirty, mark it */
-			atomic_inc(&bdobj->potentially_dirty);
-		} else {
-			/* IO flush finished, update potential
-			 * dirty state
-			 */
-			atomic_sub(bdio->dirty, &bdobj->potentially_dirty);
-		}
-	}
-
-out:
 	if (err == -EOPNOTSUPP && (CAS_BIO_OP_FLAGS(bio) & CAS_BIO_DISCARD))
 		err = 0;
@@ -253,21 +236,12 @@ static void block_dev_submit_flush(struct ocf_io *io)
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = NULL;
 
-	blkio->dirty = atomic_read(&bdobj->potentially_dirty);
-
 	/* Prevent races of completing IO */
 	atomic_set(&blkio->rq_remaning, 1);
 	/* Increase IO reference counter for FLUSH IO */
 	ocf_io_get(io);
 
-	if (!blkio->dirty) {
-		/* Didn't write anything to underlying disk; no need to
-		 * send req_flush
-		 */
-		goto out;
-	}
-
 	if (q == NULL) {
 		/* No queue, error */
 		blkio->error = -EINVAL;
@@ -276,7 +250,6 @@ static void block_dev_submit_flush(struct ocf_io *io)
 	if (!CAS_CHECK_QUEUE_FLUSH(q)) {
 		/* This block device does not support flush, call back */
-		atomic_sub(blkio->dirty, &bdobj->potentially_dirty);
 		goto out;
 	}
@@ -396,14 +369,10 @@ out:
 static inline bool cas_bd_io_prepare(int *dir, struct ocf_io *io)
 {
 	struct blkio *bdio = cas_io_to_blkio(io);
-	struct bd_object *bdobj = bd_object(ocf_io_get_volume(io));
 
 	/* Setup DIR */
 	bdio->dir = *dir;
 
-	/* Save dirty counter */
-	bdio->dirty = atomic_read(&bdobj->potentially_dirty);
-
 	/* Convert CAS direction into kernel values */
 	switch (bdio->dir) {
 	case OCF_READ: