Remove potentially_dirty counter from bottom volume
This counter is not accurate: it lacks the memory barriers required to prevent unwanted behavior due to processor optimizations (reordering), and the performance gain it provides is not clear. In general, global atomic variables are something we would like to avoid going forward.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
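For context, the inaccuracy described above is the classic unordered-atomics problem: the counter update is not ordered against the data write it is supposed to track, and the flush path's read is not ordered against either. A minimal user-space sketch of the pattern (names and helpers here are illustrative assumptions, not the driver's API; the relaxed operations mirror the kernel's atomic_inc()/atomic_read(), which imply no barriers):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int potentially_dirty;

/* Stand-in for submitting a write bio to the bottom device. */
static void issue_write_to_device(void) { }

/* Writer: submit data, then bump the counter with no ordering. */
static void write_path(void)
{
	issue_write_to_device();
	/* Relaxed RMW: nothing orders it against the write above, so
	 * another CPU may observe the write's effects while still
	 * reading the counter as 0. */
	atomic_fetch_add_explicit(&potentially_dirty, 1,
				  memory_order_relaxed);
}

/* Flush path: decides to skip a flush based on an unordered read. */
static bool flush_needed(void)
{
	return atomic_load_explicit(&potentially_dirty,
				    memory_order_relaxed) != 0;
}

Making this reliable would presumably need the pairing barriers the message alludes to; the commit instead drops the counter entirely.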
parent 3ea056eb30
commit 7905ca79fa
@@ -14,18 +14,8 @@ struct casdsk_disk;
 
 struct bd_object {
 	struct casdsk_disk *dsk;
 
 	struct block_device *btm_bd;
-	/**
-	 * This denotes state of volatile write cache of the device.
-	 * This is set to true when:
-	 * - opening the device
-	 * - when writing to a device without FUA/FLUSH flags
-	 * This is set to false when:
-	 * - FLUSH request is completed on device.
-	 * When it is false
-	 * - FLUSH requests from upper layer are NOT passed to the device.
-	 */
-	atomic_t potentially_dirty;
 
 	uint32_t expobj_valid : 1;
 		/*!< Bit indicates that exported object was created */
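The comment block removed above encodes a small protocol. As a reading aid for the hunks that follow, the deleted bookkeeping amounts to roughly this (a simplified user-space sketch, not the literal driver code):

#include <stdatomic.h>

static atomic_int potentially_dirty;	/* writes not yet covered by a flush */

/* Completion of a non-flush write marks the volume potentially dirty. */
static void on_write_complete(void)
{
	atomic_fetch_add(&potentially_dirty, 1);
}

/* Flush submission snapshots the counter; zero meant "skip the flush"
 * (the snapshot lived in blkio->dirty / atom->potential_dirty). */
static int on_flush_submit(void)
{
	return atomic_load(&potentially_dirty);
}

/* Flush completion subtracts only the snapshot, so writes that completed
 * after the snapshot keep the volume marked dirty. */
static void on_flush_complete(int snapshot)
{
	atomic_fetch_sub(&potentially_dirty, snapshot);
}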
@@ -35,7 +35,6 @@ struct cas_atomic_io {
 
 	struct cas_atomic_io *master;
 	atomic_t req_remaining;
-	atomic_t potential_dirty;
 	uint32_t count;
 
 	uint64_t addr;
@@ -463,23 +462,8 @@ static CAS_DECLARE_BLOCK_CALLBACK(cas_atomic_fire_atom, struct bio *bio,
 		goto out;
 	}
 
-	switch (atom->dir) {
-	case OCF_READ:
-		if (cas_atomic_rd_complete(atom))
+	if (atom->dir == OCF_READ && cas_atomic_rd_complete(atom))
 			atom->master->error = -EIO;
-		break;
-	case OCF_WRITE:
-		if (!cas_blk_is_flush_io(atom->flags)) {
-			atomic_inc(&bdobj->potentially_dirty);
-		} else {
-			/* IO flush finished, update potential
-			 * dirty state
-			 */
-			atomic_sub(atomic_read(&atom->potential_dirty),
-				&bdobj->potentially_dirty);
-		}
-		break;
-	}
 
 out:
 	/* Free BIO, no needed any more */
@@ -858,16 +842,6 @@ void cas_atomic_submit_flush(struct ocf_io *io)
 
 	CAS_DEBUG_TRACE();
 
-	blkio->dirty = atomic_read(&bdobj->potentially_dirty);
-
-	if (!blkio->dirty) {
-		/* Didn't write anything to underlying disk;
-		 * no need to send req_flush
-		 */
-		io->end(io, 0);
-		return;
-	}
-
 	if (!q) {
 		io->end(io, -EINVAL);
 		return;
@@ -875,7 +849,6 @@ void cas_atomic_submit_flush(struct ocf_io *io)
 
 	if (!CAS_CHECK_QUEUE_FLUSH(q)) {
 		/* This block device does not support flush */
-		atomic_sub(blkio->dirty, &bdobj->potentially_dirty);
 		io->end(io, 0);
 		return;
 	}
@@ -900,7 +873,6 @@ void cas_atomic_submit_flush(struct ocf_io *io)
 
 	/* Set up specific field */
 	atom->dir = OCF_WRITE;
-	atomic_set(&atom->potential_dirty, blkio->dirty);
 
 	atom->request = cas_blk_make_request(q, atom->bio, GFP_NOIO);
 	if (IS_ERR(atom->request)) {
@@ -24,7 +24,6 @@ struct blkio {
 	int error;
 	atomic_t rq_remaning;
 	atomic_t ref_counter;
-	int32_t dirty;
 	int32_t dir;
 
 	struct blk_data *data; /* IO data buffer */
@@ -218,23 +218,6 @@ CAS_DECLARE_BLOCK_CALLBACK(cas_bd_io_end, struct bio *bio,
 
 	CAS_DEBUG_TRACE();
 
-	if (err)
-		goto out;
-
-	if (bdio->dir == OCF_WRITE) {
-		/* IO was a write */
-
-		if (!cas_blk_is_flush_io(io->flags)) {
-			/* Device cache is dirty, mark it */
-			atomic_inc(&bdobj->potentially_dirty);
-		} else {
-			/* IO flush finished, update potential
-			 * dirty state
-			 */
-			atomic_sub(bdio->dirty, &bdobj->potentially_dirty);
-		}
-	}
-out:
 	if (err == -EOPNOTSUPP && (CAS_BIO_OP_FLAGS(bio) & CAS_BIO_DISCARD))
 		err = 0;
 
@@ -253,21 +236,12 @@ static void block_dev_submit_flush(struct ocf_io *io)
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = NULL;
 
-	blkio->dirty = atomic_read(&bdobj->potentially_dirty);
-
 	/* Prevent races of completing IO */
 	atomic_set(&blkio->rq_remaning, 1);
 
 	/* Increase IO reference counter for FLUSH IO */
 	ocf_io_get(io);
 
-	if (!blkio->dirty) {
-		/* Didn't write anything to underlying disk; no need to
-		 * send req_flush
-		 */
-		goto out;
-	}
-
 	if (q == NULL) {
 		/* No queue, error */
 		blkio->error = -EINVAL;
@@ -276,7 +250,6 @@ static void block_dev_submit_flush(struct ocf_io *io)
 
 	if (!CAS_CHECK_QUEUE_FLUSH(q)) {
 		/* This block device does not support flush, call back */
-		atomic_sub(blkio->dirty, &bdobj->potentially_dirty);
 		goto out;
 	}
 
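The skip-the-flush fast paths deleted above (here and in cas_atomic_submit_flush earlier) are where the missing ordering bites: a write already acknowledged to the upper layer may not yet have its increment visible to the CPU running the flush, so the flush is completed as a no-op while acknowledged data still sits in the volatile cache. Keeping the counter would have required pairing the update and the read, roughly along these lines (a sketch under C11 semantics with illustrative names; whether this pairing alone suffices depends on how the completion paths order their acknowledgments, which is exactly the subtlety the commit sidesteps by deleting the counter):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int potentially_dirty;

/* Write completion: make the increment visible before the write is
 * acknowledged upstream. */
static void write_complete_ordered(void)
{
	atomic_fetch_add_explicit(&potentially_dirty, 1,
				  memory_order_release);
	/* ... acknowledge completion to the upper layer ... */
}

/* Flush submission: acquire pairing, so that reading 0 really means no
 * acknowledged write is still unflushed in the device cache. */
static bool flush_skippable(void)
{
	return atomic_load_explicit(&potentially_dirty,
				    memory_order_acquire) == 0;
}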
@@ -396,14 +369,10 @@ out:
 static inline bool cas_bd_io_prepare(int *dir, struct ocf_io *io)
 {
 	struct blkio *bdio = cas_io_to_blkio(io);
-	struct bd_object *bdobj = bd_object(ocf_io_get_volume(io));
 
 	/* Setup DIR */
 	bdio->dir = *dir;
 
-	/* Save dirty counter */
-	bdio->dirty = atomic_read(&bdobj->potentially_dirty);
-
 	/* Convert CAS direction into kernel values */
 	switch (bdio->dir) {
 	case OCF_READ:
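With the counter gone, neither bottom volume consults any dirtiness state: a flush from the upper layer is always forwarded to the device, and the only short-circuits left are the pre-existing ones. A condensed view of the remaining decision logic (illustrative names, not the driver's API):

#include <errno.h>
#include <stdbool.h>

/* Disposition of a flush on the block-device bottom volume after this
 * commit: <0 = fail, 0 = complete immediately, 1 = forward to device. */
static int flush_disposition(bool have_queue, bool queue_supports_flush)
{
	if (!have_queue)
		return -EINVAL;	/* no request queue: fail the IO */
	if (!queue_supports_flush)
		return 0;	/* device has no flush: complete with success */
	return 1;		/* send the flush bio to the device */
}

The trade-off is an occasional redundant flush on an already-clean device in exchange for correctness that no longer depends on a global atomic.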