From 91b6098fda24d6bd3d3b008a609c1dc0a8a25029 Mon Sep 17 00:00:00 2001
From: Adam Rutkowski
Date: Thu, 16 Jul 2020 21:47:48 +0200
Subject: [PATCH] Two pass write invalidate

Add a second pass of write invalidate. It is necessary only if
concurrent I/O had inserted target LBAs into the cache after the WI
request did its traversal. These LBAs might have been written by the
WI request behind the concurrent I/O's back, making these sectors
effectively invalid. In this case we must update these sectors'
metadata to reflect this. However, we won't know about this until we
traverse the request again - hence ocf_write_wi is called again with
req->wi_second_pass set to indicate that this is the second pass (the
core write should be skipped).

Signed-off-by: Adam Rutkowski
---
 src/engine/engine_wi.c | 86 +++++++++++++++++++++++++++++++-----------
 src/ocf_request.h      |  3 ++
 2 files changed, 68 insertions(+), 21 deletions(-)

diff --git a/src/engine/engine_wi.c b/src/engine/engine_wi.c
index ff947e4..aa40160 100644
--- a/src/engine/engine_wi.c
+++ b/src/engine/engine_wi.c
@@ -18,11 +18,43 @@
 static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req);
 
-static const struct ocf_io_if _io_if_wi_flush_metadata = {
+static const struct ocf_io_if _io_if_wi_update_metadata = {
 	.read = ocf_write_wi_update_and_flush_metadata,
 	.write = ocf_write_wi_update_and_flush_metadata,
 };
 
+int _ocf_write_wi_next_pass(struct ocf_request *req)
+{
+	ocf_req_unlock_wr(req);
+
+	if (req->wi_second_pass) {
+		req->complete(req, req->error);
+		ocf_req_put(req);
+
+		return 0;
+	}
+
+	/* Perform the second pass of write invalidate. It is necessary
+	   only if concurrent I/O had inserted target LBAs into the cache
+	   after this request did its traversal. These LBAs might have
+	   been written by this request behind the concurrent I/O's back,
+	   making these sectors effectively invalid. In this case we must
+	   update these sectors' metadata to reflect this. However, we
+	   won't know about this until we traverse the request again -
+	   hence ocf_write_wi is called again with req->wi_second_pass
+	   set to indicate that this is a second pass (the core write
+	   should be skipped).
+	*/
+	req->wi_second_pass = true;
+	ocf_write_wi(req);
+
+	return 0;
+}
+
+static const struct ocf_io_if _io_if_wi_next_pass = {
+	.read = _ocf_write_wi_next_pass,
+	.write = _ocf_write_wi_next_pass,
+};
+
 static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
 {
 	if (error) {
@@ -33,6 +65,13 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
 	if (env_atomic_dec_return(&req->req_remaining))
 		return;
 
+	if (!req->error && !req->wi_second_pass && ocf_engine_is_miss(req)) {
+		/* need another pass */
+		ocf_engine_push_req_front_if(req, &_io_if_wi_next_pass,
+				true);
+		return;
+	}
+
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");
 
@@ -47,24 +86,27 @@ static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *req)
 {
 	struct ocf_cache *cache = req->cache;
 
+	if (!ocf_engine_mapped_count(req)) {
+		/* jump directly to next pass */
+		_ocf_write_wi_next_pass(req);
+		return 0;
+	}
+
+	/* There are mapped cache lines, need to remove them */
+
 	env_atomic_set(&req->req_remaining, 1); /* One core IO */
 
-	if (ocf_engine_mapped_count(req)) {
-		/* There are mapped cache line, need to remove them */
+	ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/
 
-		ocf_req_hash_lock_wr(req); /*- Metadata WR access ---------------*/
+	/* Remove mapped cache lines from metadata */
+	ocf_purge_map_info(req);
 
-		/* Remove mapped cache lines from metadata */
-		ocf_purge_map_info(req);
-
-		ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
-
-		if (req->info.flush_metadata) {
-			/* Request was dirty and need to flush metadata */
-			ocf_metadata_flush_do_asynch(cache, req,
-					_ocf_write_wi_io_flush_metadata);
-		}
+	ocf_req_hash_unlock_wr(req); /*- END Metadata WR access ---------*/
 
+	if (req->info.flush_metadata) {
+		/* Request was dirty and needs to flush metadata */
+		ocf_metadata_flush_do_asynch(cache, req,
+				_ocf_write_wi_io_flush_metadata);
 	}
 
 	_ocf_write_wi_io_flush_metadata(req, 0);
@@ -92,12 +134,12 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
 
 		ocf_req_put(req);
 	} else {
-		ocf_engine_push_req_front_if(req, &_io_if_wi_flush_metadata,
+		ocf_engine_push_req_front_if(req, &_io_if_wi_update_metadata,
 				true);
 	}
 }
 
-static int _ocf_write_wi_do(struct ocf_request *req)
+static int _ocf_write_wi_core_write(struct ocf_request *req)
 {
 	/* Get OCF request - increase reference counter */
 	ocf_req_get(req);
@@ -127,9 +169,9 @@ static void _ocf_write_wi_on_resume(struct ocf_request *req)
 	ocf_engine_push_req_front(req, true);
 }
 
-static const struct ocf_io_if _io_if_wi_resume = {
-	.read = _ocf_write_wi_do,
-	.write = _ocf_write_wi_do,
+static const struct ocf_io_if _io_if_wi_core_write = {
+	.read = _ocf_write_wi_core_write,
+	.write = _ocf_write_wi_core_write,
 };
 
 int ocf_write_wi(struct ocf_request *req)
@@ -144,7 +186,9 @@ int ocf_write_wi(struct ocf_request *req)
 	ocf_req_get(req);
 
 	/* Set resume io_if */
-	req->io_if = &_io_if_wi_resume;
+	req->io_if = req->wi_second_pass ?
+			&_io_if_wi_update_metadata :
+			&_io_if_wi_core_write;
 
 	ocf_req_hash(req);
 	ocf_req_hash_lock_rd(req); /*- Metadata READ access, No eviction --------*/
@@ -163,7 +207,7 @@ int ocf_write_wi(struct ocf_request *req)
 
 	if (lock >= 0) {
 		if (lock == OCF_LOCK_ACQUIRED) {
-			_ocf_write_wi_do(req);
+			req->io_if->write(req);
 		} else {
 			/* WR lock was not acquired, need to wait for resume */
 			OCF_DEBUG_RQ(req, "NO LOCK");
diff --git a/src/ocf_request.h b/src/ocf_request.h
index bdb6ca4..dca8627 100644
--- a/src/ocf_request.h
+++ b/src/ocf_request.h
@@ -188,6 +188,9 @@ struct ocf_request {
 	uint8_t seq_cutoff : 1;
 		/*!< Sequential cut off set for this request */
 
+	uint8_t wi_second_pass : 1;
+		/*!< Set after first pass of WI write is completed */
+
 	log_sid_t sid;
 		/*!< Tracing sequence ID */
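
For readers following the control flow, below is a minimal, self-contained
C sketch of the two-pass idea the patch implements. The toy_req type and the
purge_mapped_lines/write_invalidate helpers are illustrative assumptions, not
the OCF API; only the wi_second_pass flag and the "repeat once, skipping the
core write, if some target LBAs were a miss at traversal time" logic mirror
the patch.

/*
 * Toy sketch of two-pass write invalidate. Not the OCF implementation;
 * names other than wi_second_pass are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_req {
	bool wi_second_pass;	/* set once the first pass has completed */
	bool miss;		/* some target LBAs were unmapped at traversal */
};

/* Stand-in for re-traversing the request and purging mapped lines. */
static void purge_mapped_lines(struct toy_req *req)
{
	printf("pass %d: purging mapped cache lines\n",
			req->wi_second_pass ? 2 : 1);
}

static void write_invalidate(struct toy_req *req)
{
	if (req->wi_second_pass) {
		/* Second pass: only invalidate lines a concurrent I/O may
		 * have inserted meanwhile; the core write is skipped. */
		purge_mapped_lines(req);
		printf("request complete\n");
		return;
	}

	printf("pass 1: writing data to core\n");
	purge_mapped_lines(req);

	if (req->miss) {
		/* Some target LBAs were misses at traversal time, so a
		 * concurrent I/O may have mapped them behind our back;
		 * repeat once with the core write skipped. */
		req->wi_second_pass = true;
		write_invalidate(req);
		return;
	}

	printf("request complete\n");
}

int main(void)
{
	struct toy_req req = { .miss = true };

	write_invalidate(&req);
	return 0;
}

With .miss set the sketch runs both passes; with it cleared only the first
pass runs, which corresponds to the !req->error && !req->wi_second_pass &&
ocf_engine_is_miss(req) check in _ocf_write_wi_io_flush_metadata above.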