diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c index a00c674..a2b6cb5 100644 --- a/src/metadata/metadata_io.c +++ b/src/metadata/metadata_io.c @@ -177,147 +177,184 @@ int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue, void *priv, return 0; } -static void metadata_io_i_asynch_end(struct metadata_io_request *request, - int error); - -static int ocf_restart_meta_io(struct ocf_request *req); - -static struct ocf_io_if meta_restart_if = { - .read = ocf_restart_meta_io, - .write = ocf_restart_meta_io -}; - -static void metadata_io_i_asynch_cmpl(struct ocf_io *io, int error) +static void metadata_io_req_fill(struct metadata_io_request *m_req) { - struct metadata_io_request *request = io->priv1; + ocf_cache_t cache = m_req->cache; + int i; - metadata_io_i_asynch_end(request, error); + for (i = 0; i < m_req->count; i++) { + m_req->on_meta_fill(cache, m_req->data, + m_req->page + i, m_req->context); + } +} +static void metadata_io_req_drain(struct metadata_io_request *m_req) +{ + ocf_cache_t cache = m_req->cache; + int i; + + for (i = 0; i < m_req->count; i++) { + m_req->on_meta_drain(cache, m_req->data, + m_req->page + i, m_req->context); + } +} + +static void metadata_io_io_end(struct metadata_io_request *m_req, int error); + +static void metadata_io_io_cmpl(struct ocf_io *io, int error) +{ + metadata_io_io_end(io->priv1, error); ocf_io_put(io); } -static void metadata_io_req_fill(struct metadata_io_request *meta_io_req) +static int metadata_io_restart_req(struct ocf_request *req) { - ocf_cache_t cache = meta_io_req->cache; - int i; - - for (i = 0; i < meta_io_req->count; i++) { - meta_io_req->on_meta_fill(cache, meta_io_req->data, - meta_io_req->page + i, meta_io_req->context); - } -} - -static void metadata_io_req_drain(struct metadata_io_request *meta_io_req) -{ - ocf_cache_t cache = meta_io_req->cache; - int i; - - for (i = 0; i < meta_io_req->count; i++) { - meta_io_req->on_meta_drain(cache, meta_io_req->data, - meta_io_req->page 
+ i, meta_io_req->context); - } -} - -static int ocf_restart_meta_io(struct ocf_request *req) -{ - struct metadata_io_request *meta_io_req = req->priv; + struct metadata_io_request *m_req = req->priv; ocf_cache_t cache = req->cache; struct ocf_io *io; int ret; /* Fill with the latest metadata. */ - ocf_metadata_start_shared_access(&cache->metadata.lock); - metadata_io_req_fill(meta_io_req); - ocf_metadata_end_shared_access(&cache->metadata.lock); + if (m_req->req.rw == OCF_WRITE) { + ocf_metadata_start_shared_access(&cache->metadata.lock); + metadata_io_req_fill(m_req); + ocf_metadata_end_shared_access(&cache->metadata.lock); + } io = ocf_new_cache_io(cache, req->io_queue, - PAGES_TO_BYTES(meta_io_req->page), - PAGES_TO_BYTES(meta_io_req->count), - OCF_WRITE, 0, 0); + PAGES_TO_BYTES(m_req->page), + PAGES_TO_BYTES(m_req->count), + m_req->req.rw, 0, 0); if (!io) { - metadata_io_i_asynch_end(meta_io_req, -OCF_ERR_NO_MEM); + metadata_io_io_end(m_req, -OCF_ERR_NO_MEM); return 0; } /* Setup IO */ - ocf_io_set_cmpl(io, meta_io_req, NULL, metadata_io_i_asynch_cmpl); - ret = ocf_io_set_data(io, meta_io_req->data, 0); + ocf_io_set_cmpl(io, m_req, NULL, metadata_io_io_cmpl); + ret = ocf_io_set_data(io, m_req->data, 0); if (ret) { ocf_io_put(io); - metadata_io_i_asynch_end(meta_io_req, ret); + metadata_io_io_end(m_req, ret); return ret; } ocf_volume_submit_io(io); return 0; } +static struct ocf_io_if metadata_io_restart_if = { + .read = metadata_io_restart_req, + .write = metadata_io_restart_req, +}; + +static void metadata_io_req_advance(struct metadata_io_request *m_req); + /* * Iterative asynchronous write callback */ -static void metadata_io_i_asynch_end(struct metadata_io_request *request, - int error) +static void metadata_io_io_end(struct metadata_io_request *m_req, int error) { - struct metadata_io_request_asynch *a_req; - ocf_cache_t cache; + struct metadata_io_request_asynch *a_req = m_req->asynch; + ocf_cache_t cache = m_req->cache; - OCF_CHECK_NULL(request); - - 
cache = request->cache; - - a_req = request->asynch; OCF_CHECK_NULL(a_req); OCF_CHECK_NULL(a_req->on_complete); if (error) { - request->error |= error; - request->asynch->error |= error; + a_req->error = a_req->error ?: error; } else { - if (request->fl_req.rw == OCF_READ) - metadata_io_req_drain(request); + if (m_req->req.rw == OCF_READ) + metadata_io_req_drain(m_req); } - if (env_atomic_dec_return(&request->req_remaining)) - return; + OCF_DEBUG_PARAM(cache, "Page = %u", m_req->page); - OCF_DEBUG_PARAM(cache, "Page = %u", request->page); + metadata_io_req_advance(m_req); - ctx_data_free(cache->owner, request->data); - request->data = NULL; - - if (env_atomic_dec_return(&a_req->req_remaining)) { - env_atomic_set(&request->finished, 1); - ocf_metadata_updater_kick(cache); - return; - } - - OCF_DEBUG_MSG(cache, "Asynchronous IO completed"); - - /* All IOs have been finished, call IO end callback */ - a_req->on_complete(request->cache, a_req->context, request->error); - - /* - * If it's last request, we mark is as finished - * after calling IO end callback - */ - env_atomic_set(&request->finished, 1); + env_atomic_set(&m_req->finished, 1); ocf_metadata_updater_kick(cache); } +static void metadata_io_req_submit(struct metadata_io_request *m_req) +{ + env_atomic_set(&m_req->finished, 0); + metadata_updater_submit(m_req); +} + +void metadata_io_req_end(struct metadata_io_request *m_req) +{ + struct metadata_io_request_asynch *a_req = m_req->asynch; + ocf_cache_t cache = m_req->cache; + + if (env_atomic_dec_return(&a_req->req_remaining) == 0) + a_req->on_complete(cache, a_req->context, a_req->error); + + ctx_data_free(cache->owner, m_req->data); +} + +void metadata_io_req_finalize(struct metadata_io_request *m_req) +{ + struct metadata_io_request_asynch *a_req = m_req->asynch; + + if (env_atomic_dec_return(&a_req->req_active) == 0) + env_free(a_req); +} + static uint32_t metadata_io_max_page(ocf_cache_t cache) { return ocf_volume_get_max_io_size(&cache->device->volume) / 
PAGE_SIZE; } -static void metadata_io_req_error(ocf_cache_t cache, - struct metadata_io_request_asynch *a_req, - uint32_t i, int error) +static void metadata_io_req_advance(struct metadata_io_request *m_req) { - a_req->error |= error; - a_req->reqs[i].error |= error; - a_req->reqs[i].count = 0; - if (a_req->reqs[i].data) - ctx_data_free(cache->owner, a_req->reqs[i].data); - a_req->reqs[i].data = NULL; + struct metadata_io_request_asynch *a_req = m_req->asynch; + uint32_t max_count = metadata_io_max_page(m_req->cache); + uint32_t curr; + + if (a_req->error) { + metadata_io_req_end(m_req); + return; + } + + curr = env_atomic_inc_return(&a_req->req_current); + + if (curr >= OCF_DIV_ROUND_UP(a_req->count, max_count)) { + m_req->count = 0; + metadata_io_req_end(m_req); + return; + } + + m_req->page = a_req->page + curr * max_count; + m_req->count = OCF_MIN(a_req->count - curr * max_count, max_count); +} + +static void metadata_io_req_start(struct metadata_io_request *m_req) +{ + struct metadata_io_request_asynch *a_req = m_req->asynch; + + env_atomic_inc(&a_req->req_remaining); + env_atomic_inc(&a_req->req_active); + + metadata_io_req_advance(m_req); + + if (m_req->count == 0) { + metadata_io_req_finalize(m_req); + return; + } + + metadata_io_req_submit(m_req); +} + +void metadata_io_req_complete(struct metadata_io_request *m_req) +{ + struct metadata_io_request_asynch *a_req = m_req->asynch; + + if (m_req->count == 0 || a_req->error) { + metadata_io_req_finalize(m_req); + return; + } + + metadata_io_req_submit(m_req); } /* @@ -328,15 +365,12 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir, ocf_metadata_io_event_t io_hndl, ocf_metadata_io_end_t compl_hndl) { - uint32_t curr_count, written; - uint32_t max_count = metadata_io_max_page(cache); - uint32_t io_count = OCF_DIV_ROUND_UP(count, max_count); - uint32_t i; - int error = 0, ret; - struct ocf_io *io; - - /* Allocation and initialization of asynchronous metadata IO request */ struct 
metadata_io_request_asynch *a_req; + struct metadata_io_request *m_req; + uint32_t max_count = metadata_io_max_page(cache); + uint32_t io_count = OCF_DIV_ROUND_UP(count, max_count); + uint32_t req_count = OCF_MIN(io_count, METADATA_IO_REQS_LIMIT); + int i; if (count == 0) return 0; @@ -345,140 +379,59 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir, if (!a_req) return -OCF_ERR_NO_MEM; - env_atomic_set(&a_req->req_remaining, io_count); - env_atomic_set(&a_req->req_active, io_count); + env_atomic_set(&a_req->req_remaining, 1); + env_atomic_set(&a_req->req_active, 1); + env_atomic_set(&a_req->req_current, -1); a_req->on_complete = compl_hndl; a_req->context = context; a_req->page = page; + a_req->count = count; - /* Allocate particular requests and initialize them */ - OCF_REALLOC_CP(&a_req->reqs, sizeof(a_req->reqs[0]), - io_count, &a_req->reqs_limit); - if (!a_req->reqs) { - env_free(a_req); - ocf_cache_log(cache, log_warn, - "No memory during metadata IO\n"); - return -OCF_ERR_NO_MEM; - } /* IO Requests initialization */ - for (i = 0; i < io_count; i++) { - env_atomic_set(&(a_req->reqs[i].req_remaining), 1); - env_atomic_set(&(a_req->reqs[i].finished), 0); - a_req->reqs[i].asynch = a_req; - } + for (i = 0; i < req_count; i++) { + m_req = &a_req->reqs[i]; - OCF_DEBUG_PARAM(cache, "IO count = %u", io_count); + m_req->asynch = a_req; + m_req->cache = cache; + m_req->context = context; + m_req->on_meta_fill = io_hndl; + m_req->on_meta_drain = io_hndl; + m_req->req.io_if = &metadata_io_restart_if; + m_req->req.io_queue = queue; + m_req->req.cache = cache; + m_req->req.priv = m_req; + m_req->req.info.internal = true; + m_req->req.rw = dir; + m_req->req.map = LIST_POISON1; - i = 0; - written = 0; - while (count) { - /* Get pages count of this IO iteration */ - if (count > max_count) - curr_count = max_count; - else - curr_count = count; - - /* Fill request */ - a_req->reqs[i].cache = cache; - a_req->reqs[i].context = context; - 
a_req->reqs[i].page = page + written; - a_req->reqs[i].count = curr_count; - a_req->reqs[i].on_meta_fill = io_hndl; - a_req->reqs[i].on_meta_drain = io_hndl; - a_req->reqs[i].fl_req.io_if = &meta_restart_if; - a_req->reqs[i].fl_req.io_queue = queue; - a_req->reqs[i].fl_req.cache = cache; - a_req->reqs[i].fl_req.priv = &a_req->reqs[i]; - a_req->reqs[i].fl_req.info.internal = true; - a_req->reqs[i].fl_req.rw = dir; - - /* - * We don't want allocate map for this request in - * threads. + /* If req_count == io_count and count is not multiple of + * max_count, for last we can allocate data smaller that + * max_count as we are sure it will never be resubmitted. */ - a_req->reqs[i].fl_req.map = LIST_POISON1; - - INIT_LIST_HEAD(&a_req->reqs[i].list); - - a_req->reqs[i].data = ctx_data_alloc(cache->owner, curr_count); - if (!a_req->reqs[i].data) { - error = -OCF_ERR_NO_MEM; - metadata_io_req_error(cache, a_req, i, error); - break; - } - - /* Issue IO if it is not overlapping with anything else */ - ret = metadata_updater_check_overlaps(cache, &a_req->reqs[i]); - if (ret == 0) { - /* Allocate new IO */ - io = ocf_new_cache_io(cache, queue, - PAGES_TO_BYTES(a_req->reqs[i].page), - PAGES_TO_BYTES(a_req->reqs[i].count), - dir, 0, 0); - if (!io) { - error = -OCF_ERR_NO_MEM; - metadata_io_req_error(cache, a_req, i, error); - break; - } - - if (dir == OCF_WRITE) - metadata_io_req_fill(&a_req->reqs[i]); - - /* Setup IO */ - ocf_io_set_cmpl(io, &a_req->reqs[i], NULL, - metadata_io_i_asynch_cmpl); - error = ocf_io_set_data(io, a_req->reqs[i].data, 0); - if (error) { - ocf_io_put(io); - metadata_io_req_error(cache, a_req, i, error); - break; - } - - ocf_volume_submit_io(io); - } - - count -= curr_count; - written += curr_count; - i++; + m_req->data = ctx_data_alloc(cache->owner, + OCF_MIN(max_count, count - i * max_count)); + if (!m_req->data) + goto err; } - if (error == 0) { - /* No error, return 0 that indicates operation successful */ - return 0; - } - OCF_DEBUG_MSG(cache, 
"ERROR"); + for (i = 0; i < req_count; i++) + metadata_io_req_start(&a_req->reqs[i]); - if (i == 0) { - /* - * If no requests were submitted, we just call completion - * callback, free memory and return error. - */ - compl_hndl(cache, context, error); + if (env_atomic_dec_return(&a_req->req_remaining) == 0) + compl_hndl(cache, context, a_req->error); - OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit); + if (env_atomic_dec_return(&a_req->req_active) == 0) env_free(a_req); - return error; - } + return 0; - /* - * Decrement total reaming requests with IO that were not triggered. - * If we reached zero, we need to call completion callback. - */ - if (env_atomic_sub_return(io_count - i, &a_req->req_remaining) == 0) - compl_hndl(cache, context, error); +err: + while (i--) + ctx_data_free(cache->owner, a_req->reqs[i].data); + env_free(a_req); - /* - * Decrement total active requests with IO that were not triggered. - * If we reached zero, we need to free memory. - */ - if (env_atomic_sub_return(io_count - i, &a_req->req_active) == 0) { - OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit); - env_free(a_req); - } - - return error; + return -OCF_ERR_NO_MEM; } int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue, diff --git a/src/metadata/metadata_io.h b/src/metadata/metadata_io.h index f297be2..f3ae2fa 100644 --- a/src/metadata/metadata_io.h +++ b/src/metadata/metadata_io.h @@ -51,31 +51,34 @@ struct metadata_io_request { uint32_t count; ocf_metadata_io_event_t on_meta_fill; ocf_metadata_io_event_t on_meta_drain; - env_atomic req_remaining; ctx_data_t *data; int error; struct metadata_io_request_asynch *asynch; env_atomic finished; - struct ocf_request fl_req; + struct ocf_request req; struct list_head list; }; +#define METADATA_IO_REQS_LIMIT 128 + /* * Asynchronous IO request context */ struct metadata_io_request_asynch { - ocf_cache_t cache; - struct metadata_io_request *reqs; + struct metadata_io_request reqs[METADATA_IO_REQS_LIMIT]; void *context; 
int error; - size_t reqs_limit; env_atomic req_remaining; env_atomic req_active; + env_atomic req_current; uint32_t page; + uint32_t count; ocf_metadata_io_end_t on_complete; }; +void metadata_io_req_complete(struct metadata_io_request *m_req); + /** * @brief Metadata read end callback * diff --git a/src/metadata/metadata_updater.c b/src/metadata/metadata_updater.c index f3bb357..818a941 100644 --- a/src/metadata/metadata_updater.c +++ b/src/metadata/metadata_updater.c @@ -57,23 +57,14 @@ ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu) static int _metadata_updater_iterate_in_progress(ocf_cache_t cache, struct metadata_io_request *new_req) { - struct metadata_io_request_asynch *a_req; struct ocf_metadata_io_syncher *syncher = &cache->metadata_updater.syncher; struct metadata_io_request *curr, *temp; list_for_each_entry_safe(curr, temp, &syncher->in_progress_head, list) { if (env_atomic_read(&curr->finished)) { - a_req = curr->asynch; - ENV_BUG_ON(!a_req); - list_del(&curr->list); - - if (env_atomic_dec_return(&a_req->req_active) == 0) { - OCF_REALLOC_DEINIT(&a_req->reqs, - &a_req->reqs_limit); - env_free(a_req); - } + metadata_io_req_complete(curr); continue; } if (new_req) { @@ -88,28 +79,29 @@ static int _metadata_updater_iterate_in_progress(ocf_cache_t cache, return 0; } -int metadata_updater_check_overlaps(ocf_cache_t cache, - struct metadata_io_request *req) +void metadata_updater_submit(struct metadata_io_request *m_req) { + ocf_cache_t cache = m_req->cache; struct ocf_metadata_io_syncher *syncher = &cache->metadata_updater.syncher; int ret; env_mutex_lock(&syncher->lock); - ret = _metadata_updater_iterate_in_progress(cache, req); + ret = _metadata_updater_iterate_in_progress(cache, m_req); /* Either add it to in-progress list or pending list for deferred * execution. 
*/ if (ret == 0) - list_add_tail(&req->list, &syncher->in_progress_head); + list_add_tail(&m_req->list, &syncher->in_progress_head); else - list_add_tail(&req->list, &syncher->pending_head); + list_add_tail(&m_req->list, &syncher->pending_head); env_mutex_unlock(&syncher->lock); - return ret; + if (ret == 0) + ocf_engine_push_req_front(&m_req->req, true); } uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu) @@ -143,7 +135,7 @@ uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu) } env_mutex_unlock(&syncher->lock); if (ret == 0) - ocf_engine_push_req_front(&curr->fl_req, true); + ocf_engine_push_req_front(&curr->req, true); env_cond_resched(); env_mutex_lock(&syncher->lock); } diff --git a/src/metadata/metadata_updater_priv.h b/src/metadata/metadata_updater_priv.h index 5ec3176..0f2ee30 100644 --- a/src/metadata/metadata_updater_priv.h +++ b/src/metadata/metadata_updater_priv.h @@ -21,8 +21,7 @@ struct ocf_metadata_updater { }; -int metadata_updater_check_overlaps(ocf_cache_t cache, - struct metadata_io_request *req); +void metadata_updater_submit(struct metadata_io_request *m_req); int ocf_metadata_updater_init(struct ocf_cache *cache);