From 83b4455a0ed996ace26770a13245ddb0f5132c51 Mon Sep 17 00:00:00 2001 From: Adam Rutkowski Date: Mon, 5 Sep 2022 19:41:29 +0200 Subject: [PATCH] unify cache write error stats accounting In most (6/9) instances across engines ocf_core_stats_cache_error_update is called upon each cache volume I/O error, possibly multiple times per user request in case of multi-cacheline requests. Backfill, fast and read engine are exceptions, incrementing error stats only once per user request. This commit unifies ocf_core_stats_cache_error_update usage so that in all the engines the error statistic is incremented once for every error. Signed-off-by: Adam Rutkowski --- src/engine/engine_bf.c | 7 ++++--- src/engine/engine_fast.c | 7 ++++--- src/engine/engine_rd.c | 9 ++++----- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/engine/engine_bf.c b/src/engine/engine_bf.c index 43351c1..eb7df06 100644 --- a/src/engine/engine_bf.c +++ b/src/engine/engine_bf.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2012-2022 Intel Corporation * SPDX-License-Identifier: BSD-3-Clause */ @@ -41,8 +41,10 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error) { struct ocf_cache *cache = req->cache; - if (error) + if (error) { req->error = error; + ocf_core_stats_cache_error_update(req->core, OCF_WRITE); + } if (req->error) inc_fallback_pt_error_counter(req->cache); @@ -63,7 +65,6 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error) req->data = NULL; if (req->error) { - ocf_core_stats_cache_error_update(req->core, OCF_WRITE); ocf_engine_invalidate(req); } else { ocf_req_unlock(ocf_cache_line_concurrency(cache), req); diff --git a/src/engine/engine_fast.c b/src/engine/engine_fast.c index f878f07..16b2391 100644 --- a/src/engine/engine_fast.c +++ b/src/engine/engine_fast.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2012-2022 Intel Corporation * SPDX-License-Identifier: 
BSD-3-Clause */ @@ -30,8 +30,10 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error) { - if (error) + if (error) { req->error |= error; + ocf_core_stats_cache_error_update(req->core, OCF_READ); + } if (env_atomic_dec_return(&req->req_remaining)) { /* Not all requests finished */ @@ -43,7 +45,6 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error) if (req->error) { OCF_DEBUG_RQ(req, "ERROR"); - ocf_core_stats_cache_error_update(req->core, OCF_READ); ocf_engine_push_req_front_pt(req); } else { ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req); diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c index 19b747b..c6433b3 100644 --- a/src/engine/engine_rd.c +++ b/src/engine/engine_rd.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2012-2022 Intel Corporation * SPDX-License-Identifier: BSD-3-Clause */ @@ -27,11 +27,11 @@ static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error) struct ocf_alock *c = ocf_cache_line_concurrency( req->cache); - if (error) + if (error) { req->error |= error; - - if (req->error) + ocf_core_stats_cache_error_update(req->core, OCF_READ); inc_fallback_pt_error_counter(req->cache); + } /* Handle callback-caller race to let only one of the two complete the * request. Also, complete original request only if this is the last @@ -41,7 +41,6 @@ static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error) OCF_DEBUG_RQ(req, "HIT completion"); if (req->error) { - ocf_core_stats_cache_error_update(req->core, OCF_READ); ocf_engine_push_req_front_pt(req); } else { ocf_req_unlock(c, req);