Count deferred requests as full miss

Otherwise, deferred requests may inflate the hit count even though overall
performance has not improved. Counting them as full misses keeps the hit
rate correlated with actual performance changes.

Signed-off-by: Michael Lyulko <michael.lyulko@huawei.com>
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
This commit is contained in:
Michael Lyulko 2022-05-26 16:32:33 +03:00 committed by Michal Mielewczyk
parent 35bf43b2e5
commit 470204ac70
3 changed files with 13 additions and 1 deletions

View File

@ -582,7 +582,8 @@ void ocf_engine_update_block_stats(struct ocf_request *req)
/*
 * Update per-core request statistics for a request.
 *
 * A request whose handling was deferred (it could not acquire its lock
 * immediately and was resumed later) is reported as a full miss: counting
 * it as a hit would raise the hit rate without any corresponding
 * performance benefit, so hits are suppressed for deferred requests to
 * keep the hit rate correlated with actual performance.
 */
void ocf_engine_update_request_stats(struct ocf_request *req)
{
	/* The stripped diff left both the old and new argument lines in the
	 * scraped text; this is the post-commit form with a single
	 * hit-count argument. */
	ocf_core_stats_request_update(req->core, req->part_id, req->rw,
			req->is_deferred ? 0 : req->info.hit_no,
			req->core_line_count);
}
void inc_fallback_pt_error_counter(ocf_cache_t cache)

View File

@ -225,6 +225,9 @@ struct ocf_request {
uint8_t lock_idx : OCF_METADATA_GLOBAL_LOCK_IDX_BITS;
/* !< Selected global metadata read lock */
uint8_t is_deferred : 1;
/* !< request handling was deferred and eventually resumed */
ocf_req_cache_mode_t cache_mode;
uint64_t timestamp;

View File

@ -1,5 +1,6 @@
/*
* Copyright(c) 2012-2022 Intel Corporation
* Copyright(c) 2024 Huawei Technologies
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -693,6 +694,11 @@ void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
ocf_alock_waitlist_unlock(alock, entry, flags);
}
/*
 * Record on the request whether its lock acquisition was deferred
 * (OCF_LOCK_NOT_ACQUIRED); the flag is consulted later when updating
 * per-core hit statistics.
 */
static inline void update_deferred_flag(struct ocf_request *req, int lock)
{
	if (lock == OCF_LOCK_NOT_ACQUIRED)
		req->is_deferred = 1;
	else
		req->is_deferred = 0;
}
int ocf_alock_lock_rd(struct ocf_alock *alock,
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
{
@ -730,6 +736,7 @@ int ocf_alock_lock_rd(struct ocf_alock *alock,
env_mutex_unlock(&alock->lock);
}
update_deferred_flag(req, lock);
return lock;
}
@ -769,6 +776,7 @@ int ocf_alock_lock_wr(struct ocf_alock *alock,
env_mutex_unlock(&alock->lock);
}
update_deferred_flag(req, lock);
return lock;
}