Count deferred requests as full misses
Otherwise, a deferred request may increase the number of hits even though overall performance has not improved. This way, the hit rate is more closely correlated with performance changes.

Signed-off-by: Michael Lyulko <michael.lyulko@huawei.com>
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
parent 35bf43b2e5
commit 470204ac70
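The reasoning above can be illustrated with a minimal, self-contained sketch (plain C, not OCF code; all names and numbers are hypothetical): a request whose handling was deferred reports zero hits, so every core line it touched is accounted as a miss and the hit rate does not rise just because the request was eventually served from cache.

/* Minimal sketch (not OCF code): why a deferred request is reported as a
 * full miss. All names and numbers below are hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct req_stats {
	uint64_t hits;	/* core lines counted as hits */
	uint64_t total;	/* all core lines touched by requests */
};

/* Account one request: a deferred request reports 0 hits, so all of its
 * core lines count as misses. */
static void account_request(struct req_stats *s, unsigned hit_no,
		unsigned line_count, int was_deferred)
{
	s->hits += was_deferred ? 0 : hit_no;
	s->total += line_count;
}

int main(void)
{
	struct req_stats s = {0};

	account_request(&s, 4, 4, 0);	/* fast-path request, 4/4 lines hit */
	account_request(&s, 4, 4, 1);	/* same data, but deferred: full miss */

	printf("hit rate: %.0f%%\n", 100.0 * s.hits / s.total);	/* prints 50% */
	return 0;
}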
@@ -582,7 +582,8 @@ void ocf_engine_update_block_stats(struct ocf_request *req)
 void ocf_engine_update_request_stats(struct ocf_request *req)
 {
 	ocf_core_stats_request_update(req->core, req->part_id, req->rw,
-			req->info.hit_no, req->core_line_count);
+			req->is_deferred ? 0 : req->info.hit_no,
+			req->core_line_count);
 }
 
 void inc_fallback_pt_error_counter(ocf_cache_t cache)
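Passing zero as the hit count makes a deferred request fall into the full-miss bucket of the request statistics. A hedged sketch of that classification follows; this is not the actual ocf_core_stats_request_update() body, only the assumed semantics behind the commit title.

/* Assumed request-level classification; the real accounting lives in
 * ocf_core_stats_request_update() and may differ in detail. */
enum req_class { REQ_FULL_HIT, REQ_PARTIAL_HIT, REQ_FULL_MISS };

static enum req_class classify_request(unsigned hit_no, unsigned line_count)
{
	if (hit_no == 0)
		return REQ_FULL_MISS;	/* deferred requests now pass hit_no == 0 */
	if (hit_no == line_count)
		return REQ_FULL_HIT;	/* every core line was found in cache */
	return REQ_PARTIAL_HIT;		/* some lines hit, some missed */
}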
@@ -225,6 +225,9 @@ struct ocf_request {
 	uint8_t lock_idx : OCF_METADATA_GLOBAL_LOCK_IDX_BITS;
 		/* !< Selected global metadata read lock */
 
+	uint8_t is_deferred : 1;
+		/* !< request handling was deferred and eventually resumed */
+
 	ocf_req_cache_mode_t cache_mode;
 
 	uint64_t timestamp;
@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
@ -693,6 +694,11 @@ void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
|
|||||||
ocf_alock_waitlist_unlock(alock, entry, flags);
|
ocf_alock_waitlist_unlock(alock, entry, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void update_deferred_flag(struct ocf_request *req, int lock)
|
||||||
|
{
|
||||||
|
req->is_deferred = (lock == OCF_LOCK_NOT_ACQUIRED);
|
||||||
|
}
|
||||||
|
|
||||||
int ocf_alock_lock_rd(struct ocf_alock *alock,
|
int ocf_alock_lock_rd(struct ocf_alock *alock,
|
||||||
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
|
struct ocf_request *req, ocf_req_async_lock_cb cmpl)
|
||||||
{
|
{
|
||||||
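Note that the helper assigns the flag on every call: the fast path (lock granted immediately) leaves is_deferred cleared, while the queued path sets it, so no separate reset of the flag is needed before the request statistics are updated.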
@@ -730,6 +736,7 @@ int ocf_alock_lock_rd(struct ocf_alock *alock,
 		env_mutex_unlock(&alock->lock);
 	}
 
+	update_deferred_flag(req, lock);
 	return lock;
 }
 
@@ -769,6 +776,7 @@ int ocf_alock_lock_wr(struct ocf_alock *alock,
 		env_mutex_unlock(&alock->lock);
 	}
 
+	update_deferred_flag(req, lock);
 	return lock;
 }
 
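A hedged usage sketch of how a caller might react to the two non-error results of ocf_alock_lock_rd(). The constant OCF_LOCK_NOT_ACQUIRED and the lock function signature come from the patch above; OCF_LOCK_ACQUIRED is assumed to be the immediate-success return value, and resume_cb()/engine_lock_and_go() are hypothetical names (the sketch also assumes the OCF internal headers for the ocf_alock and ocf_request types).

/* Hedged usage sketch, not OCF code. */
static void resume_cb(struct ocf_request *req)
{
	/* Invoked later, once the lock is finally granted; req->is_deferred
	 * is already set, so this request will be reported as a full miss. */
	/* ... continue processing req ... */
}

static int engine_lock_and_go(struct ocf_alock *alock, struct ocf_request *req)
{
	int lock = ocf_alock_lock_rd(alock, req, resume_cb);

	if (lock == OCF_LOCK_ACQUIRED) {
		/* Fast path: lock taken immediately, is_deferred stays 0. */
		/* ... continue processing req ... */
		return 0;
	}
	if (lock == OCF_LOCK_NOT_ACQUIRED) {
		/* Request queued; resume_cb() will be invoked later. */
		return 0;
	}
	return lock;	/* negative value: locking error */
}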