Move block stats counters to ioclass section.
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@intel.com>
commit 2450d3da4b
parent 8304ed84b8
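Not part of the diff below — a minimal, self-contained sketch of the idea behind this change, under assumed names (`NUM_IO_CLASSES`, `counters_block`, `counters_part`, `counters_core`, `account_core_write`, `total_core_write_bytes` are all hypothetical, and C11 atomics stand in for OCF's `env_atomic64` wrappers): block byte counters live per io class (partition), and core-wide totals are produced by summing over the io classes, mirroring how `accum_block_stats()` walks `part_counters[]` in `ocf_core_get_stats()` in the hunks below.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_IO_CLASSES 33	/* hypothetical io class count */

/* Per-direction block byte counters (stands in for struct ocf_counters_block). */
struct counters_block {
	atomic_uint_fast64_t read_bytes;
	atomic_uint_fast64_t write_bytes;
};

/* Per-io-class entry: core- and cache-volume block counters. */
struct counters_part {
	struct counters_block core_blocks;
	struct counters_block cache_blocks;
};

/* Per-core counters keep only the array of per-io-class entries. */
struct counters_core {
	struct counters_part part_counters[NUM_IO_CLASSES];
};

/* Account a core-volume write against the io class it was submitted with. */
static void account_core_write(struct counters_core *c, uint32_t io_class,
		uint64_t bytes)
{
	atomic_fetch_add(&c->part_counters[io_class].core_blocks.write_bytes,
			bytes);
}

/* Aggregate per-io-class counters into a core-wide total on demand. */
static uint64_t total_core_write_bytes(struct counters_core *c)
{
	uint64_t total = 0;
	uint32_t i;

	for (i = 0; i < NUM_IO_CLASSES; i++) {
		total += atomic_load(
				&c->part_counters[i].core_blocks.write_bytes);
	}

	return total;
}

int main(void)
{
	static struct counters_core core;	/* zero-initialized counters */

	account_core_write(&core, 0, 4096);	/* unclassified io class */
	account_core_write(&core, 1, 8192);	/* some user-defined io class */

	printf("core writes: %llu bytes\n",
			(unsigned long long)total_core_write_bytes(&core));

	return 0;
}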
@@ -47,6 +47,8 @@ static void ocf_stats_part_init(struct ocf_counters_part *stats)
 	ocf_stats_req_init(&stats->write_reqs);

 	ocf_stats_block_init(&stats->blocks);
+	ocf_stats_block_init(&stats->core_blocks);
+	ocf_stats_block_init(&stats->cache_blocks);
 }

 static void ocf_stats_error_init(struct ocf_counters_error *stats)
@@ -77,9 +79,6 @@ void ocf_core_stats_initialize(ocf_core_t core)

 	exp_obj_stats = core->counters;

-	ocf_stats_block_init(&exp_obj_stats->core_blocks);
-	ocf_stats_block_init(&exp_obj_stats->cache_blocks);
-
 	ocf_stats_error_init(&exp_obj_stats->cache_errors);
 	ocf_stats_error_init(&exp_obj_stats->core_errors);

@@ -240,9 +239,6 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats)
 			env_atomic_read(&core->runtime_meta->cached_clines);

-	copy_block_stats(&stats->core_volume, &core_stats->core_blocks);
-	copy_block_stats(&stats->cache_volume, &core_stats->cache_blocks);
-
 	copy_error_stats(&stats->core_errors,
 			&core_stats->core_errors);
 	copy_error_stats(&stats->cache_errors,
@@ -262,6 +258,8 @@ int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats)
 				&curr->write_reqs);

 		accum_block_stats(&stats->core, &curr->blocks);
+		accum_block_stats(&stats->core_volume, &curr->core_blocks);
+		accum_block_stats(&stats->cache_volume, &curr->cache_blocks);

 		stats->cache_occupancy += env_atomic_read(&core->runtime_meta->
 				part_counters[i].cached_clines);
@@ -31,6 +31,9 @@ struct ocf_counters_part {
 	struct ocf_counters_req write_reqs;

 	struct ocf_counters_block blocks;
+
+	struct ocf_counters_block core_blocks;
+	struct ocf_counters_block cache_blocks;
 };

 #ifdef OCF_DEBUG_STATS
@@ -44,9 +47,6 @@ struct ocf_counters_debug {
 #endif

 struct ocf_counters_core {
-	struct ocf_counters_block core_blocks;
-	struct ocf_counters_block cache_blocks;
-
 	struct ocf_counters_error core_errors;
 	struct ocf_counters_error cache_errors;

@@ -480,13 +480,14 @@ static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
 	uint64_t addr, offset;
 	int err;
 	ocf_cache_t cache = req->cache;
-	ocf_core_t core = ocf_cache_get_core(cache, iter->core_id);
 	struct ocf_io *io;
-	struct ocf_counters_block *core_stats =
-			&cache->core[iter->core_id].counters->core_blocks;
+	struct ocf_counters_block *core_stats;
+	ocf_core_t core = ocf_cache_get_core(cache, iter->core_id);
 	ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
 			iter->coll_idx);

+	core_stats = &core->counters->part_counters[part_id].core_blocks;
+
 	addr = (ocf_line_size(cache) * iter->core_line)
 			+ SECTORS_TO_BYTES(begin);
 	offset = (ocf_line_size(cache) * iter->hash)
@@ -637,7 +638,8 @@ static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
  */
 static int _ocf_cleaner_fire_cache(struct ocf_request *req)
 {
-	struct ocf_cache *cache = req->cache;
+	ocf_cache_t cache = req->cache;
+	ocf_core_t core;
 	uint32_t i;
 	struct ocf_map_info *iter = req->map;
 	uint64_t addr, offset;
@@ -650,14 +652,12 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *req)
 	env_atomic_inc(&req->req_remaining);

 	for (i = 0; i < req->core_line_count; i++, iter++) {
-		if (iter->core_id == OCF_CORE_MAX)
+		core = ocf_cache_get_core(cache, iter->core_id);
+		if (!core)
 			continue;
 		if (iter->status == LOOKUP_MISS)
 			continue;

-		cache_stats = &cache->core[iter->core_id].
-				counters->cache_blocks;
-
 		OCF_DEBUG_PARAM(req->cache, "Cache read, line = %u",
 				iter->coll_idx);

@@ -670,6 +670,8 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *req)

 		part_id = ocf_metadata_get_partition_id(cache, iter->coll_idx);

+		cache_stats = &core->counters->part_counters[part_id].cache_blocks;
+
 		io = ocf_new_cache_io(cache, req->io_queue,
 				addr, ocf_line_size(cache),
 				OCF_READ, part_id, 0);
@@ -229,7 +229,7 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
 {
 	struct ocf_counters_block *cache_stats;
 	uint64_t flags = req->ioi.io.flags;
-	uint32_t class = req->ioi.io.io_class;
+	uint32_t io_class = req->ioi.io.io_class;
 	uint64_t addr, bytes, total_bytes = 0;
 	struct ocf_io *io;
 	int err;
@@ -240,7 +240,7 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
 	ENV_BUG_ON(req->byte_length < offset + size);
 	ENV_BUG_ON(first_cl + reqs > req->core_line_count);

-	cache_stats = &req->core->counters->cache_blocks;
+	cache_stats = &req->core->counters->part_counters[io_class].cache_blocks;

 	if (reqs == 1) {
 		addr = ocf_metadata_map_lg2phy(cache,
@@ -251,7 +251,7 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
 		bytes = size;

 		io = ocf_new_cache_io(cache, req->io_queue,
-				addr, bytes, dir, class, flags);
+				addr, bytes, dir, io_class, flags);
 		if (!io) {
 			callback(req, -OCF_ERR_NO_MEM);
 			goto update_stats;
@@ -298,7 +298,7 @@ void ocf_submit_cache_reqs(struct ocf_cache *cache,
 			ENV_BUG_ON(bytes == 0);

 			io = ocf_new_cache_io(cache, req->io_queue,
-					addr, bytes, dir, class, flags);
+					addr, bytes, dir, io_class, flags);
 			if (!io) {
 				/* Finish all IOs which left with ERROR */
 				for (; i < reqs; i++)
@@ -334,19 +334,19 @@ void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
 {
 	struct ocf_counters_block *core_stats;
 	uint64_t flags = req->ioi.io.flags;
-	uint32_t class = req->ioi.io.io_class;
+	uint32_t io_class = req->ioi.io.io_class;
 	int dir = req->rw;
 	struct ocf_io *io;
 	int err;

-	core_stats = &req->core->counters->core_blocks;
+	core_stats = &req->core->counters->part_counters[io_class].core_blocks;
 	if (dir == OCF_WRITE)
 		env_atomic64_add(req->byte_length, &core_stats->write_bytes);
 	else if (dir == OCF_READ)
 		env_atomic64_add(req->byte_length, &core_stats->read_bytes);

 	io = ocf_volume_new_io(volume, req->io_queue, req->byte_position,
-			req->byte_length, dir, class, flags);
+			req->byte_length, dir, io_class, flags);
 	if (!io) {
 		callback(req, -OCF_ERR_NO_MEM);
 		return;