Replace submit with forward in cleaner
Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
commit 322ae2687d
parent 834786866c
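The commit swaps the cleaner's hand-rolled completion counting (one struct ocf_io per cache line plus an env_atomic req_remaining counter) for the request-forwarding interface: the cleaner sets req->cache_forward_end / req->core_forward_end once and issues ocf_req_forward_cache_io() / ocf_req_forward_core_io() calls bracketed by ocf_req_forward_*_get() and ocf_req_forward_*_put(), so the single end callback fires after the last forwarded I/O completes. The snippet below is a minimal, self-contained model of that get/put counting scheme, written for illustration only; the request, forward_get, forward_put, forward_io and cleaner_end names are invented here and are not OCF APIs.

#include <stdio.h>

/* Minimal single-threaded model of the forward get/put counting scheme.
 * All names in this sketch are invented for illustration; they are not
 * OCF APIs. */
struct request {
	int remaining;   /* outstanding forwarded I/Os, plus one "get" guard */
	int error;       /* first error seen, if any */
	void (*forward_end)(struct request *req, int error);
};

static void forward_complete(struct request *req, int error)
{
	if (error && !req->error)
		req->error = error;
	if (--req->remaining == 0)
		req->forward_end(req, req->error);   /* fires exactly once */
}

static void forward_get(struct request *req)
{
	req->remaining++;   /* guard: end callback cannot fire mid-loop */
}

static void forward_put(struct request *req)
{
	forward_complete(req, 0);   /* drop the guard */
}

static void forward_io(struct request *req, int simulated_error)
{
	req->remaining++;
	/* a real volume would complete asynchronously; model it inline */
	forward_complete(req, simulated_error);
}

static void cleaner_end(struct request *req, int error)
{
	printf("all forwarded I/O done, error = %d\n", error);
}

int main(void)
{
	struct request req = { .remaining = 0, .error = 0,
			.forward_end = cleaner_end };

	forward_get(&req);
	forward_io(&req, 0);
	forward_io(&req, 0);
	forward_put(&req);   /* completion fires here, after the loop */
	return 0;
}

Bracketing the submission loop with the get/put pair mirrors the role the old env_atomic_set(&req->req_remaining, 1) / trailing _ocf_cleaner_*_io_end() calls played ("Protect IO completion race" in the removed code): the completion cannot run until everything has been forwarded.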
@@ -529,27 +529,23 @@ int ocf_engine_prepare_clines(struct ocf_request *req)
 static int _ocf_engine_clean_getter(struct ocf_cache *cache,
 		void *getter_context, uint32_t item, ocf_cache_line_t *line)
 {
-	struct ocf_cleaner_attribs *attribs = getter_context;
-	struct ocf_request *req = attribs->cmpl_context;
+	struct ocf_request *req = getter_context;
+	struct ocf_map_info *entry;
 
-	for (; attribs->getter_item < req->core_line_count;
-			attribs->getter_item++) {
+	if (unlikely(item >= req->core_line_count))
+		return -1;
 
-		struct ocf_map_info *entry = &req->map[attribs->getter_item];
+	entry = &req->map[item];
 
-		if (entry->status != LOOKUP_HIT)
-			continue;
+	if (entry->status != LOOKUP_HIT)
+		return -1;
 
-		if (!metadata_test_dirty(cache, entry->coll_idx))
-			continue;
+	if (!metadata_test_dirty(cache, entry->coll_idx))
+		return -1;
 
-		/* Line to be cleaned found, go to next item and return */
-		*line = entry->coll_idx;
-		attribs->getter_item++;
-		return 0;
-	}
-
-	return -1;
+	/* Line to be cleaned found, go to next item and return */
+	*line = entry->coll_idx;
+	return 0;
 }
 
 void ocf_engine_clean(struct ocf_request *req)
@@ -562,10 +558,9 @@ void ocf_engine_clean(struct ocf_request *req)
 		.cmpl_fn = _ocf_engine_clean_end,
 
 		.getter = _ocf_engine_clean_getter,
-		.getter_context = &attribs,
-		.getter_item = 0,
+		.getter_context = req,
 
-		.count = req->info.dirty_any,
+		.count = req->core_line_count,
 		.io_queue = req->io_queue
 	};
 
@@ -139,11 +139,6 @@ static struct ocf_request *_ocf_cleaner_alloc_slave_req(
 	/* Slave request contains reference to master */
 	req->master_io_req = master;
 
-	/* One more additional slave request, increase global counter
-	 * of requests count
-	 */
-	env_atomic_inc(&master->master_remaining);
-
 	OCF_DEBUG_PARAM(req->cache,
 			"New slave request, count = %u,all requests count = %d",
 			count, env_atomic_read(&master->master_remaining));
@@ -281,38 +276,22 @@ static void _ocf_cleaner_finish_req(struct ocf_request *req)
 	_ocf_cleaner_dealloc_req(req);
 }
 
-static void _ocf_cleaner_flush_cache_io_end(struct ocf_io *io, int error)
+static void _ocf_cleaner_flush_cache_end(struct ocf_request *req, int error)
 {
-	struct ocf_request *req = io->priv1;
-
-	if (error) {
+	if (error)
 		ocf_metadata_error(req->cache);
-		req->error = error;
-	}
 
 	OCF_DEBUG_MSG(req->cache, "Cache flush finished");
 
 	_ocf_cleaner_finish_req(req);
-
-	ocf_io_put(io);
 }
 
 static int _ocf_cleaner_fire_flush_cache(struct ocf_request *req)
 {
-	struct ocf_io *io;
-
 	OCF_DEBUG_TRACE(req->cache);
 
-	io = ocf_new_cache_io(req->cache, req->io_queue, 0, 0, OCF_WRITE, 0, 0);
-	if (!io) {
-		ocf_metadata_error(req->cache);
-		req->error = -OCF_ERR_NO_MEM;
-		return -OCF_ERR_NO_MEM;
-	}
-
-	ocf_io_set_cmpl(io, req, NULL, _ocf_cleaner_flush_cache_io_end);
-
-	ocf_volume_submit_flush(io);
+	req->cache_forward_end = _ocf_cleaner_flush_cache_end;
+	ocf_req_forward_cache_flush(req);
 
 	return 0;
 }
@@ -361,7 +340,6 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req)
 		if (metadata_test_dirty(cache, cache_line)) {
 			ocf_metadata_get_core_and_part_id(cache, cache_line,
 					&core_id, &req->part_id);
-			req->core = &cache->core[core_id];
 
 			ocf_metadata_start_collision_shared_access(cache,
 					cache_line);
@@ -384,30 +362,25 @@ static int _ocf_cleaner_update_metadata(struct ocf_request *req)
 	return 0;
 }
 
-static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
-		struct ocf_request *req, int error)
+static void _ocf_cleaner_flush_core_end(struct ocf_request *req, int error)
 {
-	uint32_t i;
 	struct ocf_map_info *iter = req->map;
+	uint32_t i;
+
+	OCF_DEBUG_MSG(req->cache, "Core flush finished");
 
 	if (error) {
-		/* Flush error, set error for all cache line of this core */
+		/* Flush error, set error for all cleaned cache lines */
 		for (i = 0; i < req->core_line_count; i++, iter++) {
 			if (!iter->flush)
 				continue;
 
-			if (iter->core_id == map->core_id)
-				iter->invalid = true;
+			iter->invalid = true;
 		}
 
 		_ocf_cleaner_set_error(req);
 	}
 
-	if (env_atomic_dec_return(&req->req_remaining))
-		return;
-
-	OCF_DEBUG_MSG(req->cache, "Core flush finished");
-
 	/*
 	 * All core writes done, switch to post cleaning activities
 	 */
@@ -415,103 +388,45 @@ static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map,
 	ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
 }
 
-static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error)
+static int _ocf_cleaner_fire_flush_core(struct ocf_request *req)
 {
-	_ocf_cleaner_flush_cores_io_end(io->priv1, io->priv2, error);
+	req->core_forward_end = _ocf_cleaner_flush_core_end;
 
-	ocf_io_put(io);
-}
-
-static int _ocf_cleaner_fire_flush_cores(struct ocf_request *req)
-{
-	uint32_t i;
-	ocf_core_id_t core_id = OCF_CORE_MAX;
-	struct ocf_cache *cache = req->cache;
-	struct ocf_map_info *iter = req->map;
-	ocf_core_t core;
-	struct ocf_io *io;
-
-	OCF_DEBUG_TRACE(req->cache);
-
-	/* Protect IO completion race */
-	env_atomic_set(&req->req_remaining, 1);
-
-	/* Submit flush requests */
-	for (i = 0; i < req->core_line_count; i++, iter++) {
-		if (iter->invalid) {
-			/* IO error, skip this item */
-			continue;
-		}
-
-		if (!iter->flush)
-			continue;
-
-		if (core_id == iter->core_id)
-			continue;
-
-		core_id = iter->core_id;
-
-		env_atomic_inc(&req->req_remaining);
-
-		core = ocf_cache_get_core(cache, core_id);
-		io = ocf_new_core_io(core, req->io_queue, 0, 0,
-				OCF_WRITE, 0, 0);
-		if (!io) {
-			_ocf_cleaner_flush_cores_io_end(iter, req, -OCF_ERR_NO_MEM);
-			continue;
-		}
-
-		ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_flush_cores_io_cmpl);
-
-		ocf_volume_submit_flush(io);
-	}
-
-	/* Protect IO completion race */
-	_ocf_cleaner_flush_cores_io_end(NULL, req, 0);
+	/* Submit flush request */
+	ocf_req_forward_core_flush(req);
 
 	return 0;
 }
 
-static void _ocf_cleaner_core_io_end(struct ocf_request *req)
+static void _ocf_cleaner_core_io_end(struct ocf_request *req, int error)
 {
-	if (env_atomic_dec_return(&req->req_remaining))
-		return;
+	struct ocf_map_info *iter = req->map;
+	uint32_t i;
 
 	OCF_DEBUG_MSG(req->cache, "Core writes finished");
 
-	/*
-	 * All cache read requests done, now we can submit writes to cores,
-	 * Move processing to thread, where IO will be (and can be) submitted
-	 */
-	req->engine_handler = _ocf_cleaner_fire_flush_cores;
-	ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
-}
-
-static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error)
-{
-	struct ocf_map_info *map = io->priv1;
-	struct ocf_request *req = io->priv2;
-	ocf_core_t core = ocf_cache_get_core(req->cache, map->core_id);
-
 	if (error) {
-		map->invalid |= 1;
+		for (i = 0; i < req->core_line_count; i++, iter++) {
+			if (!iter->flush)
+				continue;
+
+			iter->invalid = true;
+
+			ocf_core_stats_core_error_update(req->core, OCF_WRITE);
+		}
+
 		_ocf_cleaner_set_error(req);
-		ocf_core_stats_core_error_update(core, OCF_WRITE);
 	}
 
-	_ocf_cleaner_core_io_end(req);
-
-	ocf_io_put(io);
+	req->engine_handler = _ocf_cleaner_fire_flush_core;
+	ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
 }
 
 static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
 		struct ocf_map_info *iter, uint64_t begin, uint64_t end)
 {
 	uint64_t addr, offset;
-	int err;
 	ocf_cache_t cache = req->cache;
-	struct ocf_io *io;
-	ocf_core_t core = ocf_cache_get_core(cache, iter->core_id);
 	ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
 			iter->coll_idx);
 
@@ -520,36 +435,15 @@ static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *req,
 	offset = (ocf_line_size(cache) * iter->hash)
 		+ SECTORS_TO_BYTES(begin);
 
-	io = ocf_new_core_io(core, req->io_queue, addr,
-			SECTORS_TO_BYTES(end - begin), OCF_WRITE, part_id, 0);
-	if (!io)
-		goto error;
-
-	err = ocf_io_set_data(io, req->data, offset);
-	if (err) {
-		ocf_io_put(io);
-		goto error;
-	}
-
-	ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_core_io_cmpl);
-
-	ocf_core_stats_core_block_update(core, part_id, OCF_WRITE,
+	ocf_core_stats_core_block_update(req->core, part_id, OCF_WRITE,
 			SECTORS_TO_BYTES(end - begin));
 
 	OCF_DEBUG_PARAM(req->cache, "Core write, line = %llu, "
 			"sector = %llu, count = %llu", iter->core_line, begin,
 			end - begin);
 
-	/* Increase IO counter to be processed */
-	env_atomic_inc(&req->req_remaining);
-
-	/* Send IO */
-	ocf_volume_submit_io(io);
-
-	return;
-error:
-	iter->invalid = true;
-	_ocf_cleaner_set_error(req);
+	ocf_req_forward_core_io(req, OCF_WRITE, addr,
+			SECTORS_TO_BYTES(end - begin), offset);
 }
 
 static void _ocf_cleaner_core_submit_io(struct ocf_request *req,
@@ -600,10 +494,10 @@ static int _ocf_cleaner_fire_core(struct ocf_request *req)
 
 	OCF_DEBUG_TRACE(req->cache);
 
-	/* Protect IO completion race */
-	env_atomic_set(&req->req_remaining, 1);
+	req->core_forward_end = _ocf_cleaner_core_io_end;
 
 	/* Submits writes to the core */
+	ocf_req_forward_core_get(req);
 	for (i = 0; i < req->core_line_count; i++) {
 		iter = &(req->map[i]);
 
@@ -625,43 +519,32 @@ static int _ocf_cleaner_fire_core(struct ocf_request *req)
 				req->lock_idx, req->map[i].core_id,
 				req->map[i].core_line);
 	}
-
-	/* Protect IO completion race */
-	_ocf_cleaner_core_io_end(req);
+	ocf_req_forward_core_put(req);
 
 	return 0;
 }
 
-static void _ocf_cleaner_cache_io_end(struct ocf_request *req)
+static void _ocf_cleaner_cache_io_end(struct ocf_request *req, int error)
 {
-	if (env_atomic_dec_return(&req->req_remaining))
-		return;
+	struct ocf_map_info *iter = req->map;
+	uint32_t i;
 
-	/*
-	 * All cache read requests done, now we can submit writes to cores,
-	 * Move processing to thread, where IO will be (and can be) submitted
-	 */
-	req->engine_handler = _ocf_cleaner_fire_core;
-	ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
-
 	OCF_DEBUG_MSG(req->cache, "Cache reads finished");
-}
-
-static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
-{
-	struct ocf_map_info *map = io->priv1;
-	struct ocf_request *req = io->priv2;
-	ocf_core_t core = ocf_cache_get_core(req->cache, map->core_id);
 
 	if (error) {
-		map->invalid |= 1;
+		for (i = 0; i < req->core_line_count; i++, iter++) {
+			if (!iter->flush)
+				continue;
+
+			iter->invalid = true;
+
+			ocf_core_stats_cache_error_update(req->core, OCF_READ);
+		}
 		_ocf_cleaner_set_error(req);
-		ocf_core_stats_cache_error_update(core, OCF_READ);
 	}
 
-	_ocf_cleaner_cache_io_end(req);
-
-	ocf_io_put(io);
+	req->engine_handler = _ocf_cleaner_fire_core;
+	ocf_queue_push_req(req, OCF_QUEUE_ALLOW_SYNC | OCF_QUEUE_PRIO_HIGH);
 }
 
 /*
@@ -671,21 +554,16 @@ static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
 static int _ocf_cleaner_fire_cache(struct ocf_request *req)
 {
 	ocf_cache_t cache = req->cache;
-	ocf_core_t core;
 	uint32_t i;
 	struct ocf_map_info *iter = req->map;
 	uint64_t addr, offset;
 	ocf_part_id_t part_id;
-	struct ocf_io *io;
-	int err;
 
-	/* Protect IO completion race */
-	env_atomic_set(&req->req_remaining, 1);
+	req->cache_forward_end = _ocf_cleaner_cache_io_end;
+	req->byte_length = ocf_line_size(cache);
 
+	ocf_req_forward_cache_get(req);
 	for (i = 0; i < req->core_line_count; i++, iter++) {
-		core = ocf_cache_get_core(cache, iter->core_id);
-		if (!core)
-			continue;
 		if (!iter->flush)
 			continue;
 
@@ -700,35 +578,15 @@ static int _ocf_cleaner_fire_cache(struct ocf_request *req)
 
 		part_id = ocf_metadata_get_partition_id(cache, iter->coll_idx);
 
-		io = ocf_new_cache_io(cache, req->io_queue,
-				addr, ocf_line_size(cache),
-				OCF_READ, part_id, 0);
-		if (!io) {
-			/* Allocation error */
-			iter->invalid = true;
-			_ocf_cleaner_set_error(req);
-			continue;
-		}
-
-		ocf_io_set_cmpl(io, iter, req, _ocf_cleaner_cache_io_cmpl);
-		err = ocf_io_set_data(io, req->data, offset);
-		if (err) {
-			ocf_io_put(io);
-			iter->invalid = true;
-			_ocf_cleaner_set_error(req);
-			continue;
-		}
-
-		ocf_core_stats_cache_block_update(core, part_id, OCF_READ,
+		ocf_core_stats_cache_block_update(req->core, part_id, OCF_READ,
 				ocf_line_size(cache));
 
-		env_atomic_inc(&req->req_remaining);
+		req->byte_position = iter->core_line * ocf_line_size(cache);
 
-		ocf_volume_submit_io(io);
+		ocf_req_forward_cache_io(req, OCF_READ, addr,
+				ocf_line_size(cache), offset);
 	}
-
-	/* Protect IO completion race */
-	_ocf_cleaner_cache_io_end(req);
+	ocf_req_forward_cache_put(req);
 
 	return 0;
 }
@@ -760,17 +618,23 @@ static int _ocf_cleaner_check_map(struct ocf_request *req)
 	return 0;
 }
 
-static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t count)
+static int _ocf_cleaner_do_fire(struct ocf_request *req)
 {
+	struct ocf_request *master;
 	int result;
 
 	req->engine_handler = _ocf_cleaner_check_map;
-	req->core_line_count = count;
+	req->byte_position = req->core_line_count * ocf_line_size(req->cache);
 
+	master = (req->master_io_req_type == ocf_cleaner_req_type_master) ?
+			req : req->master_io_req;
+
 	/* Handle cache lines locks */
 	result = _ocf_cleaner_cache_line_lock(req);
 
 	if (result >= 0) {
+		env_atomic_inc(&master->master_remaining);
+
 		if (result == OCF_LOCK_ACQUIRED) {
 			OCF_DEBUG_MSG(req->cache, "Lock acquired");
 			_ocf_cleaner_check_map(req);
@@ -789,24 +653,62 @@ static void _ocf_cleaner_fire_error(struct ocf_request *master,
 		struct ocf_request *req, int err)
 {
 	master->error = err;
-	_ocf_cleaner_complete_req(req);
 	_ocf_cleaner_dealloc_req(req);
 }
 
+uint32_t ocf_cleaner_populate_req(struct ocf_request *req, uint32_t curr,
+		const struct ocf_cleaner_attribs *attribs)
+{
+	uint32_t count = attribs->count;
+	uint32_t map_max = req->core_line_count, map_curr;
+	ocf_cache_line_t cache_line;
+	uint64_t core_sector;
+	ocf_core_id_t core_id, last_core_id = OCF_CORE_ID_INVALID;
+
+	for (map_curr = 0; map_curr < map_max && curr < count; curr++) {
+		if (attribs->getter(req->cache, attribs->getter_context,
+				curr, &cache_line)) {
+			continue;
+		}
+
+		/* Get mapping info */
+		ocf_metadata_get_core_info(req->cache, cache_line,
+				&core_id, &core_sector);
+
+		if (last_core_id == OCF_CORE_ID_INVALID) {
+			last_core_id = core_id;
+			req->core = ocf_cache_get_core(req->cache, core_id);
+		}
+
+		if (core_id != last_core_id)
+			break;
+
+		req->map[map_curr].core_id = core_id;
+		req->map[map_curr].core_line = core_sector;
+		req->map[map_curr].coll_idx = cache_line;
+		req->map[map_curr].status = LOOKUP_HIT;
+		req->map[map_curr].hash = map_curr;
+		map_curr++;
+	}
+
+	req->core_line_count = map_curr;
+
+	return curr;
+}
+
 /*
  * cleaner - Main function
  */
 void ocf_cleaner_fire(struct ocf_cache *cache,
 		const struct ocf_cleaner_attribs *attribs)
 {
-	uint32_t i, i_out = 0, count = attribs->count;
+	uint32_t count = attribs->count, curr = 0;
 	/* max cache lines to be cleaned with one request: 1024 if over 4k lines
 	 * to be flushed, otherwise 128. for large cleaning operations, 1024 is
 	 * optimal number, but for smaller 1024 is too large to benefit from
 	 * cleaning request overlapping
 	 */
 	uint32_t max = _ocf_cleaner_get_req_max_count(count, false);
-	ocf_cache_line_t cache_line;
 	/* it is possible that more than one cleaning request will be generated
 	 * for each cleaning order, thus multiple allocations. At the end of
 	 * loop, req is set to zero and NOT deallocated, as deallocation is
@@ -817,8 +719,6 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
 	 */
 	struct ocf_request *req = NULL, *master;
 	int err;
-	ocf_core_id_t core_id;
-	uint64_t core_sector;
 
 	/* Allocate master request */
 	master = _ocf_cleaner_alloc_master_req(cache, max, attribs);
@@ -827,70 +727,41 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
 		return;
 	}
 
-	req = master;
+	curr = ocf_cleaner_populate_req(master, curr, attribs);
 
-	env_atomic_inc(&master->master_remaining);
-	for (i = 0; i < count; i++) {
-		/* when request hasn't yet been allocated or is just issued */
-		if (unlikely(!req)) {
-			if (max > count - i) {
-				/* less than max left */
-				max = count - i;
-			}
-
-			req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
-			if (unlikely(!req)) {
-				master->error = -OCF_ERR_NO_MEM;
-				break;
-			}
-		}
-
-		if (attribs->getter(cache, attribs->getter_context,
-				i, &cache_line)) {
-			OCF_DEBUG_MSG(cache, "Skip");
-			continue;
-		}
-
-		/* Get mapping info */
-		ocf_metadata_get_core_info(cache, cache_line, &core_id,
-				&core_sector);
-
-		if (unlikely(!cache->core[core_id].opened)) {
-			OCF_DEBUG_MSG(cache, "Core object inactive");
-			continue;
-		}
-
-		req->map[i_out].core_id = core_id;
-		req->map[i_out].core_line = core_sector;
-		req->map[i_out].coll_idx = cache_line;
-		req->map[i_out].status = LOOKUP_HIT;
-		req->map[i_out].hash = i_out;
-		i_out++;
-
-		if (max == i_out) {
-			err = _ocf_cleaner_do_fire(req, i_out);
-			if (err) {
-				_ocf_cleaner_fire_error(master, req, err);
-				req = NULL;
-				break;
-			}
-			i_out = 0;
-			req = NULL;
-		}
+	if (unlikely(master->core_line_count == 0)) {
+		_ocf_cleaner_dealloc_req(master);
+		goto out;
 	}
 
-	if (req && i_out) {
-		err = _ocf_cleaner_do_fire(req, i_out);
-		if (err)
-			_ocf_cleaner_fire_error(master, req, err);
-		req = NULL;
+	err = _ocf_cleaner_do_fire(master);
+	if (err) {
+		_ocf_cleaner_fire_error(master, master, err);
+		goto out;
+	}
+
+	while (curr < count) {
+		max = OCF_MIN(max, count - curr);
+		req = _ocf_cleaner_alloc_slave_req(master, max, attribs);
+		if (!req) {
+			master->error = -OCF_ERR_NO_MEM;
+			break;
+		}
+		curr = ocf_cleaner_populate_req(req, curr, attribs);
+		if (unlikely(req->core_line_count == 0)) {
+			_ocf_cleaner_dealloc_req(req);
+			break;
+		}
+
+		err = _ocf_cleaner_do_fire(req);
+		if (err) {
+			_ocf_cleaner_fire_error(master, req, err);
+			break;
+		}
 	}
 
+out:
 	_ocf_cleaner_complete_req(master);
-
-	if (req && !i_out)
-		_ocf_cleaner_dealloc_req(req);
 }
 
 static int _ocf_cleaner_do_flush_data_getter(struct ocf_cache *cache,
@@ -41,10 +41,6 @@ struct ocf_cleaner_attribs {
 	/*!< Getter for collecting cache lines which will be cleaned */
 	void *getter_context;
 	/*!< Context for getting cache lines */
-	uint32_t getter_item;
-	/*!< Additional variable that can be used by cleaner call
-	 * to iterate over items
-	 */
 
 	ocf_queue_t io_queue;
 };