cleaner: Move sort functionality to flush_data abstraction

Sorting is now done on the flush_data used by
ocf_cleaner_do_flush_data_async(), which means that callers of
ocf_cleaner_fire() are now expected to guarantee that entries are
returned by the getter in sorted order. Currently the only case where
ocf_cleaner_fire() is called directly is request cleaning, and the
request map is sorted by definition.

Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
Author: Robert Baldyga <robert.baldyga@huawei.com>
Date:   2024-06-14 13:37:36 +02:00
Commit: 2b94a3ab31
Parent: dd4add45e1
5 changed files with 16 additions and 44 deletions
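
For context, a minimal sketch of the caller pattern this change implies, modeled on the alru/lru call sites below. The clean_sorted() wrapper and the include paths are illustrative assumptions; only ocf_cleaner_sort_flush_data() and ocf_cleaner_do_flush_data_async() are the cleaner API touched by this commit.

#include "ocf/ocf.h"			/* ocf_cache_t (assumed include path) */
#include "utils/utils_cleaner.h"	/* flush_data, cleaner attribs, sort/flush API */

/* Illustrative caller (not part of this commit): after this change a caller
 * that builds a flush_data table sorts it explicitly before handing it to
 * the cleaner, instead of setting the removed attribs.do_sort flag. */
static void clean_sorted(ocf_cache_t cache, struct flush_data *entries,
		uint32_t count, struct ocf_cleaner_attribs *attribs)
{
	/* Sort by core id / core line, as the cleaner used to do internally */
	ocf_cleaner_sort_flush_data(entries, count);

	/* Fire the cleaning for the already-sorted table; the return value
	 * is ignored here, matching the existing call sites */
	ocf_cleaner_do_flush_data_async(cache, entries, count, attribs);
}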


@@ -652,7 +652,6 @@ static void _acp_flush(struct acp_context *acp)
 		.cmpl_fn = _acp_flush_end,
 		.lock_cacheline = false,
 		.lock_metadata = true,
-		.do_sort = false,
 		.cmpl_queue = true,
 		.io_queue = cache->cleaner.io_queue,
 	};


@@ -911,8 +911,9 @@ static void alru_clean(struct alru_context *ctx)
 	to_clean = get_data_to_flush(ctx);
 	if (to_clean > 0) {
 		fctx->flush_perfomed = true;
-		ocf_cleaner_do_flush_data_async(cache, fctx->flush_data, to_clean,
-				&fctx->attribs);
+		ocf_cleaner_sort_flush_data(fctx->flush_data, to_clean);
+		ocf_cleaner_do_flush_data_async(cache, fctx->flush_data,
+				to_clean, &fctx->attribs);
 		ocf_metadata_end_exclusive_access(&cache->metadata.lock);
 		return;
 	}
@@ -940,7 +941,6 @@ void cleaning_alru_perform_cleaning(ocf_cache_t cache, ocf_cleaner_end_t cmpl)
 	fctx->attribs.cmpl_fn = alru_clean_complete;
 	fctx->attribs.lock_cacheline = true;
 	fctx->attribs.lock_metadata = false;
-	fctx->attribs.do_sort = true;
 	fctx->attribs.io_queue = cache->cleaner.io_queue;
 	fctx->attribs.cmpl_queue = true;


@@ -592,7 +592,6 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
 	struct ocf_cleaner_attribs attribs = {
 		.lock_cacheline = false,
 		.lock_metadata = true,
-		.do_sort = true,
 		.cmpl_context = ctx,
 		.cmpl_fn = ocf_lru_clean_end,
@@ -647,6 +646,7 @@ void ocf_lru_clean(ocf_cache_t cache, struct ocf_user_part *user_part,
 		return;
 	}
+	ocf_cleaner_sort_flush_data(entries, i);
 	ocf_cleaner_do_flush_data_async(cache, entries, i, &attribs);
 }


@@ -746,22 +746,6 @@ static int _ocf_cleaner_fire(struct ocf_request *req)
 	return result;
 }
 
-/* Helper function for 'sort' */
-static int _ocf_cleaner_cmp_private(const void *a, const void *b)
-{
-	struct ocf_map_info *_a = (struct ocf_map_info *)a;
-	struct ocf_map_info *_b = (struct ocf_map_info *)b;
-
-	static uint32_t step = 0;
-	OCF_COND_RESCHED_DEFAULT(step);
-
-	if (_a->core_id == _b->core_id)
-		return (_a->core_line > _b->core_line) ? 1 : -1;
-
-	return (_a->core_id > _b->core_id) ? 1 : -1;
-}
-
 /**
  * Prepare cleaning request to be fired
  *
@@ -769,10 +753,8 @@ static int _ocf_cleaner_cmp_private(const void *a, const void *b)
  * @param i_out number of already filled map requests (remaining to be filled
  * with missed
  */
-static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t i_out,
-		bool do_sort)
+static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t i_out)
 {
-	uint32_t i;
 
 	/* Set counts of cache IOs */
 	env_atomic_set(&req->req_remaining, i_out);
@@ -786,14 +768,6 @@ static int _ocf_cleaner_do_fire(struct ocf_request *req, uint32_t i_out)
 		req->map[i_out].hash = i_out;
 	}
 
-	if (do_sort) {
-		/* Sort by core id and core line */
-		env_sort(req->map, req->core_line_count, sizeof(req->map[0]),
-				_ocf_cleaner_cmp_private, NULL);
-		for (i = 0; i < req->core_line_count; i++)
-			req->map[i].hash = i;
-	}
-
 	/* issue actual request */
 	return _ocf_cleaner_fire(req);
 }
@@ -945,7 +919,7 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
 		i_out++;
 		if (max == i_out) {
-			err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
+			err = _ocf_cleaner_do_fire(req, i_out);
 			if (err) {
 				_ocf_cleaner_fire_error(master, req, err);
 				req = NULL;
@@ -958,7 +932,7 @@ void ocf_cleaner_fire(struct ocf_cache *cache,
 	}
 
 	if (req) {
-		err = _ocf_cleaner_do_fire(req, i_out, attribs->do_sort);
+		err = _ocf_cleaner_do_fire(req, i_out);
 		if (err)
 			_ocf_cleaner_fire_error(master, req, err);
 		req = NULL;
@@ -1023,9 +997,10 @@ static void _ocf_cleaner_swap(void *a, void *b, int size)
 	*_b = t;
 }
 
-void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num)
+void ocf_cleaner_sort_flush_data(struct flush_data *flush_data, uint32_t count)
 {
-	env_sort(tbl, num, sizeof(*tbl), _ocf_cleaner_cmp, _ocf_cleaner_swap);
+	env_sort(flush_data, count, sizeof(*flush_data),
+			_ocf_cleaner_cmp, _ocf_cleaner_swap);
 }
 
 void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl,
@@ -1034,9 +1009,8 @@ void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl,
 	int i;
 
 	for (i = 0; i < num; i++) {
-		env_sort(fctbl[i].flush_data, fctbl[i].count,
-				sizeof(*fctbl[i].flush_data), _ocf_cleaner_cmp,
-				_ocf_cleaner_swap);
+		ocf_cleaner_sort_flush_data(fctbl[i].flush_data,
+				fctbl[i].count);
 	}
 }


@@ -30,7 +30,6 @@ struct ocf_cleaner_attribs {
 	uint8_t lock_cacheline : 1; /*!< Cleaner to lock cachelines on its own */
 	uint8_t lock_metadata : 1; /*!< Cleaner to lock metadata on its own */
-	uint8_t do_sort : 1; /*!< Sort cache lines which will be cleaned */
 
 	uint8_t cmpl_queue : 1;
 		/*!< Completion needs to be called from the queue context */
@@ -122,15 +121,15 @@ int ocf_cleaner_do_flush_data_async(struct ocf_cache *cache,
 /**
  * @brief Sort flush data by core sector
  *
- * @param tbl Flush data to sort
- * @param num Number of entries in tbl
+ * @param flush_data Flush data to sort
+ * @param count Number of entries in flush_data
  */
-void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num);
+void ocf_cleaner_sort_flush_data(struct flush_data *flush_data, uint32_t count);
 
 /**
  * @brief Sort flush data in all flush containters
  *
- * @param tbl Flush containers to sort
+ * @param fctbl Flush containers to sort
  * @param num Number of entries in fctbl
  */
 void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl,