Allocate requests for management path separately
The management path does not benefit much from mpools, as the number of requests it allocates is very small. It is also less restrictive (mngt_queue does not have single-CPU affinity), so avoiding mpool usage in the management path makes it possible to put additional restrictions on the mpool, which improves I/O performance.

Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
Committed by: Michal Mielewczyk
Parent: 6cd5a27ea9
Commit: 460cd461d3
@@ -40,8 +40,7 @@
 static struct ocf_request *_ocf_cleaner_alloc_req(struct ocf_cache *cache,
 		uint32_t count, const struct ocf_cleaner_attribs *attribs)
 {
-	struct ocf_request *req = ocf_req_new_extended(attribs->io_queue, NULL,
-			0, count * ocf_line_size(cache), OCF_READ);
+	struct ocf_request *req = ocf_req_new_cleaner(attribs->io_queue, count);
 	int ret;
 
 	if (!req)
@@ -105,8 +105,7 @@ int ocf_parallelize_create(ocf_parallelize_t *parallelize,
 		} else {
 			queue = cache->mngt_queue;
 		}
-		tmp_parallelize->reqs[i] = ocf_req_new(queue,
-				NULL, 0, 0, 0);
+		tmp_parallelize->reqs[i] = ocf_req_new_mngt(queue);
 		if (!tmp_parallelize->reqs[i]) {
 			result = -OCF_ERR_NO_MEM;
 			goto err_reqs;
@@ -87,7 +87,7 @@ int ocf_pipeline_create(ocf_pipeline_t *pipeline, ocf_cache_t cache,
 		tmp_pipeline->priv = (void *)priv;
 	}
 
-	req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0);
+	req = ocf_req_new_mngt(cache->mngt_queue);
 	if (!req) {
 		env_vfree(tmp_pipeline);
 		return -OCF_ERR_NO_MEM;
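
The hunks above change only the call sites; the new allocators themselves are defined elsewhere in the patch. As a rough illustration of the idea (not the actual OCF implementation; the struct layout, field names, and function names below are simplified assumptions), a management-path request can be served by a plain zeroed heap allocation, since such requests are rare and not tied to a single CPU, which in turn leaves the mpool free to assume single-CPU affinity on the I/O path:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Simplified stand-ins for OCF types -- illustration only. */
struct ocf_queue;

struct req_sketch {
	struct ocf_queue *io_queue;	/* queue the request is bound to */
	uint32_t core_line_count;	/* cache lines covered by the request */
	/* ...the real ocf_request carries much more state... */
};

/*
 * Management-path allocation: no mpool involved, just a zeroed heap
 * allocation, because these requests are few and not CPU-affine.
 */
static struct req_sketch *req_new_mngt_sketch(struct ocf_queue *queue)
{
	struct req_sketch *req = calloc(1, sizeof(*req));

	if (!req)
		return NULL;

	req->io_queue = queue;
	req->core_line_count = 0;	/* management requests map no core lines */

	return req;
}

int main(void)
{
	struct req_sketch *req = req_new_mngt_sketch(NULL);

	if (!req)
		return 1;

	printf("allocated management request covering %u lines\n",
			req->core_line_count);
	free(req);
	return 0;
}

Compared with an mpool allocation, this trades a small per-allocation cost on the rarely used management path for simpler, stricter assumptions on the hot-path allocator.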