Dynamic I/O queue management

- Queue allocation is now separated from starting the cache.
- Queues can be created and destroyed at runtime (see the sketch below).
- All queue ops accept a queue handle instead of a queue id.
- The cache stores queues as a list instead of an array.
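
The change is easiest to see from the adapter's side. A minimal sketch of runtime queue management under the new interface follows; the ocf_queue_create()/ocf_queue_put() entry points and their exact signatures are assumptions based on the list above, not code taken from this patch:

#include "ocf/ocf.h"

/* Hypothetical adapter helper: queues are no longer allocated while the
 * cache is started. The adapter creates them on demand and gets back an
 * opaque handle (ocf_queue_t) rather than an index into a per-cache array.
 */
static int example_add_queue(ocf_cache_t cache, ocf_queue_t *queue)
{
	int ret;

	ret = ocf_queue_create(cache, queue);	/* assumed creation entry point */
	if (ret)
		return ret;

	/* From here on, every queue operation takes the handle directly. */
	return 0;
}

/* Hypothetical adapter helper: a queue can be torn down at runtime,
 * independently of stopping the whole cache.
 */
static void example_remove_queue(ocf_queue_t queue)
{
	ocf_queue_put(queue);	/* assumed destroy/release entry point */
}

Because each request now carries its queue handle (req->io_queue), the engine no longer needs the cache->io_queues_no bound check or the io_queues[] array lookup, as the hunks below show.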

Signed-off-by: Michal Mielewczyk <michal.mielewczyk@intel.com>
Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Author:    Michal Mielewczyk
Date:      2019-02-15 08:12:00 -05:00
Committer: Robert Baldyga
Parent:    1771228a46
Commit:    e53944d472
38 changed files with 379 additions and 445 deletions

@@ -106,8 +106,7 @@ const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t req_cache_mode)
 	return cache_mode_io_if_map[req_cache_mode];
 }

-struct ocf_request *ocf_engine_pop_req(struct ocf_cache *cache,
-		struct ocf_queue *q)
+struct ocf_request *ocf_engine_pop_req(ocf_cache_t cache, ocf_queue_t q)
 {
 	unsigned long lock_flags = 0;
 	struct ocf_request *req;
@@ -263,6 +262,7 @@ int ocf_engine_hndl_fast_req(struct ocf_request *req,
 		ocf_req_cache_mode_t req_cache_mode)
 {
 	const struct ocf_io_if *io_if;
+	int ret;

 	io_if = ocf_get_io_if(req_cache_mode);
 	if (!io_if)
@@ -270,12 +270,16 @@ int ocf_engine_hndl_fast_req(struct ocf_request *req,
 	switch (req->rw) {
 	case OCF_READ:
-		return io_if->read(req);
+		ret = io_if->read(req);
+		break;
 	case OCF_WRITE:
-		return io_if->write(req);
+		ret = io_if->write(req);
+		break;
 	default:
 		return OCF_FAST_PATH_NO;
 	}
+
+	return ret;
 }

 static void ocf_engine_hndl_2dc_req(struct ocf_request *req)

@@ -117,7 +117,7 @@ void ocf_engine_update_req_info(struct ocf_cache *cache,
 			req->info.dirty_any++;

 			/* Check if cache line is fully dirty */
-			if (metadata_test_dirty_sec(cache, _entry->coll_idx,
+			if (metadata_test_dirty_sec(cache, _entry->coll_idx,
 					start_sector, end_sector))
 				req->info.dirty_all++;
 		}
@@ -488,14 +488,14 @@ void ocf_engine_update_request_stats(struct ocf_request *req)
 void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
 {
-	struct ocf_cache *cache = req->cache;
-	struct ocf_queue *q = NULL;
+	ocf_cache_t cache = req->cache;
+	ocf_queue_t q = NULL;
 	unsigned long lock_flags = 0;

 	INIT_LIST_HEAD(&req->list);

-	ENV_BUG_ON(req->io_queue >= cache->io_queues_no);
-	q = &cache->io_queues[req->io_queue];
+	ENV_BUG_ON(!req->io_queue);
+	q = req->io_queue;

 	env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
@@ -508,19 +508,18 @@ void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
 	env_atomic_set(&cache->last_access_ms,
 			env_ticks_to_msecs(env_get_tick_count()));

-	ctx_queue_kick(cache->owner, q, allow_sync);
+	ocf_queue_kick(q, allow_sync);
 }

 void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
 {
-	struct ocf_cache *cache = req->cache;
-	struct ocf_queue *q = NULL;
+	ocf_cache_t cache = req->cache;
+	ocf_queue_t q = NULL;
 	unsigned long lock_flags = 0;

 	INIT_LIST_HEAD(&req->list);

-	ENV_BUG_ON(req->io_queue >= cache->io_queues_no);
-	q = &cache->io_queues[req->io_queue];
+	q = req->io_queue;

 	env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
@@ -533,7 +532,7 @@ void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
 	env_atomic_set(&cache->last_access_ms,
 			env_ticks_to_msecs(env_get_tick_count()));

-	ctx_queue_kick(cache->owner, q, allow_sync);
+	ocf_queue_kick(q, allow_sync);
 }

 void ocf_engine_push_req_front_if(struct ocf_request *req,