Dynamic I/O queue management

- Queue allocation is now separated from starting the cache.
- Queues can be created and destroyed at runtime (see the sketch after this list).
- All queue operations accept a queue handle instead of a queue id.
- Cache stores queues as a list instead of an array.
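
A minimal sketch of what creating and destroying a queue at runtime might look like with the handle-based API. It assumes the ocf_queue_create()/ocf_queue_put() entry points and the ocf_queue_ops callbacks as exposed in the public OCF headers; the exact signatures at this commit may differ, and my_queue_kick()/my_queue_stop()/my_add_io_queue()/my_remove_io_queue() are hypothetical adapter code, not part of the library.

#include "ocf/ocf.h"

/* Hypothetical adapter callbacks: the environment decides how a queue is
 * serviced, e.g. by waking a worker thread that calls ocf_queue_run(). */
static void my_queue_kick(ocf_queue_t q)
{
	/* schedule ocf_queue_run(q) from the application's I/O context */
}

static void my_queue_stop(ocf_queue_t q)
{
	/* stop the worker servicing this queue and drain pending requests */
}

static const struct ocf_queue_ops my_queue_ops = {
	.kick = my_queue_kick,
	.stop = my_queue_stop,
};

/* Create a queue at runtime; the cache only needs to be running, it no
 * longer has to be started with a fixed number of queues. */
static int my_add_io_queue(ocf_cache_t cache, ocf_queue_t *queue)
{
	return ocf_queue_create(cache, queue, &my_queue_ops);
}

/* Destroy the queue when its I/O path is torn down; the handle, not an id,
 * is what all queue operations take. */
static void my_remove_io_queue(ocf_queue_t queue)
{
	ocf_queue_put(queue);
}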

Signed-off-by: Michal Mielewczyk <michal.mielewczyk@intel.com>
Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Michal Mielewczyk authored 2019-02-15 08:12:00 -05:00, committed by Robert Baldyga
commit e53944d472 (parent 1771228a46)
38 changed files with 379 additions and 445 deletions

@@ -117,7 +117,7 @@ void ocf_engine_update_req_info(struct ocf_cache *cache,
 			req->info.dirty_any++;
 
 			/* Check if cache line is fully dirty */
-			if (metadata_test_dirty_sec(cache, _entry->coll_idx,
+			if (metadata_test_dirty_sec(cache, _entry->coll_idx,
 					start_sector, end_sector))
 				req->info.dirty_all++;
 		}
@@ -488,14 +488,14 @@ void ocf_engine_update_request_stats(struct ocf_request *req)
 
 void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
 {
-	struct ocf_cache *cache = req->cache;
-	struct ocf_queue *q = NULL;
+	ocf_cache_t cache = req->cache;
+	ocf_queue_t q = NULL;
 	unsigned long lock_flags = 0;
 
 	INIT_LIST_HEAD(&req->list);
 
-	ENV_BUG_ON(req->io_queue >= cache->io_queues_no);
-	q = &cache->io_queues[req->io_queue];
+	ENV_BUG_ON(!req->io_queue);
+	q = req->io_queue;
 
 	env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
@@ -508,19 +508,18 @@ void ocf_engine_push_req_back(struct ocf_request *req, bool allow_sync)
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
ctx_queue_kick(cache->owner, q, allow_sync);
ocf_queue_kick(q, allow_sync);
}
void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
{
struct ocf_cache *cache = req->cache;
struct ocf_queue *q = NULL;
ocf_cache_t cache = req->cache;
ocf_queue_t q = NULL;
unsigned long lock_flags = 0;
INIT_LIST_HEAD(&req->list);
ENV_BUG_ON(req->io_queue >= cache->io_queues_no);
q = &cache->io_queues[req->io_queue];
q = req->io_queue;
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
@@ -533,7 +532,7 @@ void ocf_engine_push_req_front(struct ocf_request *req, bool allow_sync)
 	env_atomic_set(&cache->last_access_ms,
 			env_ticks_to_msecs(env_get_tick_count()));
 
-	ctx_queue_kick(cache->owner, q, allow_sync);
+	ocf_queue_kick(q, allow_sync);
 }
 
 void ocf_engine_push_req_front_if(struct ocf_request *req,