Introduce io_queues_lock
The queues can be created and destroyed dynamically at any point in the cache lifetime, and this can happen from different execution contexts, so the queue list needs to be protected with a lock.

Signed-off-by: Robert Baldyga <robert.baldyga@huawei.com>
Signed-off-by: Michal Mielewczyk <michal.mielewczyk@huawei.com>
commit b00ab08473 (parent 8db93260ae)
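The pattern the diff introduces is straightforward: every modification of cache->io_queues in ocf_queue_create() and ocf_queue_put() is wrapped in env_spinlock_lock_irqsave()/env_spinlock_unlock_irqrestore() on the new cache->io_queues_lock, so that queues added and removed from different contexts cannot corrupt the list. As a rough standalone illustration only (not OCF code: pthread_spinlock_t and a hand-rolled singly linked list stand in for env_spinlock and cache->io_queues, and queue_create()/queue_destroy() are made-up names), the sketch below shows the same add-under-lock / delete-under-lock discipline:

/*
 * Minimal sketch of the locking pattern, under the assumptions stated above.
 * A list shared between execution contexts is only touched while holding
 * a spinlock, mirroring what the commit does for cache->io_queues.
 */
#include <pthread.h>
#include <stdlib.h>

struct queue {
	struct queue *next;
};

static struct queue *io_queues;            /* stand-in for cache->io_queues */
static pthread_spinlock_t io_queues_lock;  /* stand-in for cache->io_queues_lock */

/* Called from any context that creates a queue (cf. ocf_queue_create()). */
static struct queue *queue_create(void)
{
	struct queue *q = malloc(sizeof(*q));

	if (!q)
		return NULL;

	pthread_spin_lock(&io_queues_lock);
	q->next = io_queues;       /* list_add() done under the lock */
	io_queues = q;
	pthread_spin_unlock(&io_queues_lock);

	return q;
}

/* Called from any context that drops the last reference (cf. ocf_queue_put()). */
static void queue_destroy(struct queue *q)
{
	struct queue **pp;

	pthread_spin_lock(&io_queues_lock);
	for (pp = &io_queues; *pp; pp = &(*pp)->next) {
		if (*pp == q) {
			*pp = q->next;  /* list_del() done under the lock */
			break;
		}
	}
	pthread_spin_unlock(&io_queues_lock);

	free(q);
}

int main(void)
{
	pthread_spin_init(&io_queues_lock, PTHREAD_PROCESS_PRIVATE);
	queue_destroy(queue_create());
	pthread_spin_destroy(&io_queues_lock);
	return 0;
}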
@@ -850,6 +850,11 @@ static int _ocf_mngt_init_new_cache(struct ocf_cache_mngt_init_params *params)
 			goto lock_err;
 	}
 
+	INIT_LIST_HEAD(&cache->io_queues);
+	result = env_spinlock_init(&cache->io_queues_lock);
+	if (result)
+		goto mutex_err;
+
 	ENV_BUG_ON(!ocf_refcnt_inc(&cache->refcnt.cache));
 
 	/* start with freezed metadata ref counter to indicate detached device*/
@@ -865,6 +870,8 @@ static int _ocf_mngt_init_new_cache(struct ocf_cache_mngt_init_params *params)
 
 	return 0;
 
+mutex_err:
+	env_mutex_destroy(&cache->flush_mutex);
 lock_err:
 	ocf_mngt_cache_unlock(cache);
 	ocf_mngt_cache_lock_deinit(cache);
@@ -1438,6 +1445,8 @@ static void _ocf_mngt_init_handle_error(ocf_ctx_t ctx,
 	if (!params->flags.cache_alloc)
 		return;
 
+	env_spinlock_destroy(&cache->io_queues_lock);
+
 	env_mutex_destroy(&cache->flush_mutex);
 
 	ocf_mngt_cache_lock_deinit(cache);
@@ -1466,8 +1475,6 @@ static void _ocf_mngt_cache_init(ocf_cache_t cache,
 	cache->conf_meta->promotion_policy_type = params->metadata.promotion_policy;
 	__set_cleaning_policy(cache, ocf_cleaning_default);
 
-	INIT_LIST_HEAD(&cache->io_queues);
-
 	/* Init Partitions */
 	ocf_user_part_init(cache);
 	__init_free(cache);
@@ -2197,6 +2204,9 @@ static void ocf_mngt_cache_remove(ocf_ctx_t ctx, ocf_cache_t cache)
 
 	/* Deinitialize locks */
 	ocf_mngt_cache_lock_deinit(cache);
+
+	env_spinlock_destroy(&cache->io_queues_lock);
+
 	env_mutex_destroy(&cache->flush_mutex);
 
 	/* Remove cache from the list */
@@ -107,6 +107,8 @@ struct ocf_cache {
 	struct ocf_cleaner cleaner;
 
 	struct list_head io_queues;
+	env_spinlock io_queues_lock;
+
 	ocf_promotion_policy_t promotion_policy;
 
 	struct {
@@ -48,6 +48,7 @@ int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
 {
 	ocf_queue_t tmp_queue;
 	int result;
+	unsigned long flags = 0;
 
 	OCF_CHECK_NULL(cache);
 
@@ -68,7 +69,9 @@ int ocf_queue_create(ocf_cache_t cache, ocf_queue_t *queue,
 		return result;
 	}
 
+	env_spinlock_lock_irqsave(&cache->io_queues_lock, flags);
 	list_add(&tmp_queue->list, &cache->io_queues);
+	env_spinlock_unlock_irqrestore(&cache->io_queues_lock, flags);
 
 	*queue = tmp_queue;
 
@@ -112,18 +115,24 @@ void ocf_queue_get(ocf_queue_t queue)
 
 void ocf_queue_put(ocf_queue_t queue)
 {
+	ocf_cache_t cache = queue->cache;
+	unsigned long flags = 0;
+
 	OCF_CHECK_NULL(queue);
 
-	if (env_atomic_dec_return(&queue->ref_count) == 0) {
-		queue->ops->stop(queue);
-		if (queue != queue->cache->mngt_queue) {
-			list_del(&queue->list);
-			ocf_queue_seq_cutoff_deinit(queue);
-		}
-		ocf_mngt_cache_put(queue->cache);
-		env_spinlock_destroy(&queue->io_list_lock);
-		env_free(queue);
-	}
+	if (env_atomic_dec_return(&queue->ref_count))
+		return;
+
+	queue->ops->stop(queue);
+	if (queue != queue->cache->mngt_queue) {
+		env_spinlock_lock_irqsave(&cache->io_queues_lock, flags);
+		list_del(&queue->list);
+		env_spinlock_unlock_irqrestore(&cache->io_queues_lock, flags);
+		ocf_queue_seq_cutoff_deinit(queue);
+	}
+	ocf_mngt_cache_put(queue->cache);
+	env_spinlock_destroy(&queue->io_list_lock);
+	env_free(queue);
 }
 
 void ocf_io_handle(struct ocf_io *io, void *opaque)