Added mechanism for preventing dirty requests.

Instead of switching the write policy to pass-through, a barrier is raised by incrementing a counter in the ocf_cache_t structure.

Signed-off-by: Michal Mielewczyk <michal.mielewczyk@intel.com>
commit 5c81824a8d
parent ec37b2ea9e
@@ -233,7 +233,7 @@ ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
 		mode = ocf_cache_mode_pt;
 
 	if (mode == ocf_cache_mode_wb &&
-			env_atomic_read(&cache->flush_started))
+			env_atomic_read(&cache->dirty_rq_barrier))
 		mode = ocf_cache_mode_wt;
 
 	return mode;
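The hunk above only swaps which atomic is read. As a standalone illustration (a minimal sketch in plain C11, not the OCF sources; names are simplified), this is the effect the check has once the barrier is counter-based:

#include <stdatomic.h>

enum cache_mode { cache_mode_wt, cache_mode_wb, cache_mode_pt };

struct cache {
	enum cache_mode mode;		/* configured mode, e.g. write-back */
	atomic_int dirty_rq_barrier;	/* > 0 while a flush or detach is running */
};

/* While the barrier is raised, write-back degrades to write-through:
 * requests are still served through the cache, but they can no longer
 * produce new dirty cache lines. */
static enum cache_mode effective_mode(struct cache *cache)
{
	enum cache_mode mode = cache->mode;

	if (mode == cache_mode_wb &&
			atomic_load(&cache->dirty_rq_barrier) > 0)
		mode = cache_mode_wt;

	return mode;
}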
@@ -1847,7 +1847,6 @@ int ocf_mngt_cache_detach(ocf_cache_t cache)
 {
 	int i, j, no;
 	int result;
-	ocf_cache_mode_t mode;
 
 	OCF_CHECK_NULL(cache);
 
@@ -1856,9 +1855,10 @@ int ocf_mngt_cache_detach(ocf_cache_t cache)
 	if (!env_atomic_read(&cache->attached))
 		return -EINVAL;
 
-	/* temporarily switch to PT */
-	mode = cache->conf_meta->cache_mode;
-	result = _cache_mng_set_cache_mode(cache, ocf_cache_mode_pt, true);
+	/* prevent dirty io */
+	env_atomic_inc(&cache->dirty_rq_barrier);
+
+	result = ocf_mngt_cache_flush(cache, true);
 	if (result)
 		return result;
 
@@ -1867,10 +1867,7 @@ int ocf_mngt_cache_detach(ocf_cache_t cache)
 	env_waitqueue_wait(cache->pending_cache_wq,
 		!env_atomic_read(&cache->pending_cache_requests));
 
-	/* Restore original mode in metadata - it will be used when new
-	   cache device is attached. By this tume all requests are served
-	   in direct-to-core mode. */
-	cache->conf_meta->cache_mode = mode;
+	ENV_BUG_ON(env_atomic_dec_return(&cache->dirty_rq_barrier) < 0);
 
 	/* remove cacheline metadata and cleaning policy meta for all cores */
 	for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
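Taken together, the detach hunks replace the old save-mode/force-PT/restore-mode round trip with a raise/lower pair around the flush. A condensed, hypothetical sketch of the resulting control flow, where flush_all() and wait_for_pending() stand in for ocf_mngt_cache_flush() and the pending_cache_wq wait:

#include <assert.h>
#include <stdatomic.h>

struct cache { atomic_int dirty_rq_barrier; /* ... */ };

static int flush_all(struct cache *cache) { (void)cache; return 0; }
static void wait_for_pending(struct cache *cache) { (void)cache; }

static int detach(struct cache *cache)
{
	int result, prev;

	/* Raise the barrier: from here on, write-back requests are served
	 * as write-through, so no fresh dirty data appears while draining. */
	atomic_fetch_add(&cache->dirty_rq_barrier, 1);

	result = flush_all(cache);
	if (result)
		return result;

	wait_for_pending(cache);

	/* Lower the barrier; like ENV_BUG_ON in the diff, the counter
	 * must never go below zero. */
	prev = atomic_fetch_sub(&cache->dirty_rq_barrier, 1);
	assert(prev > 0);

	return 0;
}

Note that the error path returns with the barrier still raised, mirroring the diff, where the decrement only runs after the flush and the pending-request wait have completed.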
@@ -18,7 +18,7 @@ static inline void _ocf_mngt_begin_flush(struct ocf_cache *cache)
 {
 	env_mutex_lock(&cache->flush_mutex);
 
-	env_atomic_set(&cache->flush_started, 1);
+	env_atomic_inc(&cache->dirty_rq_barrier);
 
 	env_waitqueue_wait(cache->pending_dirty_wq,
 		!env_atomic_read(&cache->pending_dirty_requests));
@@ -26,7 +26,7 @@ static inline void _ocf_mngt_begin_flush(struct ocf_cache *cache)
 
 static inline void _ocf_mngt_end_flush(struct ocf_cache *cache)
 {
-	env_atomic_set(&cache->flush_started, 0);
+	ENV_BUG_ON(env_atomic_dec_return(&cache->dirty_rq_barrier) < 0);
 
 	env_mutex_unlock(&cache->flush_mutex);
 }
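In the same spirit, a simplified sketch of the updated begin/end pairing, using pthreads and C11 atomics in place of the env_* wrappers, and spinning with sched_yield() where OCF parks on pending_dirty_wq:

#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

struct cache {
	pthread_mutex_t flush_mutex;
	atomic_int dirty_rq_barrier;
	atomic_int pending_dirty_requests;
};

static void begin_flush(struct cache *cache)
{
	pthread_mutex_lock(&cache->flush_mutex);

	/* Raise the barrier before waiting: no new dirty requests are
	 * admitted while the ones already in flight drain out. */
	atomic_fetch_add(&cache->dirty_rq_barrier, 1);
	while (atomic_load(&cache->pending_dirty_requests) > 0)
		sched_yield();
}

static void end_flush(struct cache *cache)
{
	int prev = atomic_fetch_sub(&cache->dirty_rq_barrier, 1);

	assert(prev > 0);	/* mirrors the ENV_BUG_ON underflow check */
	pthread_mutex_unlock(&cache->flush_mutex);
}

The flush_mutex still serializes whole flush operations against each other; the counter exists for callers such as detach, which raise the barrier without holding that mutex.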
@@ -194,7 +194,9 @@ struct ocf_cache {
 	struct ocf_core_meta_runtime *core_runtime_meta;
 
 	env_atomic flush_in_progress;
-	env_atomic flush_started;
+
+	/* Interpreted as a counter rather than a flag */
+	env_atomic dirty_rq_barrier;
 
 	/* 1 if cache device attached, 0 otherwise */
 	env_atomic attached;
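The comment on the new field is the heart of the change: detach raises the barrier and then calls ocf_mngt_cache_flush(), whose begin/end helpers raise and lower it again, so two raisers can be active at the same time. A small hypothetical demo of why a boolean flag would misbehave here:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dirty_rq_barrier;

static void barrier_raise(void)
{
	atomic_fetch_add(&dirty_rq_barrier, 1);
}

static void barrier_lower(void)
{
	int prev = atomic_fetch_sub(&dirty_rq_barrier, 1);

	assert(prev > 0);	/* the ENV_BUG_ON condition from the diff */
}

int main(void)
{
	barrier_raise();	/* detach: prevent dirty io      */
	barrier_raise();	/* nested: _ocf_mngt_begin_flush */
	barrier_lower();	/* nested: _ocf_mngt_end_flush   */

	/* A flag would read 0 here and readmit dirty requests too early;
	 * the counter correctly stays at 1 until detach finishes. */
	printf("barrier = %d\n", atomic_load(&dirty_rq_barrier));

	barrier_lower();	/* detach done */
	printf("barrier = %d\n", atomic_load(&dirty_rq_barrier));

	return 0;
}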