Added a mechanism for preventing dirty requests.

Instead of switching the write policy to pass-through, the barrier is raised
by incrementing a counter in the ocf_cache_t structure.

Signed-off-by: Michal Mielewczyk <michal.mielewczyk@intel.com>
Michal Mielewczyk 2019-01-21 07:59:17 -05:00
parent ec37b2ea9e
commit 5c81824a8d
4 changed files with 11 additions and 12 deletions
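The key point is that the barrier is a counter, not a flag: flush and detach can hold it at the same time (detach raises it once itself, and the flush it triggers presumably raises it again through _ocf_mngt_begin_flush()), and dirty requests are re-enabled only once every holder has lowered it. Below is a minimal standalone sketch of the pattern, an editor's illustration only: it uses C11 atomics instead of OCF's env_atomic wrappers, and all names outside the diff are invented.

/* Minimal sketch of the dirty-request barrier (C11 atomics instead of
 * OCF's env_atomic wrappers; names are illustrative, not OCF API). */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

enum cache_mode { MODE_WB, MODE_WT };

static atomic_int dirty_rq_barrier;   /* counter, not a flag */

/* Raise the barrier: any number of holders (flush, detach, ...) may
 * raise it concurrently. */
static void barrier_raise(void)
{
	atomic_fetch_add(&dirty_rq_barrier, 1);
}

/* Lower the barrier; underflow is a bug, mirroring the ENV_BUG_ON()
 * in the patch. */
static void barrier_lower(void)
{
	int prev = atomic_fetch_sub(&dirty_rq_barrier, 1);
	assert(prev > 0);
}

/* While the barrier is raised, write-back requests are served as
 * write-through, so no new dirty data is produced. */
static enum cache_mode effective_mode(enum cache_mode configured)
{
	if (configured == MODE_WB && atomic_load(&dirty_rq_barrier))
		return MODE_WT;
	return configured;
}

int main(void)
{
	barrier_raise();                          /* e.g. detach begins  */
	barrier_raise();                          /* e.g. flush begins   */
	printf("%d\n", effective_mode(MODE_WB));  /* 1 -> MODE_WT        */
	barrier_lower();                          /* flush ends          */
	printf("%d\n", effective_mode(MODE_WB));  /* still MODE_WT       */
	barrier_lower();                          /* detach ends         */
	printf("%d\n", effective_mode(MODE_WB));  /* 0 -> MODE_WB again  */
	return 0;
}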

@@ -233,7 +233,7 @@ ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
 		mode = ocf_cache_mode_pt;
 
 	if (mode == ocf_cache_mode_wb &&
-			env_atomic_read(&cache->flush_started))
+			env_atomic_read(&cache->dirty_rq_barrier))
 		mode = ocf_cache_mode_wt;
 
 	return mode;

@@ -1847,7 +1847,6 @@ int ocf_mngt_cache_detach(ocf_cache_t cache)
 {
 	int i, j, no;
 	int result;
-	ocf_cache_mode_t mode;
 
 	OCF_CHECK_NULL(cache);
 
@@ -1856,9 +1855,10 @@ int ocf_mngt_cache_detach(ocf_cache_t cache)
 	if (!env_atomic_read(&cache->attached))
 		return -EINVAL;
 
-	/* temporarily switch to PT */
-	mode = cache->conf_meta->cache_mode;
-	result = _cache_mng_set_cache_mode(cache, ocf_cache_mode_pt, true);
+	/* prevent dirty io */
+	env_atomic_inc(&cache->dirty_rq_barrier);
+
+	result = ocf_mngt_cache_flush(cache, true);
 	if (result)
 		return result;
 
@@ -1867,10 +1867,7 @@ int ocf_mngt_cache_detach(ocf_cache_t cache)
 	env_waitqueue_wait(cache->pending_cache_wq,
 			!env_atomic_read(&cache->pending_cache_requests));
 
-	/* Restore original mode in metadata - it will be used when new
-	   cache device is attached. By this tume all requests are served
-	   in direct-to-core mode. */
-	cache->conf_meta->cache_mode = mode;
+	ENV_BUG_ON(env_atomic_dec_return(&cache->dirty_rq_barrier) < 0);
 
 	/* remove cacheline metadata and cleaning policy meta for all cores */
 	for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {

@@ -18,7 +18,7 @@ static inline void _ocf_mngt_begin_flush(struct ocf_cache *cache)
 {
 	env_mutex_lock(&cache->flush_mutex);
 
-	env_atomic_set(&cache->flush_started, 1);
+	env_atomic_inc(&cache->dirty_rq_barrier);
 
 	env_waitqueue_wait(cache->pending_dirty_wq,
 			!env_atomic_read(&cache->pending_dirty_requests));
@@ -26,7 +26,7 @@ static inline void _ocf_mngt_end_flush(struct ocf_cache *cache)
 
 static inline void _ocf_mngt_end_flush(struct ocf_cache *cache)
 {
-	env_atomic_set(&cache->flush_started, 0);
+	ENV_BUG_ON(env_atomic_dec_return(&cache->dirty_rq_barrier) < 0);
 
 	env_mutex_unlock(&cache->flush_mutex);
 }

@@ -194,7 +194,9 @@ struct ocf_cache {
 	struct ocf_core_meta_runtime *core_runtime_meta;
 
 	env_atomic flush_in_progress;
-	env_atomic flush_started;
+
+	/* Interpreted as a counter rather than a flag */
+	env_atomic dirty_rq_barrier;
 
 	/* 1 if cache device attached, 0 otherwise */
 	env_atomic attached;