Synchronization of collision table
Add synchronization around metadata collision segment pages. This part of the metadata is modified when a cacheline is mapped/unmapped and when its dirty status changes. Synchronization at the page level is required on top of the cacheline and hash bucket locks to ensure that a metadata flush always reads a consistent state when copying an entire collision table memory page. Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
This commit is contained in:
parent
5684b53d9b
commit
be3b402162
@ -260,8 +260,10 @@ static void ocf_engine_map_cache_line(struct ocf_request *req,
|
||||
ocf_metadata_add_to_partition(cache, part_id, *cache_line);
|
||||
|
||||
/* Add the block to the corresponding collision list */
|
||||
ocf_metadata_start_collision_shared_access(cache, *cache_line);
|
||||
ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index,
|
||||
*cache_line);
|
||||
ocf_metadata_end_collision_shared_access(cache, *cache_line);
|
||||
|
||||
ocf_eviction_init_cache_line(cache, *cache_line, part_id);
|
||||
|
||||
@ -295,9 +297,17 @@ static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
|
||||
case LOOKUP_MAPPED:
|
||||
OCF_DEBUG_RQ(req, "Canceling cache line %u",
|
||||
entry->coll_idx);
|
||||
|
||||
ocf_metadata_start_collision_shared_access(cache,
|
||||
entry->coll_idx);
|
||||
|
||||
set_cache_line_invalid_no_flush(cache, 0,
|
||||
ocf_line_end_sector(cache),
|
||||
entry->coll_idx);
|
||||
|
||||
ocf_metadata_end_collision_shared_access(cache,
|
||||
entry->coll_idx);
|
||||
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -432,9 +432,13 @@ uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
|
||||
evp_lru_zero_line(cache, io_queue, curr_cline);
|
||||
|
||||
} else {
|
||||
ocf_metadata_start_collision_shared_access(cache,
|
||||
curr_cline);
|
||||
set_cache_line_invalid_no_flush(cache, 0,
|
||||
ocf_line_end_sector(cache),
|
||||
curr_cline);
|
||||
ocf_metadata_end_collision_shared_access(cache,
|
||||
curr_cline);
|
||||
|
||||
/* Goto next item. */
|
||||
i++;
|
||||
|
@ -113,9 +113,9 @@ ocf_cache_line_t ocf_metadata_get_cachelines_count(ocf_cache_t cache)
|
||||
void ocf_metadata_flush_all(ocf_cache_t cache,
|
||||
ocf_metadata_end_t cmpl, void *priv)
|
||||
{
|
||||
ocf_metadata_start_exclusive_access(&cache->metadata.lock);
|
||||
ocf_metadata_start_shared_access(&cache->metadata.lock);
|
||||
cache->metadata.iface.flush_all(cache, cmpl, priv);
|
||||
ocf_metadata_end_exclusive_access(&cache->metadata.lock);
|
||||
ocf_metadata_end_shared_access(&cache->metadata.lock);
|
||||
}
|
||||
|
||||
void ocf_metadata_load_all(ocf_cache_t cache,
|
||||
|
@ -2568,7 +2568,7 @@ void ocf_metadata_hash_end_collision_shared_access(struct ocf_cache *cache,
|
||||
&ctrl->raw_desc[metadata_segment_collision];
|
||||
uint32_t page = ocf_metadata_raw_page(raw, line);
|
||||
|
||||
ocf_collision_start_shared_access(&cache->metadata.lock, page);
|
||||
ocf_collision_end_shared_access(&cache->metadata.lock, page);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
|
@ -226,9 +226,9 @@ static int ocf_restart_meta_io(struct ocf_request *req)
|
||||
int ret;
|
||||
|
||||
/* Fill with the latest metadata. */
|
||||
/* TODO: synchronize with concurrent metadata io and hash bucket locks
|
||||
*/
|
||||
ocf_metadata_start_shared_access(&cache->metadata.lock);
|
||||
metadata_io_req_fill(meta_io_req);
|
||||
ocf_metadata_end_shared_access(&cache->metadata.lock);
|
||||
|
||||
io = ocf_new_cache_io(cache, req->io_queue,
|
||||
PAGES_TO_BYTES(meta_io_req->page),
|
||||
|
@ -107,6 +107,8 @@ void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
|
||||
static void _ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
|
||||
uint32_t cache_line)
|
||||
{
|
||||
ocf_metadata_start_collision_shared_access(cache, cache_line);
|
||||
|
||||
set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_sector(cache),
|
||||
cache_line);
|
||||
|
||||
@ -114,6 +116,8 @@ static void _ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
|
||||
* This is especially for removing inactive core
|
||||
*/
|
||||
metadata_clear_dirty(cache, cache_line);
|
||||
|
||||
ocf_metadata_end_collision_shared_access(cache, cache_line);
|
||||
}
|
||||
|
||||
/* caller must hold metadata lock
|
||||
|
@ -976,17 +976,8 @@ static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline,
|
||||
cache->conf_meta->metadata_layout)) {
|
||||
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
|
||||
}
|
||||
|
||||
context->flags.attached_metadata_inited = true;
|
||||
|
||||
if (ocf_metadata_concurrency_attached_init(&cache->metadata.lock,
|
||||
cache, cache->device->hash_table_entries,
|
||||
ocf_metadata_get_num_collision_pages(cache))) {
|
||||
ocf_cache_log(cache, log_err, "Failed to initialize attached "
|
||||
"metadata concurrency\n");
|
||||
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
|
||||
}
|
||||
|
||||
cache->freelist = ocf_freelist_init(cache);
|
||||
if (!cache->freelist)
|
||||
OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
|
||||
@ -1729,7 +1720,6 @@ static void _ocf_mngt_cache_unplug_complete(void *priv, int error)
|
||||
|
||||
ocf_volume_close(&cache->device->volume);
|
||||
|
||||
ocf_metadata_concurrency_attached_deinit(&cache->metadata.lock);
|
||||
ocf_metadata_deinit_variable_size(cache);
|
||||
ocf_concurrency_deinit(cache);
|
||||
ocf_freelist_deinit(cache->freelist);
|
||||
|
@ -72,6 +72,10 @@ static inline uint64_t ocf_lines_2_bytes(struct ocf_cache *cache,
|
||||
/**
|
||||
* @brief Set cache line invalid
|
||||
*
|
||||
* @note Collision page must be locked by the caller (either exclusive access
|
||||
* to collision table page OR write lock on metadata hash bucket combined with
|
||||
* shared access to the collision page)
|
||||
*
|
||||
* @param cache Cache instance
|
||||
* @param start_bit Start bit of cache line for which state will be set
|
||||
* @param end_bit End bit of cache line for which state will be set
|
||||
@ -85,6 +89,10 @@ void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
|
||||
/**
|
||||
* @brief Set cache line invalid without flush
|
||||
*
|
||||
* @note Collision page must be locked by the caller (either exclusive access
|
||||
* to collision table page OR write lock on metadata hash bucket combined with
|
||||
* shared access to the collision page)
|
||||
*
|
||||
* @param cache Cache instance
|
||||
* @param start_bit Start bit of cache line for which state will be set
|
||||
* @param end_bit End bit of cache line for which state will be set
|
||||
@ -96,6 +104,10 @@ void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit,
|
||||
/**
|
||||
* @brief Set cache line valid
|
||||
*
|
||||
* @note Collision page must be locked by the caller (either exclusive access
|
||||
* to collision table page OR write lock on metadata hash bucket combined with
|
||||
* shared access to the collision page)
|
||||
*
|
||||
* @param cache Cache instance
|
||||
* @param start_bit Start bit of cache line for which state will be set
|
||||
* @param end_bit End bit of cache line for which state will be set
|
||||
@ -108,6 +120,10 @@ void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
|
||||
/**
|
||||
* @brief Set cache line clean
|
||||
*
|
||||
* @note Collision page must be locked by the caller (either exclusive access
|
||||
* to collision table page OR write lock on metadata hash bucket combined with
|
||||
* shared access to the collision page)
|
||||
*
|
||||
* @param cache Cache instance
|
||||
* @param start_bit Start bit of cache line for which state will be set
|
||||
* @param end_bit End bit of cache line for which state will be set
|
||||
@ -120,6 +136,10 @@ void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
|
||||
/**
|
||||
* @brief Set cache line dirty
|
||||
*
|
||||
* @note Collision page must be locked by the caller (either exclusive access
|
||||
* to collision table page OR write lock on metadata hash bucket combined with
|
||||
* shared access to the collision page)
|
||||
*
|
||||
* @param cache Cache instance
|
||||
* @param start_bit Start bit of cache line for which state will be set
|
||||
* @param end_bit End bit of cache line for which state will be set
|
||||
@ -163,6 +183,10 @@ static inline void ocf_purge_eviction_policy(struct ocf_cache *cache,
|
||||
/**
|
||||
* @brief Set cache line clean and invalid and remove form lists
|
||||
*
|
||||
* @note Collision page must be locked by the caller (either exclusive access
|
||||
* to collision table page OR write lock on metadata hash bucket combined with
|
||||
* shared access to the collision page)
|
||||
*
|
||||
* @param cache Cache instance
|
||||
* @param start Start bit of range in cache line to purge
|
||||
* @param end End bit of range in cache line to purge
|
||||
@ -224,8 +248,12 @@ static inline void ocf_purge_map_info(struct ocf_request *req)
|
||||
ocf_line_sectors(cache);
|
||||
}
|
||||
|
||||
ocf_metadata_start_collision_shared_access(cache, map[map_idx].
|
||||
coll_idx);
|
||||
_ocf_purge_cache_line_sec(cache, start_bit, end_bit, req,
|
||||
map_idx);
|
||||
ocf_metadata_end_collision_shared_access(cache, map[map_idx].
|
||||
coll_idx);
|
||||
}
|
||||
}
|
||||
|
||||
@ -273,7 +301,11 @@ static inline void ocf_set_valid_map_info(struct ocf_request *req)
|
||||
start_bit = ocf_map_line_start_sector(req, map_idx);
|
||||
end_bit = ocf_map_line_end_sector(req, map_idx);
|
||||
|
||||
ocf_metadata_start_collision_shared_access(cache, map[map_idx].
|
||||
coll_idx);
|
||||
set_cache_line_valid(cache, start_bit, end_bit, req, map_idx);
|
||||
ocf_metadata_end_collision_shared_access(cache, map[map_idx].
|
||||
coll_idx);
|
||||
}
|
||||
}
|
||||
|
||||
@ -284,6 +316,7 @@ static inline void ocf_set_dirty_map_info(struct ocf_request *req)
|
||||
uint8_t end_bit;
|
||||
struct ocf_cache *cache = req->cache;
|
||||
uint32_t count = req->core_line_count;
|
||||
struct ocf_map_info *map = req->map;
|
||||
|
||||
/* Set valid bits for sectors on the basis of map info
|
||||
*
|
||||
@ -295,7 +328,12 @@ static inline void ocf_set_dirty_map_info(struct ocf_request *req)
|
||||
for (map_idx = 0; map_idx < count; map_idx++) {
|
||||
start_bit = ocf_map_line_start_sector(req, map_idx);
|
||||
end_bit = ocf_map_line_end_sector(req, map_idx);
|
||||
|
||||
ocf_metadata_start_collision_shared_access(cache, map[map_idx].
|
||||
coll_idx);
|
||||
set_cache_line_dirty(cache, start_bit, end_bit, req, map_idx);
|
||||
ocf_metadata_end_collision_shared_access(cache, map[map_idx].
|
||||
coll_idx);
|
||||
}
|
||||
}
|
||||
|
||||
@ -306,6 +344,7 @@ static inline void ocf_set_clean_map_info(struct ocf_request *req)
|
||||
uint8_t end_bit;
|
||||
struct ocf_cache *cache = req->cache;
|
||||
uint32_t count = req->core_line_count;
|
||||
struct ocf_map_info *map = req->map;
|
||||
|
||||
/* Set valid bits for sectors on the basis of map info
|
||||
*
|
||||
@ -317,7 +356,12 @@ static inline void ocf_set_clean_map_info(struct ocf_request *req)
|
||||
for (map_idx = 0; map_idx < count; map_idx++) {
|
||||
start_bit = ocf_map_line_start_sector(req, map_idx);
|
||||
end_bit = ocf_map_line_end_sector(req, map_idx);
|
||||
|
||||
ocf_metadata_start_collision_shared_access(cache, map[map_idx].
|
||||
coll_idx);
|
||||
set_cache_line_clean(cache, start_bit, end_bit, req, map_idx);
|
||||
ocf_metadata_end_collision_shared_access(cache, map[map_idx].
|
||||
coll_idx);
|
||||
}
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user