Remove functions to wait for cache pending requests
Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
parent 92e9ae12f1
commit 962f9d17d1
@@ -160,20 +160,6 @@ bool ocf_cache_is_device_attached(ocf_cache_t cache);
  */
 bool ocf_cache_is_running(ocf_cache_t cache);
 
-/**
- * @brief Wait for all IO to finish
- *
- * @param[in] cache Cache object
- */
-void ocf_cache_wait_for_io_finish(ocf_cache_t cache);
-
-/**
- * @brief Check if cache has any unfinished requests
- *
- * @param[in] cache Cache object
- */
-bool ocf_cache_has_pending_requests(ocf_cache_t cache);
-
 /**
  * @brief Check if cleaning triggered by eviction runs on the cache
  *
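The two declarations removed above were the public way to block until a cache had no outstanding requests. As a hypothetical illustration only (the adapter function and the ocf/ocf.h umbrella include are assumptions, not part of this commit), a caller might have drained a cache like this before the change:

/* Hypothetical adapter code, for illustration only: it relies on the
 * pre-change public API shown in the hunk above and is not part of
 * this commit. */
#include "ocf/ocf.h"	/* assumed umbrella header for the public API */

static void my_adapter_quiesce(ocf_cache_t cache)
{
	/* Block until every outstanding request has been freed. */
	if (ocf_cache_has_pending_requests(cache))
		ocf_cache_wait_for_io_finish(cache);
}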
@@ -53,26 +53,6 @@ bool ocf_cache_is_device_attached(ocf_cache_t cache)
 	return !ocf_refcnt_frozen(&cache->refcnt.metadata);
 }
 
-void ocf_cache_wait_for_io_finish(ocf_cache_t cache)
-{
-	uint32_t req_active = 0;
-
-	OCF_CHECK_NULL(cache);
-
-	do {
-		req_active = ocf_req_get_allocated(cache);
-		if (req_active)
-			env_msleep(500);
-	} while (req_active);
-}
-
-bool ocf_cache_has_pending_requests(ocf_cache_t cache)
-{
-	OCF_CHECK_NULL(cache);
-
-	return ocf_req_get_allocated(cache) > 0;
-}
-
 /*
  * This is temporary workaround allowing to check if cleaning triggered
  * by eviction policy is running on the cache. This information is needed
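The removed ocf_cache_wait_for_io_finish() is a plain sleep-and-poll loop over the per-cache request counter. The following self-contained sketch reproduces the same pattern with C11 atomics and pthreads instead of OCF's env_ wrappers; all names and values here are illustrative, not OCF code:

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

/* Stand-in for cache->pending_requests. */
static atomic_uint pending_requests = 3;

/* Worker thread: "completes" one outstanding request every 100 ms. */
static void *complete_requests(void *arg)
{
	(void)arg;
	while (atomic_load(&pending_requests) > 0) {
		usleep(100 * 1000);
		atomic_fetch_sub(&pending_requests, 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, complete_requests, NULL);

	/* Same shape as the removed ocf_cache_wait_for_io_finish():
	 * poll the counter and sleep while it is non-zero. */
	while (atomic_load(&pending_requests) > 0)
		usleep(500 * 1000);	/* counterpart of env_msleep(500) */

	pthread_join(worker, NULL);
	printf("all requests drained\n");
	return 0;
}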
@@ -168,8 +168,6 @@ struct ocf_cache {
 
 	char name[OCF_CACHE_NAME_SIZE];
 
-	env_atomic pending_requests;
-
 	struct {
 		struct ocf_refcnt dirty;
 		struct ocf_refcnt metadata;
@@ -191,9 +191,6 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
 	req->core_id = core ? ocf_core_get_id(core) : 0;
 	req->cache = cache;
 
-	if (queue != cache->mngt_queue)
-		env_atomic_inc(&cache->pending_requests);
-
 	req->d2c = (queue != cache->mngt_queue) && !ocf_refcnt_inc(
 			&cache->refcnt.metadata);
 
@@ -278,9 +275,6 @@ void ocf_req_put(struct ocf_request *req)
 	if (!req->d2c && req->io_queue != req->cache->mngt_queue)
 		ocf_refcnt_dec(&req->cache->refcnt.metadata);
 
-	if (req->io_queue != req->cache->mngt_queue)
-		env_atomic_dec(&req->cache->pending_requests);
-
 	allocator = _ocf_req_get_allocator(req->cache,
 			req->alloc_core_line_count);
 	if (allocator) {
@@ -302,8 +296,3 @@ void ocf_req_clear_map(struct ocf_request *req)
 	ENV_BUG_ON(env_memset(req->map,
 			sizeof(req->map[0]) * req->core_line_count, 0));
 }
-
-uint32_t ocf_req_get_allocated(struct ocf_cache *cache)
-{
-	return env_atomic_read(&cache->pending_requests);
-}
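The four preceding hunks remove a small accounting scheme: ocf_req_new() incremented cache->pending_requests for every request not issued on the management queue, ocf_req_put() decremented it, and ocf_req_get_allocated() read it back. Below is a minimal, self-contained model of that lifecycle (toy types and names, not OCF code); the management-queue exclusion is reproduced as in the diff, presumably so that requests issued by management operations do not keep the counter from draining:

/* Toy model of the removed accounting, not OCF code. */
#include <stdatomic.h>
#include <stdio.h>

struct toy_cache {
	atomic_uint pending_requests;	/* stand-in for cache->pending_requests */
	int mngt_queue_id;
};

struct toy_req {
	struct toy_cache *cache;
	int queue_id;
};

/* Mirrors the removed hunk in ocf_req_new(): count every request
 * except those issued on the management queue. */
static void toy_req_new(struct toy_req *req, struct toy_cache *cache, int queue_id)
{
	req->cache = cache;
	req->queue_id = queue_id;
	if (queue_id != cache->mngt_queue_id)
		atomic_fetch_add(&cache->pending_requests, 1);
}

/* Mirrors the removed hunk in ocf_req_put(). */
static void toy_req_put(struct toy_req *req)
{
	if (req->queue_id != req->cache->mngt_queue_id)
		atomic_fetch_sub(&req->cache->pending_requests, 1);
}

/* Mirrors the removed ocf_req_get_allocated(). */
static unsigned int toy_req_get_allocated(struct toy_cache *cache)
{
	return atomic_load(&cache->pending_requests);
}

int main(void)
{
	struct toy_cache cache = { .pending_requests = 0, .mngt_queue_id = 0 };
	struct toy_req io_req, mngt_req;

	toy_req_new(&io_req, &cache, 1);	/* counted */
	toy_req_new(&mngt_req, &cache, 0);	/* management queue: not counted */
	printf("allocated = %u\n", toy_req_get_allocated(&cache));	/* prints 1 */

	toy_req_put(&io_req);
	toy_req_put(&mngt_req);
	printf("allocated = %u\n", toy_req_get_allocated(&cache));	/* prints 0 */
	return 0;
}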
@@ -83,15 +83,6 @@ struct ocf_request *ocf_req_new_extended(ocf_queue_t queue, ocf_core_t core,
 struct ocf_request *ocf_req_new_discard(ocf_queue_t queue, ocf_core_t core,
 		uint64_t addr, uint32_t bytes, int rw);
 
-/**
- * @brief Get number of allocated requests
- *
- * @param cache OCF cache instance
- *
- * @return Number of allocated requests
- */
-uint32_t ocf_req_get_allocated(struct ocf_cache *cache);
-
 /**
  * @brief Increment OCF request reference count
  *