Add getter function for cache->device->concurrency.cache_line
The purpose of this change is to facilitate unit testing.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
parent ce2ff14150, commit 1411314678
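For context on the testing motivation: routing every caller through a single accessor gives unit tests one seam to intercept, instead of many hard-coded cache->device->concurrency.cache_line dereferences scattered across the tree. Below is a minimal, self-contained sketch of that pattern. The struct definitions and names are simplified hypothetical stand-ins for illustration only, not the actual OCF types; only the accessor shape mirrors what this commit adds.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the OCF structures (hypothetical, illustration only). */
struct cache_line_concurrency { int rd_locks; };

struct cache_device {
	struct { struct cache_line_concurrency *cache_line; } concurrency;
};

struct cache { struct cache_device *device; };

/* One accessor instead of repeating the nested pointer chain at every call site. */
static inline struct cache_line_concurrency *
cache_line_concurrency(struct cache *cache)
{
	return cache->device->concurrency.cache_line;
}

/* Code under test depends on the accessor, not on the struct layout. */
static void take_rd_lock(struct cache *cache)
{
	cache_line_concurrency(cache)->rd_locks++;
}

int main(void)
{
	/* A test can inject a fake concurrency context through the same seam. */
	struct cache_line_concurrency fake = { .rd_locks = 0 };
	struct cache_device dev = { .concurrency = { .cache_line = &fake } };
	struct cache cache = { .device = &dev };

	take_rd_lock(&cache);
	assert(fake.rd_locks == 1);
	printf("rd_locks = %d\n", fake.rd_locks);
	return 0;
}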
@@ -398,7 +398,7 @@ static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
 	if (info.status == LOOKUP_HIT &&
 			metadata_test_dirty(cache, info.coll_idx)) {
 		locked = ocf_cache_line_try_lock_rd(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				info.coll_idx);
 	}
 
@@ -474,7 +474,7 @@ static void _acp_flush_end(void *priv, int error)
 
 	for (i = 0; i < flush->size; i++) {
 		ocf_cache_line_unlock_rd(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				flush->data[i].cache_line);
 		ACP_DEBUG_END(acp, flush->data[i].cache_line);
 	}
@@ -682,8 +682,7 @@ static bool block_is_busy(struct ocf_cache *cache,
 	if (!cache->core[core_id].opened)
 		return true;
 
-	if (ocf_cache_line_is_used(
-			cache->device->concurrency.cache_line,
+	if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
 			cache_line)) {
 		return true;
 	}
@@ -1130,7 +1130,8 @@ bool ocf_cache_line_are_waiters(struct ocf_cache_line_concurrency *c,
 bool ocf_cache_line_is_locked_exclusively(struct ocf_cache *cache,
 		ocf_cache_line_t line)
 {
-	struct ocf_cache_line_concurrency *c = cache->device->concurrency.cache_line;
+	struct ocf_cache_line_concurrency *c =
+			ocf_cache_line_concurrency(cache);
 	env_atomic *access = &c->access[line];
 	int val = env_atomic_read(access);
 
@@ -202,4 +202,16 @@ void ocf_cache_line_unlock_wr(struct ocf_cache_line_concurrency *c,
 bool ocf_cache_line_try_lock_wr(struct ocf_cache_line_concurrency *c,
 		ocf_cache_line_t line);
 
+/**
+ * @brief Get cacheline concurrency context
+ *
+ * @param cache - cache instance
+ * @return cacheline concurrency context
+ */
+static inline struct ocf_cache_line_concurrency *
+ocf_cache_line_concurrency(ocf_cache_t cache)
+{
+	return cache->device->concurrency.cache_line;
+}
+
 #endif /* OCF_CONCURRENCY_H_ */
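A note on the shape of the new helper shown above: defining it as static inline in the concurrency header adds no runtime overhead and no new exported symbol, while still hiding the cache->device->concurrency.cache_line layout behind a single name. The unit-testing benefit stated in the commit message presumably follows from the same encapsulation: test code only has to deal with this one accessor rather than with every nested dereference in the engine, eviction, metadata, management, and utils paths updated in the remaining hunks.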
@@ -64,7 +64,7 @@ static void _ocf_backfill_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_WRITE);
 		ocf_engine_invalidate(req);
 	} else {
-		ocf_req_unlock(cache->device->concurrency.cache_line, req);
+		ocf_req_unlock(ocf_cache_line_concurrency(cache), req);
 
 		/* put the request at the last point of the completion path */
 		ocf_req_put(req);
@@ -434,7 +434,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
 		req->error |= error;
 
 		/* End request and do not processing */
-		ocf_req_unlock(req->cache->device->concurrency.cache_line,
+		ocf_req_unlock(ocf_cache_line_concurrency(req->cache),
 				req);
 
 		/* Complete request */
@@ -451,8 +451,7 @@ static void _ocf_engine_clean_end(void *private_data, int error)
 
 static int _lock_clines(struct ocf_request *req)
 {
-	struct ocf_cache_line_concurrency *c =
-		req->cache->device->concurrency.cache_line;
+	struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(req->cache);
 	enum ocf_engine_lock_type lock_type =
 			req->engine_cbs->get_lock_type(req);
 
@@ -742,8 +741,7 @@ static int _ocf_engine_refresh(struct ocf_request *req)
 	req->complete(req, req->error);
 
 	/* Release WRITE lock of request */
-	ocf_req_unlock(req->cache->device->concurrency.cache_line,
-			req);
+	ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Release OCF request */
 	ocf_req_put(req);
@@ -147,7 +147,7 @@ static void _ocf_discard_step_complete(struct ocf_request *req, int error)
 	OCF_DEBUG_RQ(req, "Completion");
 
 	/* Release WRITE lock of request */
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	if (req->error) {
 		ocf_metadata_error(req->cache);
@@ -236,7 +236,7 @@ static int _ocf_discard_step(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
 		lock = ocf_req_async_lock_wr(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				req, _ocf_discard_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
@@ -46,8 +46,7 @@ static void _ocf_read_fast_complete(struct ocf_request *req, int error)
 		ocf_core_stats_cache_error_update(req->core, OCF_READ);
 		ocf_engine_push_req_front_pt(req);
 	} else {
-		ocf_req_unlock(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock(ocf_cache_line_concurrency(req->cache), req);
 
 		/* Complete request */
 		req->complete(req, req->error);
@@ -132,7 +131,7 @@ int ocf_read_fast(struct ocf_request *req)
 	if (hit && part_has_space) {
 		ocf_io_start(&req->ioi.io);
 		lock = ocf_req_async_lock_rd(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}
 
@@ -204,7 +203,7 @@ int ocf_write_fast(struct ocf_request *req)
 	if (mapped && part_has_space) {
 		ocf_io_start(&req->ioi.io);
 		lock = ocf_req_async_lock_wr(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}
 
@@ -31,7 +31,7 @@ static void _ocf_invalidate_req(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to flush metadata to cache");
 
-	ocf_req_unlock(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Put OCF request - decrease reference counter */
 	ocf_req_put(req);
@@ -34,7 +34,7 @@ static void _ocf_read_pt_complete(struct ocf_request *req, int error)
 	/* Complete request */
 	req->complete(req, req->error);
 
-	ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Release OCF request */
 	ocf_req_put(req);
@@ -24,8 +24,8 @@
 
 static void _ocf_read_generic_hit_complete(struct ocf_request *req, int error)
 {
-	struct ocf_cache_line_concurrency *c =
-		req->cache->device->concurrency.cache_line;
+	struct ocf_cache_line_concurrency *c = ocf_cache_line_concurrency(
+			req->cache);
 
 	if (error)
 		req->error |= error;
@@ -60,7 +60,7 @@ static void _ocf_write_wb_io_flush_metadata(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");
 
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	req->complete(req, req->error);
 
@@ -25,7 +25,7 @@ static const struct ocf_io_if _io_if_wi_update_metadata = {
 
 int _ocf_write_wi_next_pass(struct ocf_request *req)
 {
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	if (req->wi_second_pass) {
 		req->complete(req, req->error);
@@ -75,7 +75,7 @@ static void _ocf_write_wi_io_flush_metadata(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to write data to cache");
 
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	req->complete(req, req->error);
 
@@ -128,8 +128,7 @@ static void _ocf_write_wi_core_complete(struct ocf_request *req, int error)
 	OCF_DEBUG_RQ(req, "Completion");
 
 	if (req->error) {
-		ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 		req->complete(req, req->error);
 
@@ -200,7 +199,7 @@ int ocf_write_wi(struct ocf_request *req)
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
 		lock = ocf_req_async_lock_wr(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, _ocf_write_wi_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
@@ -33,7 +33,7 @@ static void ocf_read_wo_cache_complete(struct ocf_request *req, int error)
 	if (req->error)
 		ocf_engine_error(req, true, "Failed to read data from cache");
 
-	ocf_req_unlock_rd(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
 
 	/* Complete request */
 	req->complete(req, req->error);
@@ -169,8 +169,7 @@ static void _ocf_read_wo_core_complete(struct ocf_request *req, int error)
 	if (!req->info.dirty_any || req->error) {
 		OCF_DEBUG_RQ(req, "Completion");
 		req->complete(req, req->error);
-		ocf_req_unlock_rd(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_rd(ocf_cache_line_concurrency(req->cache), req);
 		ocf_req_put(req);
 		return;
 	}
@@ -238,7 +237,7 @@ int ocf_read_wo(struct ocf_request *req)
 		 * lock request for READ access
 		 */
 		lock = ocf_req_async_lock_rd(
-				req->cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(req->cache),
 				req, ocf_engine_on_resume);
 	}
 
@@ -34,8 +34,7 @@ static void _ocf_write_wt_req_complete(struct ocf_request *req)
 		ocf_engine_invalidate(req);
 	} else {
 		/* Unlock reqest from WRITE access */
-		ocf_req_unlock_wr(req->cache->device->concurrency.cache_line,
-				req);
+		ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 		/* Complete request */
 		req->complete(req, req->info.core_error ? req->error : 0);
@@ -31,7 +31,7 @@ static int ocf_zero_purge(struct ocf_request *req)
 		ocf_hb_req_prot_unlock_wr(req); /*- END Metadata WR access ---------*/
 	}
 
-	ocf_req_unlock_wr(req->cache->device->concurrency.cache_line, req);
+	ocf_req_unlock_wr(ocf_cache_line_concurrency(req->cache), req);
 
 	req->complete(req, req->error);
 
@@ -153,7 +153,7 @@ void ocf_engine_zero_line(struct ocf_request *req)
 
 	/* Some cache line are mapped, lock request for WRITE access */
 	lock = ocf_req_async_lock_wr(
-			req->cache->device->concurrency.cache_line,
+			ocf_cache_line_concurrency(req->cache),
 			req, ocf_engine_on_resume);
 
 	if (lock >= 0) {
@@ -357,8 +357,7 @@ static int evp_lru_clean_getter(ocf_cache_t cache, void *getter_context,
 			break;
 
 		/* Prevent evicting already locked items */
-		if (ocf_cache_line_is_used(
-				cache->device->concurrency.cache_line,
+		if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
 				cline)) {
 			continue;
 		}
@@ -492,8 +491,7 @@ uint32_t evp_lru_req_clines(ocf_cache_t cache, ocf_queue_t io_queue,
 			break;
 
 		/* Prevent evicting already locked items */
-		if (ocf_cache_line_is_used(
-				cache->device->concurrency.cache_line,
+		if (ocf_cache_line_is_used(ocf_cache_line_concurrency(cache),
 				cline)) {
 			continue;
 		}
@@ -52,7 +52,7 @@ int ocf_metadata_actor(struct ocf_cache *cache,
 	uint64_t start_line, end_line;
 	int ret = 0;
 	struct ocf_cache_line_concurrency *c =
-			cache->device->concurrency.cache_line;
+			ocf_cache_line_concurrency(cache);
 
 	start_line = ocf_bytes_2_lines(cache, start_byte);
 	end_line = ocf_bytes_2_lines(cache, end_byte);
@@ -81,7 +81,7 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
 		}
 
 		if (!ocf_cache_line_try_lock_wr(
-				cache->device->concurrency.cache_line,
+				ocf_cache_line_concurrency(cache),
 				curr_cline)) {
 			break;
 		}
@@ -90,7 +90,8 @@ void cache_mngt_core_deinit_attached_meta(ocf_core_t core)
 		ocf_purge_cleaning_policy(cache, curr_cline);
 		ocf_metadata_sparse_cache_line(cache, curr_cline);
 
-		ocf_cache_line_unlock_wr(cache->device->concurrency.cache_line,
+		ocf_cache_line_unlock_wr(
+				ocf_cache_line_concurrency(cache),
 				curr_cline);
 
 		if (prev_cline != cache->device->collision_table_entries)
@@ -44,8 +44,7 @@ static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
 	 * only valid bits
	 */
 	if (!is_valid && !ocf_cache_line_are_waiters(
-			cache->device->concurrency.cache_line,
-			line)) {
+			ocf_cache_line_concurrency(cache), line)) {
 		ocf_purge_eviction_policy(cache, line);
 		ocf_metadata_remove_cache_line(cache, line);
 	}
@@ -213,8 +213,7 @@ static int _ocf_cleaner_cache_line_lock(struct ocf_request *req)
 
 	OCF_DEBUG_TRACE(req->cache);
 
-	return ocf_req_async_lock_rd(
-			req->cache->device->concurrency.cache_line,
+	return ocf_req_async_lock_rd(ocf_cache_line_concurrency(req->cache),
 			req, _ocf_cleaner_on_resume);
 }
 