Move resume callback to async lock function params (refactoring)
This is a step towards a common async lock interface in OCF.

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
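For orientation, a minimal sketch of the interface shape before and after this change. Only the call sites and the removed resume member are taken from the diff below; the callback typedef name and the forward declaration of struct ocf_request are assumptions added to keep the sketch self-contained.

/* Sketch only -- not the actual OCF headers. The ocf_req_async_lock_cb
 * typedef name is assumed; the rest mirrors what the diff shows. */

struct ocf_request;

/* Before: the resume handler lived in ocf_io_if and the lock helpers
 * took only the request. */
struct ocf_io_if {
	int (*read)(struct ocf_request *req);
	int (*write)(struct ocf_request *req);
	void (*resume)(struct ocf_request *req);	/* removed by this commit */
	const char *name;
};
int ocf_req_trylock_rd(struct ocf_request *req);
int ocf_req_trylock_wr(struct ocf_request *req);

/* After: the resume callback travels with the lock request itself. */
typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);	/* assumed name */
int ocf_req_async_lock_rd(struct ocf_request *req, ocf_req_async_lock_cb cb);
int ocf_req_async_lock_wr(struct ocf_request *req, ocf_req_async_lock_cb cb);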
@@ -37,8 +37,6 @@ struct ocf_io_if {
 	int (*write)(struct ocf_request *req);
 
-	void (*resume)(struct ocf_request *req);
-
 	const char *name;
 };
@@ -22,30 +22,25 @@ static int _ocf_discard_step_do(struct ocf_request *req);
 static int _ocf_discard_step(struct ocf_request *req);
 static int _ocf_discard_flush_cache(struct ocf_request *req);
 static int _ocf_discard_core(struct ocf_request *req);
 static void _ocf_discard_on_resume(struct ocf_request *req);
 
 static const struct ocf_io_if _io_if_discard_step = {
 	.read = _ocf_discard_step,
 	.write = _ocf_discard_step,
-	.resume = _ocf_discard_on_resume,
 };
 
 static const struct ocf_io_if _io_if_discard_step_resume = {
 	.read = _ocf_discard_step_do,
 	.write = _ocf_discard_step_do,
-	.resume = _ocf_discard_on_resume,
 };
 
 static const struct ocf_io_if _io_if_discard_flush_cache = {
 	.read = _ocf_discard_flush_cache,
 	.write = _ocf_discard_flush_cache,
-	.resume = _ocf_discard_on_resume,
 };
 
 static const struct ocf_io_if _io_if_discard_core = {
 	.read = _ocf_discard_core,
 	.write = _ocf_discard_core,
-	.resume = _ocf_discard_on_resume,
 };
 
 static void _ocf_discard_complete_req(struct ocf_request *req, int error)
@@ -239,7 +234,7 @@ static int _ocf_discard_step(struct ocf_request *req)
 
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
-		lock = ocf_req_trylock_wr(req);
+		lock = ocf_req_async_lock_wr(req, _ocf_discard_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
 	}
@@ -104,7 +104,6 @@ static int _ocf_read_fast_do(struct ocf_request *req)
 static const struct ocf_io_if _io_if_read_fast_resume = {
 	.read = _ocf_read_fast_do,
 	.write = _ocf_read_fast_do,
-	.resume = ocf_engine_on_resume,
 };
 
 int ocf_read_fast(struct ocf_request *req)
@@ -129,7 +128,7 @@ int ocf_read_fast(struct ocf_request *req)
 	hit = ocf_engine_is_hit(req);
 	if (hit) {
 		ocf_io_start(&req->ioi.io);
-		lock = ocf_req_trylock_rd(req);
+		lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
 	}
 
 	OCF_METADATA_UNLOCK_RD();
@@ -174,7 +173,6 @@ int ocf_read_fast(struct ocf_request *req)
 static const struct ocf_io_if _io_if_write_fast_resume = {
 	.read = ocf_write_wb_do,
 	.write = ocf_write_wb_do,
-	.resume = ocf_engine_on_resume,
 };
 
 int ocf_write_fast(struct ocf_request *req)
@@ -199,7 +197,7 @@ int ocf_write_fast(struct ocf_request *req)
 	mapped = ocf_engine_is_mapped(req);
 	if (mapped) {
 		ocf_io_start(&req->ioi.io);
-		lock = ocf_req_trylock_wr(req);
+		lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 	}
 
 	OCF_METADATA_UNLOCK_RD();
@@ -99,7 +99,6 @@ int ocf_read_pt_do(struct ocf_request *req)
 static const struct ocf_io_if _io_if_pt_resume = {
 	.read = ocf_read_pt_do,
 	.write = ocf_read_pt_do,
-	.resume = ocf_engine_on_resume,
 };
 
 int ocf_read_pt(struct ocf_request *req)
@@ -130,7 +129,7 @@ int ocf_read_pt(struct ocf_request *req)
 		/* There are mapped cache line,
 		 * lock request for READ access
 		 */
-		lock = ocf_req_trylock_rd(req);
+		lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
 	} else {
 		/* No mapped cache lines, no need to get lock */
 		lock = OCF_LOCK_ACQUIRED;
@@ -208,7 +208,6 @@ static int _ocf_read_generic_do(struct ocf_request *req)
 static const struct ocf_io_if _io_if_read_generic_resume = {
 	.read = _ocf_read_generic_do,
 	.write = _ocf_read_generic_do,
-	.resume = ocf_engine_on_resume,
 };
 
 int ocf_read_generic(struct ocf_request *req)
@@ -243,13 +242,13 @@ int ocf_read_generic(struct ocf_request *req)
 		/* Request is fully mapped, no need to call eviction */
 		if (ocf_engine_is_hit(req)) {
 			/* There is a hit, lock request for READ access */
-			lock = ocf_req_trylock_rd(req);
+			lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
 		} else {
 			/* All cache line mapped, but some sectors are not valid
 			 * and cache insert will be performed - lock for
 			 * WRITE is required
 			 */
-			lock = ocf_req_trylock_wr(req);
+			lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 		}
 	}
@@ -274,12 +273,14 @@ int ocf_read_generic(struct ocf_request *req)
 			/* After mapping turns out there is hit,
 			 * so lock OCF request for read access
 			 */
-			lock = ocf_req_trylock_rd(req);
+			lock = ocf_req_async_lock_rd(req,
+					ocf_engine_on_resume);
 		} else {
 			/* Miss, new cache lines were mapped,
 			 * need to lock OCF request for write access
 			 */
-			lock = ocf_req_trylock_wr(req);
+			lock = ocf_req_async_lock_wr(req,
+					ocf_engine_on_resume);
 		}
 	}
 	OCF_METADATA_UNLOCK_WR();
@@ -21,7 +21,6 @@
 static const struct ocf_io_if _io_if_wb_resume = {
 	.read = ocf_write_wb_do,
 	.write = ocf_write_wb_do,
-	.resume = ocf_engine_on_resume,
 };
 
 static void _ocf_write_wb_update_bits(struct ocf_request *req)
@@ -190,7 +189,7 @@ int ocf_write_wb(struct ocf_request *req)
 	mapped = ocf_engine_is_mapped(req);
 	if (mapped) {
 		/* All cache line are mapped, lock request for WRITE access */
-		lock = ocf_req_trylock_wr(req);
+		lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 	}
 
 	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
@@ -206,7 +205,7 @@ int ocf_write_wb(struct ocf_request *req)
 
 	if (!req->info.mapping_error) {
 		/* Lock request for WRITE access */
-		lock = ocf_req_trylock_wr(req);
+		lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 	}
 
 	OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
@@ -130,7 +130,6 @@ static void _ocf_write_wi_on_resume(struct ocf_request *req)
 static const struct ocf_io_if _io_if_wi_resume = {
 	.read = _ocf_write_wi_do,
 	.write = _ocf_write_wi_do,
-	.resume = _ocf_write_wi_on_resume,
 };
 
 int ocf_write_wi(struct ocf_request *req)
@@ -155,7 +154,7 @@ int ocf_write_wi(struct ocf_request *req)
 
 	if (ocf_engine_mapped_count(req)) {
 		/* Some cache line are mapped, lock request for WRITE access */
-		lock = ocf_req_trylock_wr(req);
+		lock = ocf_req_async_lock_wr(req, _ocf_write_wi_on_resume);
 	} else {
 		lock = OCF_LOCK_ACQUIRED;
 	}
@@ -197,7 +197,6 @@ int ocf_read_wo_do(struct ocf_request *req)
 static const struct ocf_io_if _io_if_wo_resume = {
 	.read = ocf_read_wo_do,
 	.write = ocf_read_wo_do,
-	.resume = ocf_engine_on_resume,
 };
 
 int ocf_read_wo(struct ocf_request *req)
@@ -224,7 +223,7 @@ int ocf_read_wo(struct ocf_request *req)
 		/* There are mapped cache lines,
 		 * lock request for READ access
 		 */
-		lock = ocf_req_trylock_rd(req);
+		lock = ocf_req_async_lock_rd(req, ocf_engine_on_resume);
 	}
 
 	OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/
@@ -158,7 +158,6 @@ static int _ocf_write_wt_do(struct ocf_request *req)
 static const struct ocf_io_if _io_if_wt_resume = {
 	.read = _ocf_write_wt_do,
 	.write = _ocf_write_wt_do,
-	.resume = ocf_engine_on_resume,
 };
 
 int ocf_write_wt(struct ocf_request *req)
@@ -183,7 +182,7 @@ int ocf_write_wt(struct ocf_request *req)
 	mapped = ocf_engine_is_mapped(req);
 	if (mapped) {
 		/* All cache line are mapped, lock request for WRITE access */
-		lock = ocf_req_trylock_wr(req);
+		lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 	}
 
 	OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
@@ -199,7 +198,7 @@ int ocf_write_wt(struct ocf_request *req)
 
 	if (!req->info.mapping_error) {
 		/* Lock request for WRITE access */
-		lock = ocf_req_trylock_wr(req);
+		lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 	}
 
 	OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
@@ -129,7 +129,6 @@ static int _ocf_zero_do(struct ocf_request *req)
 static const struct ocf_io_if _io_if_ocf_zero_do = {
 	.read = _ocf_zero_do,
 	.write = _ocf_zero_do,
-	.resume = ocf_engine_on_resume,
 };
 
 /**
@@ -151,7 +150,7 @@ void ocf_engine_zero_line(struct ocf_request *req)
 	req->io_if = &_io_if_ocf_zero_do;
 
 	/* Some cache line are mapped, lock request for WRITE access */
-	lock = ocf_req_trylock_wr(req);
+	lock = ocf_req_async_lock_wr(req, ocf_engine_on_resume);
 
 	if (lock >= 0) {
 		ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);
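The engine_zero hunk above only asserts that the lock is acquired immediately; in the request-path engines the same return value drives a deferred resume. Below is a hedged sketch of that calling convention under the new interface: the example_* functions are hypothetical, and OCF_LOCK_NOT_ACQUIRED is assumed to be the "wait for the callback" status alongside the OCF_LOCK_ACQUIRED value shown in the diff, with negative values indicating an error.

/* Hypothetical engine step illustrating the new convention: the resume
 * callback is handed to the lock call, so the caller only branches on
 * the immediate result. */
static void example_engine_on_resume(struct ocf_request *req);
static int example_engine_do(struct ocf_request *req);

static int example_engine_step(struct ocf_request *req)
{
	int lock = ocf_req_async_lock_wr(req, example_engine_on_resume);

	if (lock < 0)
		return lock;			/* locking error */

	if (lock == OCF_LOCK_ACQUIRED)
		return example_engine_do(req);	/* proceed in this context */

	/* Assumed OCF_LOCK_NOT_ACQUIRED case: example_engine_on_resume()
	 * will be invoked once the cache lines become available. */
	return 0;
}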