Unify req naming convention (rq -> req)
Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
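
For illustration only (not part of this commit): after the rename, every metadata flush entry point takes the I/O request under the name req instead of rq. A minimal usage sketch against the renamed API, where my_flush and my_flush_done are hypothetical names; the ocf_req_end_t callback shape (request plus error code) follows the ctx->complete(ctx->req, ctx->error) calls visible in the diff below.

	/* Hypothetical caller, sketching the renamed convention. */
	static void my_flush_done(struct ocf_request *req, int error)
	{
		/* runs once the asynchronous metadata flush finishes */
	}

	static void my_flush(struct ocf_cache *cache, struct ocf_request *my_req)
	{
		/* formerly ocf_metadata_flush_do_asynch(cache, rq, ...) */
		ocf_metadata_flush_do_asynch(cache, my_req, my_flush_done);
	}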
@@ -139,17 +139,17 @@ int ocf_metadata_load_recovery(struct ocf_cache *cache)
 	return cache->metadata.iface.load_recovery(cache);
 }
 
-void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
+void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
 		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
 {
-	cache->metadata.iface.flush_mark(cache, rq, map_idx, to_state,
+	cache->metadata.iface.flush_mark(cache, req, map_idx, to_state,
 			start, stop);
 }
 
 void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
-		struct ocf_request *rq, ocf_req_end_t complete)
+		struct ocf_request *req, ocf_req_end_t complete)
 {
-	cache->metadata.iface.flush_do_asynch(cache, rq, complete);
+	cache->metadata.iface.flush_do_asynch(cache, req, complete);
 }
 
 static inline int ocf_metadata_check_properties(void)

@@ -244,7 +244,7 @@ void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line);
  * @param[in] cache - Cache instance
  * @param[in] line - cache line which to be flushed
  */
-void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
+void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
 		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
 
 /**
@@ -257,7 +257,7 @@ void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
  * @param context - context that will be passed into callback
  */
 void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
-		struct ocf_request *rq, ocf_req_end_t complete);
+		struct ocf_request *req, ocf_req_end_t complete);
 
 /**
  * @brief Load metadata

@@ -1207,7 +1207,7 @@ static void ocf_metadata_hash_flush(struct ocf_cache *cache,
  * Flush specified cache line
  */
 static void ocf_metadata_hash_flush_mark(struct ocf_cache *cache,
-		struct ocf_request *rq, uint32_t map_idx, int to_state,
+		struct ocf_request *req, uint32_t map_idx, int to_state,
 		uint8_t start, uint8_t stop)
 {
 	struct ocf_metadata_hash_ctrl *ctrl = NULL;
@@ -1224,14 +1224,14 @@ static void ocf_metadata_hash_flush_mark(struct ocf_cache *cache,
 	/* Collision table to get mapping cache line to HDD sector*/
 	ocf_metadata_raw_flush_mark(cache,
 			&(ctrl->raw_desc[metadata_segment_collision]),
-			rq, map_idx, to_state, start, stop);
+			req, map_idx, to_state, start, stop);
 }
 
 /*
  * Flush specified cache lines asynchronously
  */
 static void ocf_metadata_hash_flush_do_asynch(struct ocf_cache *cache,
-		struct ocf_request *rq, ocf_req_end_t complete)
+		struct ocf_request *req, ocf_req_end_t complete)
 {
 	int result = 0;
 	struct ocf_metadata_hash_ctrl *ctrl = NULL;
@@ -1245,9 +1245,9 @@ static void ocf_metadata_hash_flush_do_asynch(struct ocf_cache *cache,
 	 * line persistent in case of recovery
 	 */
 
-	env_atomic_inc(&rq->req_remaining); /* Core device IO */
+	env_atomic_inc(&req->req_remaining); /* Core device IO */
 
-	result |= ocf_metadata_raw_flush_do_asynch(cache, rq,
+	result |= ocf_metadata_raw_flush_do_asynch(cache, req,
 			&(ctrl->raw_desc[metadata_segment_collision]),
 			complete);
 

@@ -455,7 +455,7 @@ out:
 	ctx_data_free(cache->owner, data);
 	ocf_io_put(io);
 
-	if (env_atomic_dec_return(&mio->rq_remaining))
+	if (env_atomic_dec_return(&mio->req_remaining))
 		return;
 
 	env_completion_complete(&mio->completion);
@@ -505,7 +505,7 @@ static int metadata_submit_io(
 		goto free_data;
 
 	/* Submit IO */
-	env_atomic_inc(&mio->rq_remaining);
+	env_atomic_inc(&mio->req_remaining);
 	ocf_dobj_submit_io(io);
 
 	return 0;
@@ -542,7 +542,7 @@ static int metadata_io(struct metadata_io *mio)
 		return -EINVAL;
 	}
 
-	env_atomic_set(&mio->rq_remaining, 1);
+	env_atomic_set(&mio->req_remaining, 1);
 	env_completion_init(&mio->completion);
 
 	while (count) {
@@ -559,7 +559,7 @@ static int metadata_io(struct metadata_io *mio)
 		OCF_COND_RESCHED(step, 128);
 	}
 
-	if (env_atomic_dec_return(&mio->rq_remaining) == 0)
+	if (env_atomic_dec_return(&mio->req_remaining) == 0)
 		env_completion_complete(&mio->completion);
 
 	/* Wait for all IO to be finished */

@@ -79,7 +79,7 @@ struct metadata_io {
 	uint32_t page;
 	uint32_t count;
 	env_completion completion;
-	env_atomic rq_remaining;
+	env_atomic req_remaining;
 	ocf_metadata_io_event_t hndl_fn;
 	void *hndl_cntx;
 };

@@ -291,12 +291,12 @@ static int _raw_ram_flush_all(struct ocf_cache *cache,
  * RAM RAM Implementation - Mark to Flush
  */
 static void _raw_ram_flush_mark(struct ocf_cache *cache,
-		struct ocf_request *rq, uint32_t map_idx, int to_state,
+		struct ocf_request *req, uint32_t map_idx, int to_state,
 		uint8_t start, uint8_t stop)
 {
 	if (to_state == DIRTY || to_state == CLEAN) {
-		rq->map[map_idx].flush = true;
-		rq->info.flush_metadata = true;
+		req->map[map_idx].flush = true;
+		req->info.flush_metadata = true;
 	}
 }
 
@@ -305,7 +305,7 @@ static void _raw_ram_flush_mark(struct ocf_cache *cache,
 ******************************************************************************/
 struct _raw_ram_flush_ctx {
 	struct ocf_metadata_raw *raw;
-	struct ocf_request *rq;
+	struct ocf_request *req;
 	ocf_req_end_t complete;
 	env_atomic flush_req_cnt;
 	int error;
@@ -327,8 +327,8 @@ static void _raw_ram_flush_do_asynch_io_complete(struct ocf_cache *cache,
 	OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");
 
 	/* Call metadata flush completed call back */
-	ctx->rq->error |= ctx->error;
-	ctx->complete(ctx->rq, ctx->error);
+	ctx->req->error |= ctx->error;
+	ctx->complete(ctx->req, ctx->error);
 
 	env_free(ctx);
 }
@@ -382,15 +382,15 @@ int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2)
 	return 0;
 }
 
-static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *rq,
+static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *req,
 		uint32_t *pages_tab, struct ocf_metadata_raw *raw,
 		int *pages_to_flush) {
 	int i, j = 0;
-	int line_no = rq->core_line_count;
+	int line_no = req->core_line_count;
 	struct ocf_map_info *map;
 
 	for (i = 0; i < line_no; i++) {
-		map = &rq->map[i];
+		map = &req->map[i];
 		if (map->flush) {
 			pages_tab[j] = _RAW_RAM_PAGE(raw, map->coll_idx);
 			j++;
@@ -401,13 +401,13 @@ static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *rq,
 }
 
 static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
-		struct ocf_request *rq, struct ocf_metadata_raw *raw,
+		struct ocf_request *req, struct ocf_metadata_raw *raw,
 		ocf_req_end_t complete)
 {
 	int result = 0, i;
 	uint32_t __pages_tab[MAX_STACK_TAB_SIZE];
 	uint32_t *pages_tab;
-	int line_no = rq->core_line_count;
+	int line_no = req->core_line_count;
 	int pages_to_flush;
 	uint32_t start_page = 0;
 	uint32_t count = 0;
@@ -417,19 +417,19 @@ static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
 
 	OCF_DEBUG_TRACE(cache);
 
-	if (!rq->info.flush_metadata) {
+	if (!req->info.flush_metadata) {
 		/* Nothing to flush call flush callback */
-		complete(rq, 0);
+		complete(req, 0);
 		return 0;
 	}
 
 	ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
 	if (!ctx) {
-		complete(rq, -ENOMEM);
+		complete(req, -ENOMEM);
 		return -ENOMEM;
 	}
 
-	ctx->rq = rq;
+	ctx->req = req;
 	ctx->complete = complete;
 	ctx->raw = raw;
 	env_atomic_set(&ctx->flush_req_cnt, 1);
@@ -440,7 +440,7 @@ static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
 		pages_tab = env_zalloc(sizeof(*pages_tab) * line_no, ENV_MEM_NOIO);
 		if (!pages_tab) {
 			env_free(ctx);
-			complete(rq, -ENOMEM);
+			complete(req, -ENOMEM);
 			return -ENOMEM;
 		}
 	}
@@ -449,7 +449,7 @@ static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
 	 * to prevent freeing of asynchronous context
 	 */
 
-	__raw_ram_flush_do_asynch_add_pages(rq, pages_tab, raw,
+	__raw_ram_flush_do_asynch_add_pages(req, pages_tab, raw,
 			&pages_to_flush);
 
 	env_sort(pages_tab, pages_to_flush, sizeof(*pages_tab),
@@ -479,7 +479,7 @@ static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
 
 		env_atomic_inc(&ctx->flush_req_cnt);
 
-		result |= metadata_io_write_i_asynch(cache, rq->io_queue, ctx,
+		result |= metadata_io_write_i_asynch(cache, req->io_queue, ctx,
 				raw->ssd_pages_offset + start_page, count,
 				_raw_ram_flush_do_asynch_fill,
 				_raw_ram_flush_do_asynch_io_complete);

@@ -130,11 +130,11 @@ struct raw_iface {
 	int (*flush_all)(struct ocf_cache *cache,
 			struct ocf_metadata_raw *raw);
 
-	void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *rq,
+	void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *req,
 			uint32_t map_idx, int to_state, uint8_t start,
 			uint8_t stop);
 
-	int (*flush_do_asynch)(struct ocf_cache *cache, struct ocf_request *rq,
+	int (*flush_do_asynch)(struct ocf_cache *cache, struct ocf_request *req,
 			struct ocf_metadata_raw *raw,
 			ocf_req_end_t complete);
 };
@@ -308,17 +308,17 @@ static inline int ocf_metadata_raw_flush_all(struct ocf_cache *cache,
 
 
 static inline void ocf_metadata_raw_flush_mark(struct ocf_cache *cache,
-		struct ocf_metadata_raw *raw, struct ocf_request *rq,
+		struct ocf_metadata_raw *raw, struct ocf_request *req,
 		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
 {
-	raw->iface->flush_mark(cache, rq, map_idx, to_state, start, stop);
+	raw->iface->flush_mark(cache, req, map_idx, to_state, start, stop);
 }
 
 static inline int ocf_metadata_raw_flush_do_asynch(struct ocf_cache *cache,
-		struct ocf_request *rq, struct ocf_metadata_raw *raw,
+		struct ocf_request *req, struct ocf_metadata_raw *raw,
 		ocf_req_end_t complete)
 {
-	return raw->iface->flush_do_asynch(cache, rq, raw, complete);
+	return raw->iface->flush_do_asynch(cache, req, raw, complete);
 }
 
 /*

@@ -32,7 +32,7 @@
 #endif
 
 struct _raw_atomic_flush_ctx {
-	struct ocf_request *rq;
+	struct ocf_request *req;
 	ocf_req_end_t complete;
 	env_atomic flush_req_cnt;
 };
@@ -41,18 +41,18 @@ static void _raw_atomic_io_discard_cmpl(struct _raw_atomic_flush_ctx *ctx,
 		int error)
 {
 	if (error)
-		ctx->rq->error = error;
+		ctx->req->error = error;
 
 	if (env_atomic_dec_return(&ctx->flush_req_cnt))
 		return;
 
-	if (ctx->rq->error)
-		ocf_metadata_error(ctx->rq->cache);
+	if (ctx->req->error)
+		ocf_metadata_error(ctx->req->cache);
 
 	/* Call metadata flush completed call back */
 	OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");
 
-	ctx->complete(ctx->rq, ctx->rq->error);
+	ctx->complete(ctx->req, ctx->req->error);
 
 	env_free(ctx);
 }
@@ -69,12 +69,12 @@ static void _raw_atomic_io_discard_end(struct ocf_io *io, int error)
 static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context,
 		uint64_t start_addr, uint32_t len, struct _raw_atomic_flush_ctx *ctx)
 {
-	struct ocf_request *rq = context;
+	struct ocf_request *req = context;
 	struct ocf_io *io = ocf_new_cache_io(cache);
 
 	if (!io) {
-		rq->error = -ENOMEM;
-		return rq->error;
+		req->error = -ENOMEM;
+		return req->error;
 	}
 
 	OCF_DEBUG_PARAM(cache, "Page to flushing = %u, count of pages = %u",
@@ -90,17 +90,17 @@ static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context,
 	else
 		ocf_dobj_submit_write_zeroes(io);
 
-	return rq->error;
+	return req->error;
 }
 
-void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
+void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
 		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
 {
 	if (to_state == INVALID) {
-		rq->map[map_idx].flush = true;
-		rq->map[map_idx].start_flush = start;
-		rq->map[map_idx].stop_flush = stop;
-		rq->info.flush_metadata = true;
+		req->map[map_idx].flush = true;
+		req->map[map_idx].start_flush = start;
+		req->map[map_idx].stop_flush = stop;
+		req->info.flush_metadata = true;
 	}
 }
 
@@ -114,10 +114,10 @@ static inline void _raw_atomic_add_page(struct ocf_cache *cache,
 }
 
 static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
-		struct ocf_request *rq, int map_idx,
+		struct ocf_request *req, int map_idx,
 		struct _raw_atomic_flush_ctx *ctx)
 {
-	struct ocf_map_info *map = &rq->map[map_idx];
+	struct ocf_map_info *map = &req->map[map_idx];
 	uint32_t len = 0;
 	uint64_t start_addr;
 	int result = 0;
@@ -130,12 +130,12 @@ static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache,
 	len = SECTORS_TO_BYTES(map->stop_flush - map->start_flush);
 	len += SECTORS_TO_BYTES(1);
 
-	result = _raw_atomic_io_discard_do(cache, rq, start_addr, len, ctx);
+	result = _raw_atomic_io_discard_do(cache, req, start_addr, len, ctx);
 
 	return result;
 }
 
-int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
+int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
 		struct ocf_metadata_raw *raw, ocf_req_end_t complete)
 {
 	int result = 0, i;
@@ -143,33 +143,33 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
 	uint32_t *clines_tab;
 	int clines_to_flush = 0;
 	uint32_t len = 0;
-	int line_no = rq->core_line_count;
+	int line_no = req->core_line_count;
 	struct ocf_map_info *map;
 	uint64_t start_addr;
 	struct _raw_atomic_flush_ctx *ctx;
 
 	ENV_BUG_ON(!complete);
 
-	if (!rq->info.flush_metadata) {
+	if (!req->info.flush_metadata) {
 		/* Nothing to flush call flush callback */
-		complete(rq, 0);
+		complete(req, 0);
 		return 0;
 	}
 
 	ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
 	if (!ctx) {
-		complete(rq, -ENOMEM);
+		complete(req, -ENOMEM);
 		return -ENOMEM;
 	}
 
-	ctx->rq = rq;
+	ctx->req = req;
 	ctx->complete = complete;
 	env_atomic_set(&ctx->flush_req_cnt, 1);
 
 	if (line_no == 1) {
-		map = &rq->map[0];
+		map = &req->map[0];
 		if (map->flush && map->status != LOOKUP_MISS) {
-			result = _raw_atomic_flush_do_asynch_sec(cache, rq,
+			result = _raw_atomic_flush_do_asynch_sec(cache, req,
 					0, ctx);
 		}
 		_raw_atomic_io_discard_cmpl(ctx, result);
@@ -182,14 +182,14 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
 		clines_tab = env_zalloc(sizeof(*clines_tab) * line_no,
 				ENV_MEM_NOIO);
 		if (!clines_tab) {
-			complete(rq, -ENOMEM);
+			complete(req, -ENOMEM);
 			env_free(ctx);
 			return -ENOMEM;
 		}
 	}
 
 	for (i = 0; i < line_no; i++) {
-		map = &rq->map[i];
+		map = &req->map[i];
 
 		if (!map->flush || map->status == LOOKUP_MISS)
 			continue;
@@ -197,7 +197,7 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
 		if (i == 0) {
 			/* First */
 			if (map->start_flush) {
-				_raw_atomic_flush_do_asynch_sec(cache, rq, i,
+				_raw_atomic_flush_do_asynch_sec(cache, req, i,
 						ctx);
 			} else {
 				_raw_atomic_add_page(cache, clines_tab,
@@ -206,7 +206,7 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
 		} else if (i == (line_no - 1)) {
 			/* Last */
 			if (map->stop_flush != ocf_line_end_sector(cache)) {
-				_raw_atomic_flush_do_asynch_sec(cache, rq,
+				_raw_atomic_flush_do_asynch_sec(cache, req,
 						i, ctx);
 			} else {
 				_raw_atomic_add_page(cache, clines_tab,
@@ -241,7 +241,7 @@ int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
 			len += ocf_line_size(cache);
 		}
 
-		result |= _raw_atomic_io_discard_do(cache, rq, start_addr,
+		result |= _raw_atomic_io_discard_do(cache, req, start_addr,
 				len, ctx);
 
 		if (result)

@@ -6,10 +6,10 @@
 #ifndef __METADATA_RAW_ATOMIC_H__
 #define __METADATA_RAW_ATOMIC_H__
 
-void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
+void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
 		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
 
-int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
+int raw_atomic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
 		struct ocf_metadata_raw *raw, ocf_req_end_t complete);
 
 #endif /* __METADATA_RAW_ATOMIC_H__ */

@@ -428,7 +428,7 @@ int raw_dynamic_flush_all(struct ocf_cache *cache,
 /*
  * RAM DYNAMIC Implementation - Mark to Flush
  */
-void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
+void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
 		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
 {
 	ENV_BUG();
@@ -437,7 +437,7 @@ void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
 /*
  * RAM DYNAMIC Implementation - Do flushing asynchronously
  */
-int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
+int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
 		struct ocf_metadata_raw *raw, ocf_req_end_t complete)
 {
 	ENV_BUG();

@@ -92,13 +92,13 @@ int raw_dynamic_flush_all(struct ocf_cache *cache,
 /*
  * RAW DYNAMIC - Mark specified entry to be flushed
  */
-void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
+void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
 		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
 
 /*
  * DYNAMIC Implementation - Do Flush Asynchronously
  */
-int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *rq,
+int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
 		struct ocf_metadata_raw *raw, ocf_req_end_t complete);
 
 

@@ -57,7 +57,7 @@ int raw_volatile_flush_all(struct ocf_cache *cache,
 /*
  * RAM RAM Implementation - Mark to Flush
  */
-void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
+void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
 		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
 {
 }
@@ -66,9 +66,9 @@ void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
  * RAM RAM Implementation - Do Flush asynchronously
  */
 int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
-		struct ocf_request *rq, struct ocf_metadata_raw *raw,
+		struct ocf_request *req, struct ocf_metadata_raw *raw,
 		ocf_req_end_t complete)
 {
-	complete(rq, 0);
+	complete(req, 0);
 	return 0;
 }

@@ -39,14 +39,14 @@ int raw_volatile_flush_all(struct ocf_cache *cache,
 /*
  * RAM RAW volatile Implementation - Mark to Flush
  */
-void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
+void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
 		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
 
 /*
  * RAM RAW volatile Implementation - Do Flush asynchronously
  */
 int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
-		struct ocf_request *rq, struct ocf_metadata_raw *raw,
+		struct ocf_request *req, struct ocf_metadata_raw *raw,
 		ocf_req_end_t complete);
 
 #endif /* __METADATA_RAW_VOLATILE_H__ */

@@ -195,7 +195,7 @@ struct ocf_metadata_iface {
 	 * @param[in] cache - Cache instance
 	 * @param[in] line - cache line which to be flushed
 	 */
-	void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *rq,
+	void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *req,
 			uint32_t map_idx, int to_state, uint8_t start,
 			uint8_t stop);
 
@@ -209,7 +209,7 @@ struct ocf_metadata_iface {
 	 * @param context - context that will be passed into callback
 	 */
 	void (*flush_do_asynch)(struct ocf_cache *cache,
-			struct ocf_request *rq, ocf_req_end_t complete);
+			struct ocf_request *req, ocf_req_end_t complete);
 
 
 	/* TODO Provide documentation below */

@@ -142,7 +142,7 @@ uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu)
 		}
 		env_mutex_unlock(&syncher->lock);
 		if (ret == 0)
-			ocf_engine_push_rq_front(&curr->fl_req, true);
+			ocf_engine_push_req_front(&curr->fl_req, true);
 		env_cond_resched();
 		env_mutex_lock(&syncher->lock);
 	}
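
The counters renamed throughout (mio->req_remaining, req->req_remaining, ctx->flush_req_cnt) all follow one completion pattern visible in the hunks above: start the counter at 1 for the submitter's own reference, increment once per submitted IO, and fire the completion when a decrement brings it to zero. A minimal self-contained sketch of that pattern (hypothetical names, C11 atomics standing in for the env_atomic wrappers):

	#include <stdatomic.h>
	#include <stdio.h>

	struct flush_ctx {
		atomic_int req_remaining; /* 1 = the submitter's own reference */
	};

	static void submit_one(struct flush_ctx *ctx)
	{
		atomic_fetch_add(&ctx->req_remaining, 1); /* one ref per in-flight IO */
	}

	static void io_done(struct flush_ctx *ctx)
	{
		/* fetch_sub returns the previous value, so the last reference
		 * sees 1; this mirrors env_atomic_dec_return(...) == 0 above */
		if (atomic_fetch_sub(&ctx->req_remaining, 1) == 1)
			printf("all IO finished - run completion callback\n");
	}

	int main(void)
	{
		struct flush_ctx ctx = { .req_remaining = 1 };

		submit_one(&ctx); /* IO #1 */
		submit_one(&ctx); /* IO #2 */
		io_done(&ctx);    /* IO #1 completes */
		io_done(&ctx);    /* IO #2 completes */
		io_done(&ctx);    /* drop submitter's reference: fires completion */
		return 0;
	}

Dropping the submitter's reference last guarantees the callback cannot fire while IOs are still being submitted, which is why metadata_io() sets req_remaining to 1 before its submit loop and decrements it only after the loop ends.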