Merge pull request #82 from robertbaldyga/asynchronous-metadata

Handle metadata asynchronously
Michał Wysoczański 2019-03-26 12:54:26 +01:00 committed by GitHub
commit 03c95d36f0
21 changed files with 1149 additions and 1427 deletions
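This merge converts the metadata flush and load paths from synchronous calls that return an error code into asynchronous calls that report their result through a completion callback (ocf_metadata_end_t, introduced in metadata_common.h below). A minimal caller-side sketch of the new pattern; flush_context, my_flush_complete() and my_flush_metadata() are illustrative names, only ocf_metadata_flush_all() and ocf_metadata_end_t come from this commit:

/* Hypothetical caller-side state - not part of the OCF API */
struct flush_context {
	int result;
};

/* Completion matching ocf_metadata_end_t: void (*)(void *priv, int error) */
static void my_flush_complete(void *priv, int error)
{
	struct flush_context *ctx = priv;

	ctx->result = error;
	/* resume the caller's pipeline here instead of blocking on the flush */
}

static void my_flush_metadata(ocf_cache_t cache, struct flush_context *ctx)
{
	/* Returns immediately; the outcome arrives in my_flush_complete(). */
	ocf_metadata_flush_all(cache, my_flush_complete, ctx);
}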


@@ -810,14 +810,6 @@ int ocf_req_trylock_rd(struct ocf_request *req)
 	return _ocf_req_lock_rd_common(req, req, _req_on_lock);
 }
 
-/*
- * Lock wait request context
- */
-struct _req_wait_context {
-	struct ocf_request *req;
-	env_completion cmpl;
-};
-
 /*
  *
  */


@@ -111,37 +111,23 @@ ocf_cache_line_t ocf_metadata_get_cachelines_count(ocf_cache_t cache)
 void ocf_metadata_flush_all(ocf_cache_t cache,
 		ocf_metadata_end_t cmpl, void *priv)
 {
-	int result;
-
 	OCF_METADATA_LOCK_WR();
-	result = cache->metadata.iface.flush_all(cache);
+	cache->metadata.iface.flush_all(cache, cmpl, priv);
 	OCF_METADATA_UNLOCK_WR();
-	cmpl(priv, result);
-}
-
-void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line)
-{
-	cache->metadata.iface.flush(cache, line);
 }
 
 void ocf_metadata_load_all(ocf_cache_t cache,
 		ocf_metadata_end_t cmpl, void *priv)
 {
-	int result;
-
 	OCF_METADATA_LOCK_WR();
-	result = cache->metadata.iface.load_all(cache);
+	cache->metadata.iface.load_all(cache, cmpl, priv);
 	OCF_METADATA_UNLOCK_WR();
-	cmpl(priv, result);
 }
 
 void ocf_metadata_load_recovery(ocf_cache_t cache,
 		ocf_metadata_end_t cmpl, void *priv)
 {
-	int result;
-
-	result = cache->metadata.iface.load_recovery(cache);
-	cmpl(priv, result);
+	cache->metadata.iface.load_recovery(cache, cmpl, priv);
 }
 
 void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *req,


@ -6,6 +6,7 @@
#ifndef __METADATA_H__ #ifndef __METADATA_H__
#define __METADATA_H__ #define __METADATA_H__
#include "metadata_common.h"
#include "../ocf_cache_priv.h" #include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h" #include "../ocf_ctx_priv.h"
@ -124,8 +125,6 @@ static inline void ocf_metadata_status_bits_unlock(
#define OCF_METADATA_FLUSH_UNLOCK() \ #define OCF_METADATA_FLUSH_UNLOCK() \
ocf_metadata_flush_unlock(cache) ocf_metadata_flush_unlock(cache)
typedef void (*ocf_metadata_end_t)(void *priv, int error);
#include "metadata_cleaning_policy.h" #include "metadata_cleaning_policy.h"
#include "metadata_eviction_policy.h" #include "metadata_eviction_policy.h"
#include "metadata_partition.h" #include "metadata_partition.h"
@ -233,15 +232,6 @@ ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache);
void ocf_metadata_flush_all(ocf_cache_t cache, void ocf_metadata_flush_all(ocf_cache_t cache,
ocf_metadata_end_t cmpl, void *priv); ocf_metadata_end_t cmpl, void *priv);
/**
* @brief Flush metadata for specified cache line
*
* @param[in] cache - Cache instance
* @param[in] line - cache line which to be flushed
*/
void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line);
/** /**
* @brief Mark specified cache line to be flushed * @brief Mark specified cache line to be flushed
* *
@ -283,6 +273,17 @@ void ocf_metadata_load_all(ocf_cache_t cache,
void ocf_metadata_load_recovery(ocf_cache_t cache, void ocf_metadata_load_recovery(ocf_cache_t cache,
ocf_metadata_end_t cmpl, void *priv); ocf_metadata_end_t cmpl, void *priv);
/**
* @brief Get reserved area lba
*
* @param cache Cache instance
*/
static inline uint64_t ocf_metadata_get_reserved_lba(ocf_cache_t cache)
{
return cache->metadata.iface.get_reserved_lba(cache);
}
/* /*
* NOTE Hash table is specific for hash table metadata service implementation * NOTE Hash table is specific for hash table metadata service implementation
* and should be used internally by metadata service. * and should be used internally by metadata service.
@ -302,12 +303,6 @@ static inline void ocf_metadata_set_hash(struct ocf_cache *cache,
cache->metadata.iface.set_hash(cache, index, line); cache->metadata.iface.set_hash(cache, index, line);
} }
static inline void ocf_metadata_flush_hash(struct ocf_cache *cache,
ocf_cache_line_t index)
{
cache->metadata.iface.flush_hash(cache, index);
}
static inline ocf_cache_line_t ocf_metadata_entries_hash( static inline ocf_cache_line_t ocf_metadata_entries_hash(
struct ocf_cache *cache) struct ocf_cache *cache)
{ {


@@ -26,14 +26,4 @@ ocf_metadata_set_cleaning_policy(struct ocf_cache *cache,
 	cache->metadata.iface.set_cleaning_policy(cache, line, policy);
 }
 
-/*
- * FLUSH
- */
-static inline void
-ocf_metadata_flush_cleaning_policy(struct ocf_cache *cache,
-		ocf_cache_line_t line)
-{
-	cache->metadata.iface.flush_cleaning_policy(cache, line);
-}
-
 #endif /* METADATA_CLEANING_POLICY_H_ */


@@ -0,0 +1,12 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_COMMON_H__
+#define __METADATA_COMMON_H__
+
+typedef void (*ocf_metadata_end_t)(void *priv, int error);
+
+#endif /* __METADATA_COMMON_H__ */


@@ -23,13 +23,4 @@ static inline void ocf_metadata_set_evicition_policy(
 	cache->metadata.iface.set_eviction_policy(cache, line, eviction);
 }
 
-/*
- * FLUSH
- */
-static inline void ocf_metadata_flush_evicition_policy(
-		struct ocf_cache *cache, ocf_cache_line_t line)
-{
-	cache->metadata.iface.flush_eviction_policy(cache, line);
-}
-
 #endif /* METADATA_EVICTION_H_ */

File diff suppressed because it is too large.


@ -32,7 +32,7 @@
#define OCF_DEBUG_PARAM(cache, format, ...) #define OCF_DEBUG_PARAM(cache, format, ...)
#endif #endif
static void metadata_io_write_i_asynch_end(struct metadata_io_request *request, static void metadata_io_i_asynch_end(struct metadata_io_request *request,
int error); int error);
static int ocf_restart_meta_io(struct ocf_request *req); static int ocf_restart_meta_io(struct ocf_request *req);
@ -64,9 +64,12 @@ static void metadata_io_read_i_atomic_end(struct ocf_io *io, int error)
/* /*
* Iterative read request * Iterative read request
* TODO: Make this function asynchronous to enable async recovery
* in atomic mode.
*/ */
int metadata_io_read_i_atomic(ocf_cache_t cache, int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue,
ocf_metadata_atomic_io_event_t hndl) void *context, ocf_metadata_atomic_io_event_t drain_hndl,
ocf_metadata_io_end_t compl_hndl)
{ {
uint64_t i; uint64_t i;
uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE; uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE;
@ -84,7 +87,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache,
/* Allocate one 4k page for metadata*/ /* Allocate one 4k page for metadata*/
data = ctx_data_alloc(cache->owner, 1); data = ctx_data_alloc(cache->owner, 1);
if (!data) if (!data)
return -ENOMEM; return -OCF_ERR_NO_MEM;
count = io_sectors_count; count = io_sectors_count;
for (i = 0; i < io_sectors_count; i += curr_count) { for (i = 0; i < io_sectors_count; i += curr_count) {
@ -100,7 +103,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache,
/* Allocate new IO */ /* Allocate new IO */
io = ocf_new_cache_io(cache); io = ocf_new_cache_io(cache);
if (!io) { if (!io) {
result = -ENOMEM; result = -OCF_ERR_NO_MEM;
break; break;
} }
@ -131,7 +134,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache,
break; break;
} }
result |= hndl(cache, i, curr_count, data); result |= drain_hndl(cache, i, curr_count, data);
if (result) if (result)
break; break;
@ -143,41 +146,59 @@ int metadata_io_read_i_atomic(ocf_cache_t cache,
/* Memory free */ /* Memory free */
ctx_data_free(cache->owner, data); ctx_data_free(cache->owner, data);
return result; compl_hndl(cache, context, result);
return 0;
} }
static void metadata_io_write_i_asynch_cmpl(struct ocf_io *io, int error) static void metadata_io_i_asynch_cmpl(struct ocf_io *io, int error)
{ {
struct metadata_io_request *request = io->priv1; struct metadata_io_request *request = io->priv1;
metadata_io_write_i_asynch_end(request, error); metadata_io_i_asynch_end(request, error);
ocf_io_put(io); ocf_io_put(io);
} }
static int ocf_restart_meta_io(struct ocf_request *req) static void metadata_io_req_fill(struct metadata_io_request *meta_io_req)
{ {
struct ocf_io *io; ocf_cache_t cache = meta_io_req->cache;
struct metadata_io_request *meta_io_req;
ocf_cache_t cache;
int i; int i;
int ret;
cache = req->cache;
meta_io_req = req->priv;
/* Fill with the latest metadata. */
OCF_METADATA_LOCK_RD();
for (i = 0; i < meta_io_req->count; i++) { for (i = 0; i < meta_io_req->count; i++) {
meta_io_req->on_meta_fill(cache, meta_io_req->data, meta_io_req->on_meta_fill(cache, meta_io_req->data,
meta_io_req->page + i, meta_io_req->context); meta_io_req->page + i, meta_io_req->context);
} }
}
static void metadata_io_req_drain(struct metadata_io_request *meta_io_req)
{
ocf_cache_t cache = meta_io_req->cache;
int i;
for (i = 0; i < meta_io_req->count; i++) {
meta_io_req->on_meta_drain(cache, meta_io_req->data,
meta_io_req->page + i, meta_io_req->context);
}
}
static int ocf_restart_meta_io(struct ocf_request *req)
{
struct metadata_io_request *meta_io_req = req->priv;
ocf_cache_t cache = req->cache;
struct ocf_io *io;
int ret;
cache = req->cache;
/* Fill with the latest metadata. */
OCF_METADATA_LOCK_RD();
metadata_io_req_fill(meta_io_req);
OCF_METADATA_UNLOCK_RD(); OCF_METADATA_UNLOCK_RD();
io = ocf_new_cache_io(cache); io = ocf_new_cache_io(cache);
if (!io) { if (!io) {
metadata_io_write_i_asynch_end(meta_io_req, -ENOMEM); metadata_io_i_asynch_end(meta_io_req, -OCF_ERR_NO_MEM);
return 0; return 0;
} }
@ -187,11 +208,11 @@ static int ocf_restart_meta_io(struct ocf_request *req)
PAGES_TO_BYTES(meta_io_req->count), PAGES_TO_BYTES(meta_io_req->count),
OCF_WRITE, 0, 0); OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, meta_io_req, NULL, metadata_io_write_i_asynch_cmpl); ocf_io_set_cmpl(io, meta_io_req, NULL, metadata_io_i_asynch_cmpl);
ret = ocf_io_set_data(io, meta_io_req->data, 0); ret = ocf_io_set_data(io, meta_io_req->data, 0);
if (ret) { if (ret) {
ocf_io_put(io); ocf_io_put(io);
metadata_io_write_i_asynch_end(meta_io_req, ret); metadata_io_i_asynch_end(meta_io_req, ret);
return ret; return ret;
} }
ocf_volume_submit_io(io); ocf_volume_submit_io(io);
@ -201,7 +222,7 @@ static int ocf_restart_meta_io(struct ocf_request *req)
/* /*
* Iterative asynchronous write callback * Iterative asynchronous write callback
*/ */
static void metadata_io_write_i_asynch_end(struct metadata_io_request *request, static void metadata_io_i_asynch_end(struct metadata_io_request *request,
int error) int error)
{ {
struct metadata_io_request_asynch *a_req; struct metadata_io_request_asynch *a_req;
@ -218,6 +239,9 @@ static void metadata_io_write_i_asynch_end(struct metadata_io_request *request,
if (error) { if (error) {
request->error |= error; request->error |= error;
request->asynch->error |= error; request->asynch->error |= error;
} else {
if (request->fl_req.rw == OCF_READ)
metadata_io_req_drain(request);
} }
if (env_atomic_dec_return(&request->req_remaining)) if (env_atomic_dec_return(&request->req_remaining))
@ -262,15 +286,15 @@ static void metadata_io_req_error(ocf_cache_t cache,
/* /*
* Iterative write request asynchronously * Iterative write request asynchronously
*/ */
int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue, static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
void *context, uint32_t page, uint32_t count, void *context, uint32_t page, uint32_t count,
ocf_metadata_io_event_t fill_hndl, ocf_metadata_io_event_t io_hndl,
ocf_metadata_io_hndl_on_write_t compl_hndl) ocf_metadata_io_end_t compl_hndl)
{ {
uint32_t curr_count, written; uint32_t curr_count, written;
uint32_t max_count = metadata_io_max_page(cache); uint32_t max_count = metadata_io_max_page(cache);
uint32_t io_count = OCF_DIV_ROUND_UP(count, max_count); uint32_t io_count = OCF_DIV_ROUND_UP(count, max_count);
uint32_t i, i_fill; uint32_t i;
int error = 0, ret; int error = 0, ret;
struct ocf_io *io; struct ocf_io *io;
@ -322,12 +346,14 @@ int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
a_req->reqs[i].context = context; a_req->reqs[i].context = context;
a_req->reqs[i].page = page + written; a_req->reqs[i].page = page + written;
a_req->reqs[i].count = curr_count; a_req->reqs[i].count = curr_count;
a_req->reqs[i].on_meta_fill = fill_hndl; a_req->reqs[i].on_meta_fill = io_hndl;
a_req->reqs[i].on_meta_drain = io_hndl;
a_req->reqs[i].fl_req.io_if = &meta_restart_if; a_req->reqs[i].fl_req.io_if = &meta_restart_if;
a_req->reqs[i].fl_req.io_queue = queue; a_req->reqs[i].fl_req.io_queue = queue;
a_req->reqs[i].fl_req.cache = cache; a_req->reqs[i].fl_req.cache = cache;
a_req->reqs[i].fl_req.priv = &a_req->reqs[i]; a_req->reqs[i].fl_req.priv = &a_req->reqs[i];
a_req->reqs[i].fl_req.info.internal = true; a_req->reqs[i].fl_req.info.internal = true;
a_req->reqs[i].fl_req.rw = dir;
/* /*
* We don't want allocate map for this request in * We don't want allocate map for this request in
@ -355,20 +381,17 @@ int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
break; break;
} }
for (i_fill = 0; i_fill < curr_count; i_fill++) { if (dir == OCF_WRITE)
fill_hndl(cache, a_req->reqs[i].data, metadata_io_req_fill(&a_req->reqs[i]);
page + written + i_fill,
context);
}
/* Setup IO */ /* Setup IO */
ocf_io_configure(io, ocf_io_configure(io,
PAGES_TO_BYTES(a_req->reqs[i].page), PAGES_TO_BYTES(a_req->reqs[i].page),
PAGES_TO_BYTES(a_req->reqs[i].count), PAGES_TO_BYTES(a_req->reqs[i].count),
OCF_WRITE, 0, 0); dir, 0, 0);
ocf_io_set_cmpl(io, &a_req->reqs[i], NULL, ocf_io_set_cmpl(io, &a_req->reqs[i], NULL,
metadata_io_write_i_asynch_cmpl); metadata_io_i_asynch_cmpl);
error = ocf_io_set_data(io, a_req->reqs[i].data, 0); error = ocf_io_set_data(io, a_req->reqs[i].data, 0);
if (error) { if (error) {
ocf_io_put(io); ocf_io_put(io);
@ -423,6 +446,24 @@ int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
return error; return error;
} }
int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
void *context, uint32_t page, uint32_t count,
ocf_metadata_io_event_t fill_hndl,
ocf_metadata_io_end_t compl_hndl)
{
return metadata_io_i_asynch(cache, queue, OCF_WRITE, context,
page, count, fill_hndl, compl_hndl);
}
int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
void *context, uint32_t page, uint32_t count,
ocf_metadata_io_event_t drain_hndl,
ocf_metadata_io_end_t compl_hndl)
{
return metadata_io_i_asynch(cache, queue, OCF_READ, context,
page, count, drain_hndl, compl_hndl);
}
int ocf_metadata_io_init(ocf_cache_t cache) int ocf_metadata_io_init(ocf_cache_t cache)
{ {
return ocf_metadata_updater_init(cache); return ocf_metadata_updater_init(cache);
@ -432,206 +473,3 @@ void ocf_metadata_io_deinit(ocf_cache_t cache)
{ {
ocf_metadata_updater_stop(cache); ocf_metadata_updater_stop(cache);
} }
static void metadata_io_end(struct ocf_io *io, int error)
{
struct metadata_io *mio = io->priv1;
ctx_data_t *data = ocf_io_get_data(io);
uint32_t page = BYTES_TO_PAGES(io->addr);
uint32_t count = BYTES_TO_PAGES(io->bytes);
ocf_cache_t cache = mio->cache;
uint32_t i = 0;
if (error) {
mio->error |= error;
goto out;
}
for (i = 0; mio->dir == OCF_READ && i < count; i++) {
mio->error |= mio->hndl_fn(cache, data, page + i,
mio->hndl_cntx);
}
out:
ctx_data_free(cache->owner, data);
ocf_io_put(io);
if (env_atomic_dec_return(&mio->req_remaining))
return;
env_completion_complete(&mio->completion);
}
static int metadata_submit_io(
ocf_cache_t cache,
struct metadata_io *mio,
uint32_t count,
uint32_t written)
{
ctx_data_t *data;
struct ocf_io *io;
int err;
int i;
/* Allocate IO */
io = ocf_new_cache_io(cache);
if (!io) {
err = -ENOMEM;
goto error;
}
/* Allocate data buffer for this IO */
data = ctx_data_alloc(cache->owner, count);
if (!data) {
err = -ENOMEM;
goto put_io;
}
/* Fill data */
for (i = 0; mio->dir == OCF_WRITE && i < count; i++) {
err = mio->hndl_fn(cache, data,
mio->page + written + i, mio->hndl_cntx);
if (err)
goto free_data;
}
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(mio->page + written),
PAGES_TO_BYTES(count),
mio->dir, 0, 0);
ocf_io_set_cmpl(io, mio, NULL, metadata_io_end);
err = ocf_io_set_data(io, data, 0);
if (err)
goto free_data;
/* Submit IO */
env_atomic_inc(&mio->req_remaining);
ocf_volume_submit_io(io);
return 0;
free_data:
ctx_data_free(cache->owner, data);
put_io:
ocf_io_put(io);
error:
mio->error = err;
return err;
}
/*
*
*/
static int metadata_io(struct metadata_io *mio)
{
uint32_t max_count = metadata_io_max_page(mio->cache);
uint32_t this_count, written = 0;
uint32_t count = mio->count;
unsigned char step = 0;
int err;
ocf_cache_t cache = mio->cache;
/* Check direction value correctness */
switch (mio->dir) {
case OCF_WRITE:
case OCF_READ:
break;
default:
return -EINVAL;
}
env_atomic_set(&mio->req_remaining, 1);
env_completion_init(&mio->completion);
while (count) {
this_count = OCF_MIN(count, max_count);
err = metadata_submit_io(cache, mio, this_count, written);
if (err)
break;
/* Update counters */
count -= this_count;
written += this_count;
OCF_COND_RESCHED(step, 128);
}
if (env_atomic_dec_return(&mio->req_remaining) == 0)
env_completion_complete(&mio->completion);
/* Wait for all IO to be finished */
env_completion_wait(&mio->completion);
return mio->error;
}
/*
*
*/
int metadata_io_write_i(ocf_cache_t cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx)
{
struct metadata_io mio = {
.dir = OCF_WRITE,
.cache = cache,
.page = page,
.count = count,
.hndl_fn = hndl_fn,
.hndl_cntx = hndl_cntx,
};
return metadata_io(&mio);
}
/*
*
*/
int metadata_io_read_i(ocf_cache_t cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx)
{
struct metadata_io mio = {
.dir = OCF_READ,
.cache = cache,
.page = page,
.count = count,
.hndl_fn = hndl_fn,
.hndl_cntx = hndl_cntx,
};
return metadata_io(&mio);
}
/*
*
*/
static int metadata_io_write_fill(ocf_cache_t cache,
ctx_data_t *data, uint32_t page, void *context)
{
ctx_data_wr_check(cache->owner, data, context, PAGE_SIZE);
return 0;
}
/*
* Write request
*/
int metadata_io_write(ocf_cache_t cache,
void *data, uint32_t page)
{
struct metadata_io mio = {
.dir = OCF_WRITE,
.cache = cache,
.page = page,
.count = 1,
.hndl_fn = metadata_io_write_fill,
.hndl_cntx = data,
};
return metadata_io(&mio);
}
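With the synchronous helpers metadata_io_write(), metadata_io_write_i() and metadata_io_read_i() removed above, metadata pages are now written through metadata_io_write_i_asynch(). A rough usage sketch; my_segment, segment_fill(), segment_write() and segment_write_complete() are hypothetical, while the callback signatures and the ctx_data_wr_check() call follow this diff:

/* Hypothetical caller state describing a run of metadata pages */
struct my_segment {
	uint32_t first_page;
	uint32_t page_count;
	void **page_buf;	/* page_count buffers of PAGE_SIZE bytes each */
};

/* ocf_metadata_io_event_t: copy one page into the IO data buffer */
static int segment_fill(ocf_cache_t cache, ctx_data_t *data,
		uint32_t page, void *context)
{
	struct my_segment *seg = context;

	ctx_data_wr_check(cache->owner, data,
			seg->page_buf[page - seg->first_page], PAGE_SIZE);
	return 0;
}

/* ocf_metadata_io_end_t: called once, after all IOs for the range completed */
static void segment_write_complete(ocf_cache_t cache, void *context, int error)
{
	/* finish the operation here and propagate 'error' to the upper layer */
}

static int segment_write(ocf_cache_t cache, struct my_segment *seg)
{
	return metadata_io_write_i_asynch(cache, cache->mngt_queue, seg,
			seg->first_page, seg->page_count,
			segment_fill, segment_write_complete);
}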


@ -36,7 +36,7 @@ typedef int (*ocf_metadata_io_event_t)(ocf_cache_t cache,
* @param error - error * @param error - error
* @param page - page that was written * @param page - page that was written
*/ */
typedef void (*ocf_metadata_io_hndl_on_write_t)(ocf_cache_t cache, typedef void (*ocf_metadata_io_end_t)(ocf_cache_t cache,
void *context, int error); void *context, int error);
struct metadata_io_request_asynch; struct metadata_io_request_asynch;
@ -50,9 +50,9 @@ struct metadata_io_request {
uint32_t page; uint32_t page;
uint32_t count; uint32_t count;
ocf_metadata_io_event_t on_meta_fill; ocf_metadata_io_event_t on_meta_fill;
ocf_metadata_io_event_t on_meta_drain;
env_atomic req_remaining; env_atomic req_remaining;
ctx_data_t *data; ctx_data_t *data;
env_completion completion;
int error; int error;
struct metadata_io_request_asynch *asynch; struct metadata_io_request_asynch *asynch;
env_atomic finished; env_atomic finished;
@ -69,21 +69,6 @@ struct metadata_io_request_atomic {
int error; int error;
}; };
/*
*
*/
struct metadata_io {
int error;
int dir;
ocf_cache_t cache;
uint32_t page;
uint32_t count;
env_completion completion;
env_atomic req_remaining;
ocf_metadata_io_event_t hndl_fn;
void *hndl_cntx;
};
/* /*
* Asynchronous IO request context * Asynchronous IO request context
*/ */
@ -96,7 +81,7 @@ struct metadata_io_request_asynch {
env_atomic req_remaining; env_atomic req_remaining;
env_atomic req_active; env_atomic req_active;
uint32_t page; uint32_t page;
ocf_metadata_io_hndl_on_write_t on_complete; ocf_metadata_io_end_t on_complete;
}; };
/** /**
@ -110,70 +95,59 @@ struct metadata_io_request_asynch {
* @retval 0 Success * @retval 0 Success
* @retval Non-zero Error which will bee finally returned to the caller * @retval Non-zero Error which will bee finally returned to the caller
*/ */
typedef int (*ocf_metadata_atomic_io_event_t)( typedef int (*ocf_metadata_atomic_io_event_t)(void *priv, uint64_t sector_addr,
ocf_cache_t cache, uint64_t sector_addr,
uint32_t sector_no, ctx_data_t *data); uint32_t sector_no, ctx_data_t *data);
/** /**
* @brief Write page request * @brief Iterative asynchronous read atomic metadata
* *
* @param cache - Cache instance * @param cache - Cache instance
* @param data - Data to be written for specified page * @param queue - Queue to be used for IO
* @param page - Page of SSD (cache device) where data has to be placed * @param context - Read context
* @return 0 - No errors, otherwise error occurred * @param drain_hndl - Drain callback
*/ * @param compl_hndl - All IOs completed callback
int metadata_io_write(ocf_cache_t cache,
void *data, uint32_t page);
int metadata_io_read_i_atomic(ocf_cache_t cache,
ocf_metadata_atomic_io_event_t hndl);
/**
* @brief Iterative pages write
*
* @param cache - Cache instance
* @param page - Start page of SSD (cache device) where data will be written
* @param count - Counts of page to be processed
* @param hndl_fn - Fill callback is called to fill each pages with data
* @param hndl_cntx - Caller context which is passed on fill callback request
* *
* @return 0 - No errors, otherwise error occurred * @return 0 - No errors, otherwise error occurred
*/ */
int metadata_io_write_i(ocf_cache_t cache, int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue,
uint32_t page, uint32_t count, void *context, ocf_metadata_atomic_io_event_t drain_hndl,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx); ocf_metadata_io_end_t compl_hndl);
/**
* * @brief Iterative pages read
*
* @param cache - Cache instance
* @param page - Start page of SSD (cache device) of data will be read
* @param count - Counts of page to be processed
* @param hndl_fn - Callback function is called on each page read completion
* @param hndl_cntx - Caller context passed during handle function call
*
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_read_i(ocf_cache_t cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx);
/** /**
* @brief Iterative asynchronous pages write * @brief Iterative asynchronous pages write
* *
* @param cache - Cache instance * @param cache - Cache instance
* @param queue - Queue to be used for IO
* @param context - Read context * @param context - Read context
* @param page - Start page of SSD (cache device) where data will be written * @param page - Start page of SSD (cache device) where data will be written
* @param count - Counts of page to be processed * @param count - Counts of page to be processed
* @param fill - Fill callback * @param fill_hndl - Fill callback
* @param complete - All IOs completed callback * @param compl_hndl - All IOs completed callback
* *
* @return 0 - No errors, otherwise error occurred * @return 0 - No errors, otherwise error occurred
*/ */
int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
void *context, uint32_t page, uint32_t count, void *context, uint32_t page, uint32_t count,
ocf_metadata_io_event_t fill_hndl, ocf_metadata_io_event_t fill_hndl,
ocf_metadata_io_hndl_on_write_t compl_hndl); ocf_metadata_io_end_t compl_hndl);
/**
* @brief Iterative asynchronous pages read
*
* @param cache - Cache instance
* @param queue - Queue to be used for IO
* @param context - Read context
* @param page - Start page of SSD (cache device) where data will be read
* @param count - Counts of page to be processed
* @param drain_hndl - Drain callback
* @param compl_hndl - All IOs completed callback
*
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
void *context, uint32_t page, uint32_t count,
ocf_metadata_io_event_t drain_hndl,
ocf_metadata_io_end_t compl_hndl);
/** /**
* Function for initializing metadata io. * Function for initializing metadata io.


@ -197,31 +197,23 @@ static int _raw_ram_set(ocf_cache_t cache,
return _RAW_RAM_SET(raw, line, data); return _RAW_RAM_SET(raw, line, data);
} }
/* struct _raw_ram_load_all_context {
* RAM Implementation - Flush specified element from SSD struct ocf_metadata_raw *raw;
*/ ocf_metadata_end_t cmpl;
static int _raw_ram_flush(ocf_cache_t cache, void *priv;
struct ocf_metadata_raw *raw, ocf_cache_line_t line) };
{
OCF_DEBUG_PARAM(cache, "Line = %u", line);
OCF_DEBUG_PARAM(cache, "Page = %llu", _RAW_RAM_PAGE(raw, line));
ENV_BUG_ON(!_raw_is_valid(raw, line, raw->entry_size));
return metadata_io_write(cache, _RAW_RAM_ADDR_PAGE(raw, line),
_RAW_RAM_PAGE_SSD(raw, line));
}
/* /*
* RAM Implementation - Load all IO callback * RAM Implementation - Load all IO callback
*/ */
static int _raw_ram_load_all_io(ocf_cache_t cache, static int _raw_ram_load_all_drain(ocf_cache_t cache,
ctx_data_t *data, uint32_t page, void *context) ctx_data_t *data, uint32_t page, void *priv)
{ {
struct _raw_ram_load_all_context *context = priv;
struct ocf_metadata_raw *raw = context->raw;
uint32_t size = raw->entry_size * raw->entries_in_page;
ocf_cache_line_t line; ocf_cache_line_t line;
uint32_t raw_page; uint32_t raw_page;
struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *) context;
uint32_t size = raw->entry_size * raw->entries_in_page;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page)); ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
ENV_BUG_ON(size > PAGE_SIZE); ENV_BUG_ON(size > PAGE_SIZE);
@ -238,28 +230,60 @@ static int _raw_ram_load_all_io(ocf_cache_t cache,
return 0; return 0;
} }
static void _raw_ram_load_all_complete(ocf_cache_t cache,
void *priv, int error)
{
struct _raw_ram_load_all_context *context = priv;
context->cmpl(context->priv, error);
env_vfree(context);
}
/* /*
* RAM Implementation - Load all metadata elements from SSD * RAM Implementation - Load all metadata elements from SSD
*/ */
static int _raw_ram_load_all(ocf_cache_t cache, static void _raw_ram_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
struct ocf_metadata_raw *raw) ocf_metadata_end_t cmpl, void *priv)
{ {
struct _raw_ram_load_all_context *context;
int result;
OCF_DEBUG_TRACE(cache); OCF_DEBUG_TRACE(cache);
return metadata_io_read_i(cache, raw->ssd_pages_offset, context = env_vmalloc(sizeof(*context));
raw->ssd_pages, _raw_ram_load_all_io, raw); if (!context) {
cmpl(priv, -OCF_ERR_NO_MEM);
return;
}
context->raw = raw;
context->cmpl = cmpl;
context->priv = priv;
result = metadata_io_read_i_asynch(cache, cache->mngt_queue, context,
raw->ssd_pages_offset, raw->ssd_pages,
_raw_ram_load_all_drain, _raw_ram_load_all_complete);
if (result)
_raw_ram_load_all_complete(cache, context, result);
} }
struct _raw_ram_flush_all_context {
struct ocf_metadata_raw *raw;
ocf_metadata_end_t cmpl;
void *priv;
};
/* /*
* RAM Implementation - Flush IO callback - Fill page * RAM Implementation - Flush IO callback - Fill page
*/ */
static int _raw_ram_flush_all_fill(ocf_cache_t cache, static int _raw_ram_flush_all_fill(ocf_cache_t cache,
ctx_data_t *data, uint32_t page, void *context) ctx_data_t *data, uint32_t page, void *priv)
{ {
struct _raw_ram_flush_all_context *context = priv;
struct ocf_metadata_raw *raw = context->raw;
uint32_t size = raw->entry_size * raw->entries_in_page;
ocf_cache_line_t line; ocf_cache_line_t line;
uint32_t raw_page; uint32_t raw_page;
struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *)context;
uint32_t size = raw->entry_size * raw->entries_in_page;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page)); ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
ENV_BUG_ON(size > PAGE_SIZE); ENV_BUG_ON(size > PAGE_SIZE);
@ -275,16 +299,41 @@ static int _raw_ram_flush_all_fill(ocf_cache_t cache,
return 0; return 0;
} }
static void _raw_ram_flush_all_complete(ocf_cache_t cache,
void *priv, int error)
{
struct _raw_ram_flush_all_context *context = priv;
context->cmpl(context->priv, error);
env_vfree(context);
}
/* /*
* RAM Implementation - Flush all elements * RAM Implementation - Flush all elements
*/ */
static int _raw_ram_flush_all(ocf_cache_t cache, static void _raw_ram_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
struct ocf_metadata_raw *raw) ocf_metadata_end_t cmpl, void *priv)
{ {
struct _raw_ram_flush_all_context *context;
int result;
OCF_DEBUG_TRACE(cache); OCF_DEBUG_TRACE(cache);
return metadata_io_write_i(cache, raw->ssd_pages_offset, context = env_vmalloc(sizeof(*context));
raw->ssd_pages, _raw_ram_flush_all_fill, raw); if (!context) {
cmpl(priv, -OCF_ERR_NO_MEM);
return;
}
context->raw = raw;
context->cmpl = cmpl;
context->priv = priv;
result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
raw->ssd_pages_offset, raw->ssd_pages,
_raw_ram_flush_all_fill, _raw_ram_flush_all_complete);
if (result)
_raw_ram_flush_all_complete(cache, context, result);
} }
/* /*
@ -515,7 +564,6 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.set = _raw_ram_set, .set = _raw_ram_set,
.rd_access = _raw_ram_rd_access, .rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access, .wr_access = _raw_ram_wr_access,
.flush = _raw_ram_flush,
.load_all = _raw_ram_load_all, .load_all = _raw_ram_load_all,
.flush_all = _raw_ram_flush_all, .flush_all = _raw_ram_flush_all,
.flush_mark = _raw_ram_flush_mark, .flush_mark = _raw_ram_flush_mark,
@ -531,7 +579,6 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.set = raw_dynamic_set, .set = raw_dynamic_set,
.rd_access = raw_dynamic_rd_access, .rd_access = raw_dynamic_rd_access,
.wr_access = raw_dynamic_wr_access, .wr_access = raw_dynamic_wr_access,
.flush = raw_dynamic_flush,
.load_all = raw_dynamic_load_all, .load_all = raw_dynamic_load_all,
.flush_all = raw_dynamic_flush_all, .flush_all = raw_dynamic_flush_all,
.flush_mark = raw_dynamic_flush_mark, .flush_mark = raw_dynamic_flush_mark,
@ -547,7 +594,6 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.set = _raw_ram_set, .set = _raw_ram_set,
.rd_access = _raw_ram_rd_access, .rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access, .wr_access = _raw_ram_wr_access,
.flush = raw_volatile_flush,
.load_all = raw_volatile_load_all, .load_all = raw_volatile_load_all,
.flush_all = raw_volatile_flush_all, .flush_all = raw_volatile_flush_all,
.flush_mark = raw_volatile_flush_mark, .flush_mark = raw_volatile_flush_mark,
@ -563,7 +609,6 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.set = _raw_ram_set, .set = _raw_ram_set,
.rd_access = _raw_ram_rd_access, .rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access, .wr_access = _raw_ram_wr_access,
.flush = _raw_ram_flush,
.load_all = _raw_ram_load_all, .load_all = _raw_ram_load_all,
.flush_all = _raw_ram_flush_all, .flush_all = _raw_ram_flush_all,
.flush_mark = raw_atomic_flush_mark, .flush_mark = raw_atomic_flush_mark,


@ -81,13 +81,13 @@ struct ocf_metadata_raw {
* RAW container interface * RAW container interface
*/ */
struct raw_iface { struct raw_iface {
int (*init)(struct ocf_cache *cache, int (*init)(ocf_cache_t cache,
struct ocf_metadata_raw *raw); struct ocf_metadata_raw *raw);
int (*deinit)(struct ocf_cache *cache, int (*deinit)(ocf_cache_t cache,
struct ocf_metadata_raw *raw); struct ocf_metadata_raw *raw);
size_t (*size_of)(struct ocf_cache *cache, size_t (*size_of)(ocf_cache_t cache,
struct ocf_metadata_raw *raw); struct ocf_metadata_raw *raw);
/** /**
@ -98,43 +98,40 @@ struct raw_iface {
* *
* @return Number of pages (4 kiB) on cache device * @return Number of pages (4 kiB) on cache device
*/ */
uint32_t (*size_on_ssd)(struct ocf_cache *cache, uint32_t (*size_on_ssd)(ocf_cache_t cache,
struct ocf_metadata_raw *raw); struct ocf_metadata_raw *raw);
uint32_t (*checksum)(struct ocf_cache *cache, uint32_t (*checksum)(ocf_cache_t cache,
struct ocf_metadata_raw *raw); struct ocf_metadata_raw *raw);
int (*get)(struct ocf_cache *cache, int (*get)(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size); void *data, uint32_t size);
int (*set)(struct ocf_cache *cache, int (*set)(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size); void *data, uint32_t size);
const void* (*rd_access)(struct ocf_cache *cache, const void* (*rd_access)(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size); uint32_t size);
void* (*wr_access)(struct ocf_cache *cache, void* (*wr_access)(ocf_cache_t cache,
struct ocf_metadata_raw *raw, struct ocf_metadata_raw *raw,
ocf_cache_line_t line, uint32_t size); ocf_cache_line_t line, uint32_t size);
int (*flush)(struct ocf_cache *cache, void (*load_all)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
struct ocf_metadata_raw *raw, ocf_cache_line_t line); ocf_metadata_end_t cmpl, void *priv);
int (*load_all)(struct ocf_cache *cache, void (*flush_all)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
struct ocf_metadata_raw *raw); ocf_metadata_end_t cmpl, void *priv);
int (*flush_all)(struct ocf_cache *cache, void (*flush_mark)(ocf_cache_t cache, struct ocf_request *req,
struct ocf_metadata_raw *raw);
void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint32_t map_idx, int to_state, uint8_t start,
uint8_t stop); uint8_t stop);
int (*flush_do_asynch)(struct ocf_cache *cache, struct ocf_request *req, int (*flush_do_asynch)(ocf_cache_t cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, struct ocf_metadata_raw *raw,
ocf_req_end_t complete); ocf_req_end_t complete);
}; };
@ -146,7 +143,7 @@ struct raw_iface {
* @param raw - RAW descriptor * @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error * @return 0 - Operation success, otherwise error
*/ */
int ocf_metadata_raw_init(struct ocf_cache *cache, int ocf_metadata_raw_init(ocf_cache_t cache,
struct ocf_metadata_raw *raw); struct ocf_metadata_raw *raw);
/** /**
@ -156,7 +153,7 @@ int ocf_metadata_raw_init(struct ocf_cache *cache,
* @param raw - RAW descriptor * @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error * @return 0 - Operation success, otherwise error
*/ */
int ocf_metadata_raw_deinit(struct ocf_cache *cache, int ocf_metadata_raw_deinit(ocf_cache_t cache,
struct ocf_metadata_raw *raw); struct ocf_metadata_raw *raw);
/** /**
@ -166,7 +163,7 @@ int ocf_metadata_raw_deinit(struct ocf_cache *cache,
* @param raw RAW descriptor * @param raw RAW descriptor
* @return Memory footprint * @return Memory footprint
*/ */
static inline size_t ocf_metadata_raw_size_of(struct ocf_cache *cache, static inline size_t ocf_metadata_raw_size_of(ocf_cache_t cache,
struct ocf_metadata_raw *raw) struct ocf_metadata_raw *raw)
{ {
if (!raw->iface) if (!raw->iface)
@ -208,7 +205,7 @@ static inline uint32_t ocf_metadata_raw_checksum(struct ocf_cache* cache,
* @param size - Size of data * @param size - Size of data
* @return 0 - Operation success, otherwise error * @return 0 - Operation success, otherwise error
*/ */
static inline int ocf_metadata_raw_get(struct ocf_cache *cache, static inline int ocf_metadata_raw_get(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data, struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data,
uint32_t size) uint32_t size)
{ {
@ -225,7 +222,7 @@ static inline int ocf_metadata_raw_get(struct ocf_cache *cache,
* @param size - Size of data * @param size - Size of data
* @return 0 - Point to accessed data, in case of error NULL * @return 0 - Point to accessed data, in case of error NULL
*/ */
static inline void *ocf_metadata_raw_wr_access(struct ocf_cache *cache, static inline void *ocf_metadata_raw_wr_access(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size) uint32_t size)
{ {
@ -243,7 +240,7 @@ static inline void *ocf_metadata_raw_wr_access(struct ocf_cache *cache,
* @return 0 - Point to accessed data, in case of error NULL * @return 0 - Point to accessed data, in case of error NULL
*/ */
static inline const void *ocf_metadata_raw_rd_access( static inline const void *ocf_metadata_raw_rd_access(
struct ocf_cache *cache, struct ocf_metadata_raw *raw, ocf_cache_t cache, struct ocf_metadata_raw *raw,
ocf_cache_line_t line, uint32_t size) ocf_cache_line_t line, uint32_t size)
{ {
return raw->iface->rd_access(cache, raw, line, size); return raw->iface->rd_access(cache, raw, line, size);
@ -259,38 +256,26 @@ static inline const void *ocf_metadata_raw_rd_access(
* @param size - Size of data * @param size - Size of data
* @return 0 - Operation success, otherwise error * @return 0 - Operation success, otherwise error
*/ */
static inline int ocf_metadata_raw_set(struct ocf_cache *cache, static inline int ocf_metadata_raw_set(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data, struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data,
uint32_t size) uint32_t size)
{ {
return raw->iface->set(cache, raw, line, data, size); return raw->iface->set(cache, raw, line, data, size);
} }
/**
* @brief Flush specified element of metadata into SSD
*
* @param cache - Cache instance
* @param raw - RAW descriptor
* @param line - Cache line to be flushed
* @return 0 - Operation success, otherwise error
*/
static inline int ocf_metadata_raw_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
return raw->iface->flush(cache, raw, line);
}
/** /**
* @brief Load all entries from SSD cache (cahce cache) * @brief Load all entries from SSD cache (cahce cache)
* *
* @param cache - Cache instance * @param cache - Cache instance
* @param raw - RAW descriptor * @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error * @param cmpl - Completion callback
* @param priv - Completion callback context
*/ */
static inline int ocf_metadata_raw_load_all(struct ocf_cache *cache, static inline void ocf_metadata_raw_load_all(ocf_cache_t cache,
struct ocf_metadata_raw *raw) struct ocf_metadata_raw *raw,
ocf_metadata_end_t cmpl, void *priv)
{ {
return raw->iface->load_all(cache, raw); raw->iface->load_all(cache, raw, cmpl, priv);
} }
/** /**
@ -298,23 +283,25 @@ static inline int ocf_metadata_raw_load_all(struct ocf_cache *cache,
* *
* @param cache - Cache instance * @param cache - Cache instance
* @param raw - RAW descriptor * @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error * @param cmpl - Completion callback
* @param priv - Completion callback context
*/ */
static inline int ocf_metadata_raw_flush_all(struct ocf_cache *cache, static inline void ocf_metadata_raw_flush_all(ocf_cache_t cache,
struct ocf_metadata_raw *raw) struct ocf_metadata_raw *raw,
ocf_metadata_end_t cmpl, void *priv)
{ {
return raw->iface->flush_all(cache, raw); raw->iface->flush_all(cache, raw, cmpl, priv);
} }
static inline void ocf_metadata_raw_flush_mark(struct ocf_cache *cache, static inline void ocf_metadata_raw_flush_mark(ocf_cache_t cache,
struct ocf_metadata_raw *raw, struct ocf_request *req, struct ocf_metadata_raw *raw, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop) uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{ {
raw->iface->flush_mark(cache, req, map_idx, to_state, start, stop); raw->iface->flush_mark(cache, req, map_idx, to_state, start, stop);
} }
static inline int ocf_metadata_raw_flush_do_asynch(struct ocf_cache *cache, static inline int ocf_metadata_raw_flush_do_asynch(ocf_cache_t cache,
struct ocf_request *req, struct ocf_metadata_raw *raw, struct ocf_request *req, struct ocf_metadata_raw *raw,
ocf_req_end_t complete) ocf_req_end_t complete)
{ {


@ -8,7 +8,10 @@
#include "metadata_raw.h" #include "metadata_raw.h"
#include "metadata_raw_dynamic.h" #include "metadata_raw_dynamic.h"
#include "metadata_io.h" #include "metadata_io.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../utils/utils_io.h" #include "../utils/utils_io.h"
#include "../utils/utils_req.h"
#include "../ocf_def_priv.h" #include "../ocf_def_priv.h"
#define OCF_METADATA_RAW_DEBUG 0 #define OCF_METADATA_RAW_DEBUG 0
@ -59,7 +62,7 @@ struct _raw_ctrl {
void *pages[]; void *pages[];
}; };
static void *_raw_dynamic_get_item(struct ocf_cache *cache, static void *_raw_dynamic_get_item(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, uint32_t size) struct ocf_metadata_raw *raw, ocf_cache_line_t line, uint32_t size)
{ {
void *new = NULL; void *new = NULL;
@ -110,7 +113,7 @@ _raw_dynamic_get_item_SKIP:
/* /*
* RAM DYNAMIC Implementation - De-Initialize * RAM DYNAMIC Implementation - De-Initialize
*/ */
int raw_dynamic_deinit(struct ocf_cache *cache, int raw_dynamic_deinit(ocf_cache_t cache,
struct ocf_metadata_raw *raw) struct ocf_metadata_raw *raw)
{ {
uint32_t i; uint32_t i;
@ -133,7 +136,7 @@ int raw_dynamic_deinit(struct ocf_cache *cache,
/* /*
* RAM DYNAMIC Implementation - Initialize * RAM DYNAMIC Implementation - Initialize
*/ */
int raw_dynamic_init(struct ocf_cache *cache, int raw_dynamic_init(ocf_cache_t cache,
struct ocf_metadata_raw *raw) struct ocf_metadata_raw *raw)
{ {
struct _raw_ctrl *ctrl; struct _raw_ctrl *ctrl;
@ -163,7 +166,7 @@ int raw_dynamic_init(struct ocf_cache *cache,
/* /*
* RAW DYNAMIC Implementation - Size of * RAW DYNAMIC Implementation - Size of
*/ */
size_t raw_dynamic_size_of(struct ocf_cache *cache, size_t raw_dynamic_size_of(ocf_cache_t cache,
struct ocf_metadata_raw *raw) struct ocf_metadata_raw *raw)
{ {
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
@ -185,7 +188,7 @@ size_t raw_dynamic_size_of(struct ocf_cache *cache,
/* /*
* RAW DYNAMIC Implementation - Size on SSD * RAW DYNAMIC Implementation - Size on SSD
*/ */
uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache, uint32_t raw_dynamic_size_on_ssd(ocf_cache_t cache,
struct ocf_metadata_raw *raw) struct ocf_metadata_raw *raw)
{ {
const size_t alignment = 128 * KiB / PAGE_SIZE; const size_t alignment = 128 * KiB / PAGE_SIZE;
@ -196,7 +199,7 @@ uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache,
/* /*
* RAM DYNAMIC Implementation - Checksum * RAM DYNAMIC Implementation - Checksum
*/ */
uint32_t raw_dynamic_checksum(struct ocf_cache *cache, uint32_t raw_dynamic_checksum(ocf_cache_t cache,
struct ocf_metadata_raw *raw) struct ocf_metadata_raw *raw)
{ {
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
@ -216,7 +219,7 @@ uint32_t raw_dynamic_checksum(struct ocf_cache *cache,
/* /*
* RAM DYNAMIC Implementation - Get * RAM DYNAMIC Implementation - Get
*/ */
int raw_dynamic_get(struct ocf_cache *cache, int raw_dynamic_get(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size) void *data, uint32_t size)
{ {
@ -234,7 +237,7 @@ int raw_dynamic_get(struct ocf_cache *cache,
/* /*
* RAM DYNAMIC Implementation - Set * RAM DYNAMIC Implementation - Set
*/ */
int raw_dynamic_set(struct ocf_cache *cache, int raw_dynamic_set(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size) void *data, uint32_t size)
{ {
@ -251,7 +254,7 @@ int raw_dynamic_set(struct ocf_cache *cache,
/* /*
* RAM DYNAMIC Implementation - access * RAM DYNAMIC Implementation - access
*/ */
const void *raw_dynamic_rd_access(struct ocf_cache *cache, const void *raw_dynamic_rd_access(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size) uint32_t size)
{ {
@ -261,144 +264,240 @@ const void *raw_dynamic_rd_access(struct ocf_cache *cache,
/* /*
* RAM DYNAMIC Implementation - access * RAM DYNAMIC Implementation - access
*/ */
void *raw_dynamic_wr_access(struct ocf_cache *cache, void *raw_dynamic_wr_access(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size) uint32_t size)
{ {
return _raw_dynamic_get_item(cache, raw, line, size); return _raw_dynamic_get_item(cache, raw, line, size);
} }
int raw_dynamic_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
uint32_t page = _RAW_DYNAMIC_PAGE(raw, line);
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
OCF_DEBUG_PARAM(cache, "Line %u, page = %u", line, page);
ENV_BUG_ON(!ctrl->pages[page]);
return metadata_io_write(cache, ctrl->pages[page],
raw->ssd_pages_offset + page);
}
/* /*
* RAM DYNAMIC Implementation - Load all * RAM DYNAMIC Implementation - Load all
*/ */
#define RAW_DYNAMIC_LOAD_PAGES 128 #define RAW_DYNAMIC_LOAD_PAGES 128
int raw_dynamic_load_all(struct ocf_cache *cache, struct raw_dynamic_load_all_context {
struct ocf_metadata_raw *raw) struct ocf_metadata_raw *raw;
{ struct ocf_request *req;
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; ocf_cache_t cache;
uint64_t i = 0, i_page = 0;
uint64_t count = RAW_DYNAMIC_LOAD_PAGES;
int error = 0, cmp;
struct ocf_io *io; struct ocf_io *io;
ctx_data_t *data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES); ctx_data_t *data;
char *page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL); uint8_t *zpage;
char *zpage = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL); uint8_t *page;
uint64_t i;
int error;
if (!data || !page || !zpage) { ocf_metadata_end_t cmpl;
ctx_data_free(cache->owner, data); void *priv;
env_free(page); };
env_free(zpage);
return -ENOMEM; static void raw_dynamic_load_all_complete(
struct raw_dynamic_load_all_context *context, int error)
{
context->cmpl(context->priv, error);
ocf_req_put(context->req);
env_free(context->page);
env_free(context->zpage);
ctx_data_free(context->cache->owner, context->data);
env_vfree(context);
}
static int raw_dynamic_load_all_update(struct ocf_request *req);
static const struct ocf_io_if _io_if_raw_dynamic_load_all_update = {
.read = raw_dynamic_load_all_update,
.write = raw_dynamic_load_all_update,
};
static void raw_dynamic_load_all_read_end(struct ocf_io *io, int error)
{
struct raw_dynamic_load_all_context *context = io->priv1;
ocf_io_put(io);
if (error) {
raw_dynamic_load_all_complete(context, error);
return;
} }
context->req->io_if = &_io_if_raw_dynamic_load_all_update;
ocf_engine_push_req_front(context->req, true);
}
static int raw_dynamic_load_all_read(struct ocf_request *req)
{
struct raw_dynamic_load_all_context *context = req->priv;
struct ocf_metadata_raw *raw = context->raw;
uint64_t count;
int result;
count = OCF_MIN(RAW_DYNAMIC_LOAD_PAGES, raw->ssd_pages - context->i);
/* Allocate IO */
context->io = ocf_new_cache_io(context->cache);
if (!context->io) {
raw_dynamic_load_all_complete(context, -OCF_ERR_NO_MEM);
return 0;
}
/* Setup IO */
result = ocf_io_set_data(context->io, context->data, 0);
if (result) {
ocf_io_put(context->io);
raw_dynamic_load_all_complete(context, result);
return 0;
}
ocf_io_configure(context->io,
PAGES_TO_BYTES(raw->ssd_pages_offset + context->i),
PAGES_TO_BYTES(count), OCF_READ, 0, 0);
ocf_io_set_queue(context->io, req->io_queue);
ocf_io_set_cmpl(context->io, context, NULL,
raw_dynamic_load_all_read_end);
/* Submit IO */
ocf_volume_submit_io(context->io);
return 0;
}
static const struct ocf_io_if _io_if_raw_dynamic_load_all_read = {
.read = raw_dynamic_load_all_read,
.write = raw_dynamic_load_all_read,
};
static int raw_dynamic_load_all_update(struct ocf_request *req)
{
struct raw_dynamic_load_all_context *context = req->priv;
struct ocf_metadata_raw *raw = context->raw;
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
ocf_cache_t cache = context->cache;
uint64_t count = BYTES_TO_PAGES(context->io->bytes);
uint64_t i_page;
int result = 0;
int cmp;
/* Reset head of data buffer */
ctx_data_seek_check(context->cache->owner, context->data,
ctx_data_seek_begin, 0);
for (i_page = 0; i_page < count; i_page++, context->i++) {
if (!context->page) {
context->page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (!context->page) {
/* Allocation error */
result = -OCF_ERR_NO_MEM;
break;
}
}
ctx_data_rd_check(cache->owner, context->page,
context->data, PAGE_SIZE);
result = env_memcmp(context->zpage, PAGE_SIZE, context->page,
PAGE_SIZE, &cmp);
if (result)
break;
/* When page is zero set, no need to allocate space for it */
if (cmp == 0) {
OCF_DEBUG_PARAM(cache, "Zero loaded %llu", i);
continue;
}
OCF_DEBUG_PARAM(cache, "Non-zero loaded %llu", i);
ctrl->pages[context->i] = context->page;
context->page = NULL;
env_atomic_inc(&ctrl->count);
}
if (result || context->i >= raw->ssd_pages) {
raw_dynamic_load_all_complete(context, result);
return 0;
}
context->req->io_if = &_io_if_raw_dynamic_load_all_read;
ocf_engine_push_req_front(context->req, true);
return 0;
}
void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
ocf_metadata_end_t cmpl, void *priv)
{
struct raw_dynamic_load_all_context *context;
int result;
OCF_DEBUG_TRACE(cache); OCF_DEBUG_TRACE(cache);
/* Loading, need to load all metadata, when page is zero set, no need context = env_vzalloc(sizeof(*context));
* to allocate space for it if (!context) {
*/ cmpl(priv, -OCF_ERR_NO_MEM);
return;
while (i < raw->ssd_pages) {
if (i + count > raw->ssd_pages)
count = raw->ssd_pages - i;
/* Allocate IO */
io = ocf_new_cache_io(cache);
if (!io) {
error = -ENOMEM;
break;
}
/* Setup IO */
error = ocf_io_set_data(io, data, 0);
if (error) {
ocf_io_put(io);
break;
}
ocf_io_configure(io,
PAGES_TO_BYTES(raw->ssd_pages_offset + i),
PAGES_TO_BYTES(count), OCF_READ, 0, 0);
/* Submit IO */
error = ocf_submit_io_wait(io);
ocf_io_put(io);
io = NULL;
if (error)
break;
/* Reset head of data buffer */
ctx_data_seek_check(cache->owner, data,
ctx_data_seek_begin, 0);
for (i_page = 0; i_page < count; i_page++, i++) {
if (!page) {
page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (!page) {
/* Allocation error */
error = -ENOMEM;
break;
}
}
ctx_data_rd_check(cache->owner, page, data, PAGE_SIZE);
error = env_memcmp(zpage, PAGE_SIZE, page,
PAGE_SIZE, &cmp);
if (error)
break;
if (cmp == 0) {
OCF_DEBUG_PARAM(cache, "Zero loaded %llu", i);
continue;
}
OCF_DEBUG_PARAM(cache, "Non-zero loaded %llu", i);
ctrl->pages[i] = page;
page = NULL;
env_atomic_inc(&ctrl->count);
}
if (error)
break;
} }
env_free(zpage); context->raw = raw;
env_free(page); context->cache = cache;
ctx_data_free(cache->owner, data); context->cmpl = cmpl;
context->priv = priv;
return error; context->data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES);
if (!context->data) {
result = -OCF_ERR_NO_MEM;
goto err_data;
}
context->zpage = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (!context->zpage) {
result = -OCF_ERR_NO_MEM;
goto err_zpage;
}
context->req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0);
if (!context->req) {
result = -OCF_ERR_NO_MEM;
goto err_req;
}
context->req->info.internal = true;
context->req->priv = context;
context->req->io_if = &_io_if_raw_dynamic_load_all_read;
ocf_engine_push_req_front(context->req, true);
return;
err_req:
env_free(context->zpage);
err_zpage:
ctx_data_free(cache->owner, context->data);
err_data:
env_vfree(context);
cmpl(priv, result);
} }
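raw_dynamic_load_all() above is structured as a small state machine driven by an ocf_request: each step installs an ocf_io_if whose handlers point at the next step and re-queues the request with ocf_engine_push_req_front(), so no step ever blocks. A stripped-down sketch of the hand-over idiom; the step names and the io->priv1 wiring are illustrative, the io_if/push-front usage mirrors the code above:

static int step_update(struct ocf_request *req);

static const struct ocf_io_if io_if_update = {
	.read = step_update,
	.write = step_update,
};

/* IO completion: hand the request over to the next step on its queue
 * instead of doing the work in the completion context, as
 * raw_dynamic_load_all_read_end() does above. */
static void my_read_end(struct ocf_io *io, int error)
{
	struct ocf_request *req = io->priv1;	/* illustrative wiring */

	ocf_io_put(io);
	req->io_if = &io_if_update;
	ocf_engine_push_req_front(req, true);
}

static int step_update(struct ocf_request *req)
{
	/* Runs on the request's queue; consume the data here, then either
	 * call the user completion or submit the next read chunk. */
	return 0;
}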
/* /*
* RAM DYNAMIC Implementation - Flush all * RAM DYNAMIC Implementation - Flush all
*/ */
struct raw_dynamic_flush_all_context {
struct ocf_metadata_raw *raw;
ocf_metadata_end_t cmpl;
void *priv;
};
/* /*
* RAM Implementation - Flush IO callback - Fill page * RAM Implementation - Flush IO callback - Fill page
*/ */
static int _raw_dynamic_flush_all_fill(struct ocf_cache *cache, static int raw_dynamic_flush_all_fill(ocf_cache_t cache,
ctx_data_t *data, uint32_t page, void *context) ctx_data_t *data, uint32_t page, void *priv)
{ {
uint32_t raw_page; struct raw_dynamic_flush_all_context *context = priv;
struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *)context; struct ocf_metadata_raw *raw = context->raw;
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
uint32_t raw_page;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page)); ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
@ -417,18 +516,45 @@ static int _raw_dynamic_flush_all_fill(struct ocf_cache *cache,
return 0; return 0;
} }
int raw_dynamic_flush_all(struct ocf_cache *cache, static void raw_dynamic_flush_all_complete(ocf_cache_t cache,
struct ocf_metadata_raw *raw) void *priv, int error)
{ {
struct raw_dynamic_flush_all_context *context = priv;
context->cmpl(context->priv, error);
env_vfree(context);
}
void raw_dynamic_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
ocf_metadata_end_t cmpl, void *priv)
{
struct raw_dynamic_flush_all_context *context;
int result;
OCF_DEBUG_TRACE(cache); OCF_DEBUG_TRACE(cache);
return metadata_io_write_i(cache, raw->ssd_pages_offset,
raw->ssd_pages, _raw_dynamic_flush_all_fill, raw); context = env_vmalloc(sizeof(*context));
if (!context) {
cmpl(priv, -OCF_ERR_NO_MEM);
return;
}
context->raw = raw;
context->cmpl = cmpl;
context->priv = priv;
result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
raw->ssd_pages_offset, raw->ssd_pages,
raw_dynamic_flush_all_fill,
raw_dynamic_flush_all_complete);
if (result)
cmpl(priv, result);
} }
/* /*
* RAM DYNAMIC Implementation - Mark to Flush * RAM DYNAMIC Implementation - Mark to Flush
*/ */
void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *req, void raw_dynamic_flush_mark(ocf_cache_t cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop) uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{ {
ENV_BUG(); ENV_BUG();
@ -437,7 +563,7 @@ void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
/* /*
* RAM DYNAMIC Implementation - Do flushing asynchronously * RAM DYNAMIC Implementation - Do flushing asynchronously
*/ */
int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req, int raw_dynamic_flush_do_asynch(ocf_cache_t cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, ocf_req_end_t complete) struct ocf_metadata_raw *raw, ocf_req_end_t complete)
{ {
ENV_BUG(); ENV_BUG();


@@ -14,91 +14,85 @@
/*
 * RAW DYNAMIC - Initialize
 */
-int raw_dynamic_init(struct ocf_cache *cache,
+int raw_dynamic_init(ocf_cache_t cache,
        struct ocf_metadata_raw *raw);

/*
 * RAW DYNAMIC - De-Initialize
 */
-int raw_dynamic_deinit(struct ocf_cache *cache,
+int raw_dynamic_deinit(ocf_cache_t cache,
        struct ocf_metadata_raw *raw);

/*
 * RAW DYNAMIC - Get size of memory footprint of this RAW metadata container
 */
-size_t raw_dynamic_size_of(struct ocf_cache *cache,
+size_t raw_dynamic_size_of(ocf_cache_t cache,
        struct ocf_metadata_raw *raw);

/*
 * RAW DYNAMIC Implementation - Size on SSD
 */
-uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache,
+uint32_t raw_dynamic_size_on_ssd(ocf_cache_t cache,
        struct ocf_metadata_raw *raw);

/*
 * RAW DYNAMIC Implementation - Checksum
 */
-uint32_t raw_dynamic_checksum(struct ocf_cache *cache,
+uint32_t raw_dynamic_checksum(ocf_cache_t cache,
        struct ocf_metadata_raw *raw);

/*
 * RAW DYNAMIC - Get specified entry
 */
-int raw_dynamic_get(struct ocf_cache *cache,
+int raw_dynamic_get(ocf_cache_t cache,
        struct ocf_metadata_raw *raw, ocf_cache_line_t line,
        void *data, uint32_t size);

/*
 * RAW DYNAMIC - Set specified entry
 */
-int raw_dynamic_set(struct ocf_cache *cache,
+int raw_dynamic_set(ocf_cache_t cache,
        struct ocf_metadata_raw *raw, ocf_cache_line_t line,
        void *data, uint32_t size);

/*
 * RAW DYNAMIC - Read only access for specified entry
 */
-const void *raw_dynamic_rd_access(struct ocf_cache *cache,
+const void *raw_dynamic_rd_access(ocf_cache_t cache,
        struct ocf_metadata_raw *raw, ocf_cache_line_t line,
        uint32_t size);

/*
 * RAW DYNAMIC - Write access for specified entry
 */
-void *raw_dynamic_wr_access(struct ocf_cache *cache,
+void *raw_dynamic_wr_access(ocf_cache_t cache,
        struct ocf_metadata_raw *raw, ocf_cache_line_t line,
        uint32_t size);

-/*
- * RAW DYNAMIC - Flush specified entry
- */
-int raw_dynamic_flush(struct ocf_cache *cache,
-        struct ocf_metadata_raw *raw, ocf_cache_line_t line);

/*
 * RAW DYNAMIC - Load all metadata of this RAW metadata container
 * from cache device
 */
-int raw_dynamic_load_all(struct ocf_cache *cache,
-        struct ocf_metadata_raw *raw);
+void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+        ocf_metadata_end_t cmpl, void *priv);

/*
 * RAW DYNAMIC - Flush all metadata of this RAW metadata container
 * to cache device
 */
-int raw_dynamic_flush_all(struct ocf_cache *cache,
-        struct ocf_metadata_raw *raw);
+void raw_dynamic_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+        ocf_metadata_end_t cmpl, void *priv);

/*
 * RAW DYNAMIC - Mark specified entry to be flushed
 */
-void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+void raw_dynamic_flush_mark(ocf_cache_t cache, struct ocf_request *req,
        uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);

/*
 * DYNAMIC Implementation - Do Flush Asynchronously
 */
-int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
+int raw_dynamic_flush_do_asynch(ocf_cache_t cache, struct ocf_request *req,
        struct ocf_metadata_raw *raw, ocf_req_end_t complete);


@@ -12,7 +12,7 @@
/*
 * RAW volatile Implementation - Size on SSD
 */
-uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache,
+uint32_t raw_volatile_size_on_ssd(ocf_cache_t cache,
        struct ocf_metadata_raw *raw)
{
    return 0;
@@ -21,43 +21,34 @@ uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache,
/*
 * RAW volatile Implementation - Checksum
 */
-uint32_t raw_volatile_checksum(struct ocf_cache *cache,
+uint32_t raw_volatile_checksum(ocf_cache_t cache,
        struct ocf_metadata_raw *raw)
{
    return 0;
}

-/*
- * RAW volatile Implementation - Flush specified element to SSD
- */
-int raw_volatile_flush(struct ocf_cache *cache,
-        struct ocf_metadata_raw *raw, ocf_cache_line_t line)
-{
-    return 0;
-}

/*
 * RAW volatile Implementation - Load all metadata elements from SSD
 */
-int raw_volatile_load_all(struct ocf_cache *cache,
-        struct ocf_metadata_raw *raw)
+void raw_volatile_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+        ocf_metadata_end_t cmpl, void *priv)
{
-    return -ENOTSUP;
+    cmpl(priv, -ENOTSUP);
}

/*
 * RAM Implementation - Flush all elements
 */
-int raw_volatile_flush_all(struct ocf_cache *cache,
-        struct ocf_metadata_raw *raw)
+void raw_volatile_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+        ocf_metadata_end_t cmpl, void *priv)
{
-    return 0;
+    cmpl(priv, -ENOTSUP);
}
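Note that the volatile container now reports its status only through the completion callback: flush_all previously returned 0, while the asynchronous version completes with -ENOTSUP, matching load_all. A short illustrative caller (the callback name is an assumption, not code from this patch):

    /* Illustrative: with the void signature, errors such as -ENOTSUP arrive
     * only in the ocf_metadata_end_t completion, never as a return value. */
    static void volatile_flush_end(void *priv, int error)
    {
        if (error == -ENOTSUP) {
            /* the volatile (RAM-only) section has nothing to persist */
        }
    }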
/*
 * RAM RAM Implementation - Mark to Flush
 */
-void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+void raw_volatile_flush_mark(ocf_cache_t cache, struct ocf_request *req,
        uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
}
@@ -65,7 +56,7 @@ void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
/*
 * RAM RAM Implementation - Do Flush asynchronously
 */
-int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
+int raw_volatile_flush_do_asynch(ocf_cache_t cache,
        struct ocf_request *req, struct ocf_metadata_raw *raw,
        ocf_req_end_t complete)
{


@@ -9,43 +9,37 @@
/*
 * RAW volatile Implementation - Size on SSD
 */
-uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache,
+uint32_t raw_volatile_size_on_ssd(ocf_cache_t cache,
        struct ocf_metadata_raw *raw);

/*
 * RAW volatile Implementation - Checksum
 */
-uint32_t raw_volatile_checksum(struct ocf_cache *cache,
+uint32_t raw_volatile_checksum(ocf_cache_t cache,
        struct ocf_metadata_raw *raw);

-/*
- * RAW volatile Implementation - Flush specified element to SSD
- */
-int raw_volatile_flush(struct ocf_cache *cache,
-        struct ocf_metadata_raw *raw, ocf_cache_line_t line);

/*
 * RAW volatile Implementation - Load all metadata elements from SSD
 */
-int raw_volatile_load_all(struct ocf_cache *cache,
-        struct ocf_metadata_raw *raw);
+void raw_volatile_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+        ocf_metadata_end_t cmpl, void *priv);

/*
 * RAW volatile Implementation - Flush all elements
 */
-int raw_volatile_flush_all(struct ocf_cache *cache,
-        struct ocf_metadata_raw *raw);
+void raw_volatile_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+        ocf_metadata_end_t cmpl, void *priv);

/*
 * RAM RAW volatile Implementation - Mark to Flush
 */
-void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+void raw_volatile_flush_mark(ocf_cache_t cache, struct ocf_request *req,
        uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);

/*
 * RAM RAW volatile Implementation - Do Flush asynchronously
 */
-int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
+int raw_volatile_flush_do_asynch(ocf_cache_t cache,
        struct ocf_request *req, struct ocf_metadata_raw *raw,
        ocf_req_end_t complete);
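These declarations keep the volatile container signature-compatible with the dynamic one, so both can sit behind the same set of function pointers. A rough sketch of such a dispatch table; the struct and field names below are assumptions for illustration and are not the repository's actual raw interface definition:

    /* Hypothetical ops table, shown only to illustrate the shared signatures. */
    struct raw_container_ops {
        void (*load_all)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
                ocf_metadata_end_t cmpl, void *priv);
        void (*flush_all)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
                ocf_metadata_end_t cmpl, void *priv);
    };

    static const struct raw_container_ops volatile_ops = {
        .load_all = raw_volatile_load_all,
        .flush_all = raw_volatile_flush_all,
    };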


@@ -6,6 +6,7 @@
#ifndef __METADATA_STRUCTS_H__
#define __METADATA_STRUCTS_H__

+#include "metadata_common.h"
#include "../eviction/eviction.h"
#include "../cleaning/cleaning.h"
#include "../ocf_request.h"
@@ -161,33 +162,31 @@ struct ocf_metadata_iface {
     * @brief Load metadata from cache device
     *
     * @param[in] cache - Cache instance
-     * @return 0 - Operation success otherwise failure
+     * @param[in] cmpl - Completion callback
+     * @param[in] priv - Completion callback context
     */
-    int (*load_all)(struct ocf_cache *cache);
+    void (*load_all)(ocf_cache_t cache,
+            ocf_metadata_end_t cmpl, void *priv);

    /**
     * @brief Load metadata from recovery procedure
-     * recovery
+     *
     * @param[in] cache - Cache instance
-     * @return 0 - Operation success otherwise failure
+     * @param[in] cmpl - Completion callback
+     * @param[in] priv - Completion callback context
     */
-    int (*load_recovery)(struct ocf_cache *cache);
+    void (*load_recovery)(ocf_cache_t cache,
+            ocf_metadata_end_t cmpl, void *priv);

    /**
     * @brief Flush metadata into cahce cache
     *
     * @param[in] cache - Cache instance
-     * @return 0 - Operation success otherwise failure
+     * @param[in] cmpl - Completion callback
+     * @param[in] priv - Completion callback context
     */
-    int (*flush_all)(struct ocf_cache *cache);
+    void (*flush_all)(ocf_cache_t cache,
+            ocf_metadata_end_t cmpl, void *priv);

-    /**
-     * @brief Flush metadata for specified cache line
-     *
-     * @param[in] cache - Cache instance
-     * @param[in] line - cache line which to be flushed
-     */
-    void (*flush)(struct ocf_cache *cache, ocf_cache_line_t line);

    /**
     * @brief Mark specified cache line to be flushed
@@ -217,12 +216,15 @@ struct ocf_metadata_iface {
    enum ocf_metadata_shutdown_status (*get_shutdown_status)(
            struct ocf_cache *cache);

-    int (*set_shutdown_status)(struct ocf_cache *cache,
-            enum ocf_metadata_shutdown_status shutdown_status);
+    void (*set_shutdown_status)(ocf_cache_t cache,
+            enum ocf_metadata_shutdown_status shutdown_status,
+            ocf_metadata_end_t cmpl, void *priv);

-    int (*load_superblock)(struct ocf_cache *cache);
+    void (*load_superblock)(ocf_cache_t cache,
+            ocf_metadata_end_t cmpl, void *priv);

-    int (*flush_superblock)(struct ocf_cache *cache);
+    void (*flush_superblock)(ocf_cache_t cache,
+            ocf_metadata_end_t cmpl, void *priv);

    uint64_t (*get_reserved_lba)(struct ocf_cache *cache);
@@ -249,16 +251,6 @@ struct ocf_metadata_iface {
            ocf_cache_line_t line,
            union eviction_policy_meta *eviction_policy);

-    /**
-     * @brief Flush eviction policy for given cache line
-     *
-     * @param[in] cache - Cache instance
-     * @param[in] line - Cache line for which flushing has to be performed
-     */
-    void (*flush_eviction_policy)(struct ocf_cache *cache,
-            ocf_cache_line_t line);

    /**
     * @brief Get cleaning policy
     *
@@ -282,15 +274,6 @@ struct ocf_metadata_iface {
            ocf_cache_line_t line,
            struct cleaning_policy_meta *cleaning_policy);

-    /**
-     * @brief Flush cleaning policy for given cache line
-     *
-     * @param[in] cache - Cache instance
-     * @param[in] line - Cache line for which flushing has to be performed
-     */
-    void (*flush_cleaning_policy)(struct ocf_cache *cache,
-            ocf_cache_line_t line);

    /**
     * @brief Get hash table for specified index
     *
@@ -312,15 +295,6 @@ struct ocf_metadata_iface {
    void (*set_hash)(struct ocf_cache *cache,
            ocf_cache_line_t index, ocf_cache_line_t line);

-    /**
-     * @brief Flush has table for specified index
-     *
-     * @param[in] cache - Cache instance
-     * @param[in] index - Hash table index
-     */
-    void (*flush_hash)(struct ocf_cache *cache,
-            ocf_cache_line_t index);

    /**
     * @brief Get hash table entries
     *
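With these interface members taking a completion callback instead of returning a status, an implementation is expected to invoke cmpl(priv, error) exactly once when the operation finishes, possibly from an I/O completion context rather than from the submitting thread. A minimal sketch of that calling convention; the function below is illustrative and is not one of the implementations in this patch:

    /* Illustrative implementation of the new flush_all member signature. */
    static void example_flush_all(ocf_cache_t cache,
            ocf_metadata_end_t cmpl, void *priv)
    {
        int error = 0;

        /* ... submit metadata I/O here; when it completes ... */
        cmpl(priv, error);    /* report the result exactly once */
    }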


@@ -66,36 +66,26 @@ static inline void ocf_metadata_set_shutdown_status(ocf_cache_t cache,
        enum ocf_metadata_shutdown_status shutdown_status,
        ocf_metadata_end_t cmpl, void *priv)
{
-    int result;
-    result = cache->metadata.iface.set_shutdown_status(cache,
-            shutdown_status);
-    cmpl(priv, result);
+    cache->metadata.iface.set_shutdown_status(cache, shutdown_status,
+            cmpl, priv);
}

static inline void ocf_metadata_load_superblock(ocf_cache_t cache,
        ocf_metadata_end_t cmpl, void *priv)
{
-    int result;
-    result = cache->metadata.iface.load_superblock(cache);
-    cmpl(priv, result);
+    cache->metadata.iface.load_superblock(cache, cmpl, priv);
}

static inline void ocf_metadata_flush_superblock(ocf_cache_t cache,
        ocf_metadata_end_t cmpl, void *priv)
{
-    int result = 0;
-    if (cache->device)
-        result = cache->metadata.iface.flush_superblock(cache);
-    cmpl(priv, result);
-}
-
-static inline uint64_t ocf_metadata_get_reserved_lba(ocf_cache_t cache)
-{
-    return cache->metadata.iface.get_reserved_lba(cache);
+    /* TODO: Shouldn't it be checked by the caller? */
+    if (!cache->device) {
+        cmpl(priv, 0);
+        return;
+    }
+
+    cache->metadata.iface.flush_superblock(cache, cmpl, priv);
}

#endif /* METADATA_SUPERBLOCK_H_ */
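For callers, the inline wrappers above now only forward the completion, so management code continues its work from the callback rather than from a return value. A hedged example of that continuation style; the context type and step comments are made up for illustration:

    /* Illustrative continuation-style caller of the superblock flush. */
    static void my_sb_flush_complete(void *priv, int error)
    {
        struct my_mngt_context *ctx = priv;    /* hypothetical context type */

        if (error) {
            /* abort the management operation and report the error */
            return;
        }
        /* otherwise continue with the next step of the management pipeline */
        (void)ctx;
    }

    /* usage sketch: ocf_metadata_flush_superblock(cache, my_sb_flush_complete, ctx); */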


@@ -369,31 +369,3 @@ void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
    }
    ocf_volume_submit_io(io);
}
-
-struct ocf_submit_io_wait_context {
-    env_completion complete;
-    int error;
-    env_atomic req_remaining;
-};
-
-static void ocf_submit_io_wait_end(struct ocf_io *io, int error)
-{
-    struct ocf_submit_io_wait_context *context = io->priv1;
-
-    context->error |= error;
-    env_completion_complete(&context->complete);
-}
-
-int ocf_submit_io_wait(struct ocf_io *io)
-{
-    struct ocf_submit_io_wait_context context;
-
-    ENV_BUG_ON(env_memset(&context, sizeof(context), 0));
-    env_completion_init(&context.complete);
-    context.error = 0;
-
-    ocf_io_set_cmpl(io, &context, NULL, ocf_submit_io_wait_end);
-
-    ocf_volume_submit_io(io);
-
-    env_completion_wait(&context.complete);
-
-    return context.error;
-}
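The removed ocf_submit_io_wait() blocked the submitter on an env_completion until the I/O finished. Under the asynchronous model, callers instead attach a completion with ocf_io_set_cmpl() and return immediately; the calls below are taken from the removed code, while the callback and wrapper names are illustrative only:

    /* Sketch of the non-blocking pattern replacing ocf_submit_io_wait(). */
    static void my_io_end(struct ocf_io *io, int error)
    {
        /* consume the result here instead of blocking the submitting thread */
        ocf_io_put(io);
    }

    static void submit_async(struct ocf_io *io, void *ctx)
    {
        ocf_io_set_cmpl(io, ctx, NULL, my_io_end);
        ocf_volume_submit_io(io);
    }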


@@ -45,8 +45,6 @@ static inline int ocf_io_overlaps(uint32_t start1, uint32_t count1,
typedef void (*ocf_submit_end_t)(void *priv, int error);

-int ocf_submit_io_wait(struct ocf_io *io);
-
void ocf_submit_volume_flush(ocf_volume_t volume,
        ocf_submit_end_t cmpl, void *priv);


@@ -1,105 +0,0 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
//<tested_file_path>src/metadata/metadata_io.c</tested_file_path>
//<tested_function>metadata_io</tested_function>
#undef static
#undef inline
/*
* This headers must be in test source file. It's important that cmocka.h is
* last.
*/
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"
/*
* Headers from tested target.
*/
#include "metadata.h"
#include "metadata_io.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../engine/engine_bf.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../utils/utils_allocator.h"
#include "../ocf_def_priv.h"
uint32_t __wrap_metadata_io_max_page(struct ocf_cache *cache)
{
function_called();
return mock();
}
void __wrap_env_cond_resched(void)
{
}
void __wrap_ocf_engine_push_req_front(struct ocf_request *req)
{
}
int __wrap_ocf_realloc(void **mem, size_t size, size_t count, size_t *limit)
{
}
int __wrap_ocf_realloc_cp(void **mem, size_t size, size_t count, size_t *limit)
{
}
ocf_ctx_t __wrap_ocf_cache_get_ctx(ocf_cache_t cache)
{
}
int __wrap_ocf_log_raw(ocf_logger_t logger, ocf_logger_lvl_t lvl,
const char *fmt, ...)
{
}
int __wrap_metadata_submit_io(
struct ocf_cache *cache,
struct metadata_io *mio,
uint32_t count,
uint32_t written)
{
}
int __wrap_ocf_restart_meta_io(struct ocf_request *req)
{
}
static void metadata_io_test01(void **state)
{
int result;
struct metadata_io mio;
struct ocf_cache cache;
print_test_description("Check error no. when invalid operation is given");
mio.dir = -1;
mio.cache = &cache;
expect_function_call(__wrap_metadata_io_max_page);
will_return(__wrap_metadata_io_max_page, 256);
result = metadata_io(&mio);
assert_int_equal(result, -EINVAL);
}
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(metadata_io_test01)
};
return cmocka_run_group_tests(tests, NULL, NULL);
}


@@ -1,245 +0,0 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
//<tested_file_path>src/metadata/metadata_io.c</tested_file_path>
//<tested_function>metadata_submit_io</tested_function>
#undef static
#undef inline
/*
* This headers must be in test source file. It's important that cmocka.h is
* last.
*/
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"
/*
* Headers from tested target.
*/
#include "metadata.h"
#include "metadata_io.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../engine/engine_bf.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_allocator.h"
#include "../ocf_def_priv.h"
struct ocf_io *__wrap_ocf_new_cache_io(struct ocf_cache *cache)
{
function_called();
return mock_ptr_type(struct ocf_io *);
}
int __wrap_metadata_io_write_fill(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context)
{
function_called();
return mock();
}
void *__wrap_ctx_data_alloc(ocf_ctx_t ctx, uint32_t pages)
{
function_called();
return mock_ptr_type(void*);
}
void __wrap_ocf_io_configure(struct ocf_io *io, uint64_t addr,
uint32_t bytes, uint32_t dir, uint32_t class, uint64_t flags)
{
function_called();
}
void __wrap_metadata_io_end(struct ocf_io *io, int error)
{
}
void __wrap_ocf_io_set_cmpl(struct ocf_io *io, void *context,
void *context2, ocf_end_io_t fn)
{
function_called();
}
int __wrap_ocf_io_set_data(struct ocf_io *io, ctx_data_t *data,
uint32_t offset)
{
function_called();
return mock();
}
void __wrap_ocf_volume_submit_io(struct ocf_io *io)
{
function_called();
}
void __wrap_ctx_data_free(ocf_ctx_t ctx, ctx_data_t *data)
{
function_called();
}
void __wrap_ocf_io_put(struct ocf_io *io)
{
function_called();
}
int __wrap_ocf_restart_meta_io(struct ocf_request *req)
{
}
void __wrap_env_atomic_inc(env_atomic *a)
{
function_called();
}
static void metadata_submit_io_test01(void **state)
{
int result;
struct metadata_io mio;
struct ocf_cache cache;
uint32_t count;
uint32_t written;
print_test_description("Couldn't allocate new IO");
expect_function_call(__wrap_ocf_new_cache_io);
will_return(__wrap_ocf_new_cache_io, 0);
result = metadata_submit_io(&cache, &mio, count, written);
assert_int_equal(result, -ENOMEM);
assert_int_equal(mio.error, -ENOMEM);
}
static void metadata_submit_io_test02(void **state)
{
int result;
struct metadata_io mio;
struct ocf_cache cache;
uint32_t count;
uint32_t written;
print_test_description("Couldn't allocate data buffer for IO");
expect_function_call(__wrap_ocf_new_cache_io);
will_return(__wrap_ocf_new_cache_io, 1);
expect_function_call(__wrap_ctx_data_alloc);
will_return(__wrap_ctx_data_alloc, 0);
expect_function_call(__wrap_ocf_io_put);
result = metadata_submit_io(&cache, &mio, count, written);
assert_int_equal(result, -ENOMEM);
assert_int_equal(mio.error, -ENOMEM);
}
static void metadata_submit_io_test03(void **state)
{
int result;
struct metadata_io mio;
struct ocf_cache cache;
uint32_t count;
uint32_t written;
int mio_err = 0;
print_test_description("Write operation is performed successfully");
mio.hndl_fn = __wrap_metadata_io_write_fill;
mio.dir = OCF_WRITE;
mio.error = mio_err;
count = 1;
expect_function_call(__wrap_ocf_new_cache_io);
will_return(__wrap_ocf_new_cache_io, 1);
expect_function_call(__wrap_ctx_data_alloc);
will_return(__wrap_ctx_data_alloc, 1);
expect_function_call(__wrap_metadata_io_write_fill);
will_return(__wrap_metadata_io_write_fill, 0);
expect_function_call(__wrap_ocf_io_configure);
expect_function_call(__wrap_ocf_io_set_cmpl);
expect_function_call(__wrap_ocf_io_set_data);
will_return(__wrap_ocf_io_set_data, 0);
expect_function_call(__wrap_env_atomic_inc);
expect_function_call(__wrap_ocf_volume_submit_io);
result = metadata_submit_io(&cache, &mio, count, written);
assert_int_equal(result, 0);
assert_int_equal(mio.error, mio_err);
}
static void metadata_submit_io_test04(void **state)
{
int result;
int i;
int interations_before_fail;
struct metadata_io mio;
struct ocf_cache cache;
uint32_t count;
uint32_t written;
print_test_description("Write operation is performed, but if fails at 3rd iteration");
mio.hndl_fn = __wrap_metadata_io_write_fill;
mio.dir = OCF_WRITE;
count = 3;
interations_before_fail = 2;
expect_function_call(__wrap_ocf_new_cache_io);
will_return(__wrap_ocf_new_cache_io, 1);
expect_function_call(__wrap_ctx_data_alloc);
will_return(__wrap_ctx_data_alloc, 1);
for (i = 0; i < interations_before_fail; i++) {
expect_function_call(__wrap_metadata_io_write_fill);
will_return(__wrap_metadata_io_write_fill, 0);
}
expect_function_call(__wrap_metadata_io_write_fill);
will_return(__wrap_metadata_io_write_fill, 1);
expect_function_call(__wrap_ctx_data_free);
expect_function_call(__wrap_ocf_io_put);
result = metadata_submit_io(&cache, &mio, count, written);
assert_int_equal(result, 1);
assert_int_equal(mio.error, 1);
}
/*
* Main function. It runs tests.
*/
int main(void)
{
const struct CMUnitTest tests[] = {
cmocka_unit_test(metadata_submit_io_test01),
cmocka_unit_test(metadata_submit_io_test02),
cmocka_unit_test(metadata_submit_io_test03),
cmocka_unit_test(metadata_submit_io_test04)
};
print_message("Example template for tests\n");
return cmocka_run_group_tests(tests, NULL, NULL);
}