diff --git a/src/concurrency/ocf_cache_concurrency.c b/src/concurrency/ocf_cache_concurrency.c
index b497e45..b902b97 100644
--- a/src/concurrency/ocf_cache_concurrency.c
+++ b/src/concurrency/ocf_cache_concurrency.c
@@ -810,14 +810,6 @@ int ocf_req_trylock_rd(struct ocf_request *req)
return _ocf_req_lock_rd_common(req, req, _req_on_lock);
}
-/*
- * Lock wait request context
- */
-struct _req_wait_context {
- struct ocf_request *req;
- env_completion cmpl;
-};
-
/*
*
*/
diff --git a/src/metadata/metadata.c b/src/metadata/metadata.c
index 89a8bbe..9a7a4c0 100644
--- a/src/metadata/metadata.c
+++ b/src/metadata/metadata.c
@@ -111,37 +111,23 @@ ocf_cache_line_t ocf_metadata_get_cachelines_count(ocf_cache_t cache)
void ocf_metadata_flush_all(ocf_cache_t cache,
ocf_metadata_end_t cmpl, void *priv)
{
- int result;
-
OCF_METADATA_LOCK_WR();
- result = cache->metadata.iface.flush_all(cache);
+ cache->metadata.iface.flush_all(cache, cmpl, priv);
OCF_METADATA_UNLOCK_WR();
- cmpl(priv, result);
-}
-
-void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line)
-{
- cache->metadata.iface.flush(cache, line);
}
void ocf_metadata_load_all(ocf_cache_t cache,
ocf_metadata_end_t cmpl, void *priv)
{
- int result;
-
OCF_METADATA_LOCK_WR();
- result = cache->metadata.iface.load_all(cache);
+ cache->metadata.iface.load_all(cache, cmpl, priv);
OCF_METADATA_UNLOCK_WR();
- cmpl(priv, result);
}
void ocf_metadata_load_recovery(ocf_cache_t cache,
ocf_metadata_end_t cmpl, void *priv)
{
- int result;
-
- result = cache->metadata.iface.load_recovery(cache);
- cmpl(priv, result);
+ cache->metadata.iface.load_recovery(cache, cmpl, priv);
}
void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
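
With these wrappers the metadata front-end now reports status through the
ocf_metadata_end_t completion rather than a return code. A caller that still
needs to wait synchronously can bridge the new interface with an
env_completion; below is a minimal sketch with illustrative, non-OCF names:

/* Sketch only - synchronous bridge over the completion-based interface */
struct sync_metadata_ctx {
	env_completion done;
	int error;
};

static void sync_metadata_end(void *priv, int error)
{
	struct sync_metadata_ctx *ctx = priv;

	ctx->error = error;
	env_completion_complete(&ctx->done);
}

static int metadata_flush_all_sync(ocf_cache_t cache)
{
	struct sync_metadata_ctx ctx = { .error = 0 };

	env_completion_init(&ctx.done);
	ocf_metadata_flush_all(cache, sync_metadata_end, &ctx);
	env_completion_wait(&ctx.done);

	return ctx.error;
}

The same pattern applies to ocf_metadata_load_all() and
ocf_metadata_load_recovery().
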
diff --git a/src/metadata/metadata.h b/src/metadata/metadata.h
index a883dce..8715c12 100644
--- a/src/metadata/metadata.h
+++ b/src/metadata/metadata.h
@@ -6,6 +6,7 @@
#ifndef __METADATA_H__
#define __METADATA_H__
+#include "metadata_common.h"
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
@@ -124,8 +125,6 @@ static inline void ocf_metadata_status_bits_unlock(
#define OCF_METADATA_FLUSH_UNLOCK() \
ocf_metadata_flush_unlock(cache)
-typedef void (*ocf_metadata_end_t)(void *priv, int error);
-
#include "metadata_cleaning_policy.h"
#include "metadata_eviction_policy.h"
#include "metadata_partition.h"
@@ -233,15 +232,6 @@ ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache);
void ocf_metadata_flush_all(ocf_cache_t cache,
ocf_metadata_end_t cmpl, void *priv);
-
-/**
- * @brief Flush metadata for specified cache line
- *
- * @param[in] cache - Cache instance
- * @param[in] line - cache line which to be flushed
- */
-void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line);
-
/**
* @brief Mark specified cache line to be flushed
*
@@ -283,6 +273,17 @@ void ocf_metadata_load_all(ocf_cache_t cache,
void ocf_metadata_load_recovery(ocf_cache_t cache,
ocf_metadata_end_t cmpl, void *priv);
+
+/**
+ * @brief Get reserved area LBA
+ *
+ * @param cache Cache instance
+ */
+static inline uint64_t ocf_metadata_get_reserved_lba(ocf_cache_t cache)
+{
+ return cache->metadata.iface.get_reserved_lba(cache);
+}
+
/*
* NOTE Hash table is specific for hash table metadata service implementation
* and should be used internally by metadata service.
@@ -302,12 +303,6 @@ static inline void ocf_metadata_set_hash(struct ocf_cache *cache,
cache->metadata.iface.set_hash(cache, index, line);
}
-static inline void ocf_metadata_flush_hash(struct ocf_cache *cache,
- ocf_cache_line_t index)
-{
- cache->metadata.iface.flush_hash(cache, index);
-}
-
static inline ocf_cache_line_t ocf_metadata_entries_hash(
struct ocf_cache *cache)
{
diff --git a/src/metadata/metadata_cleaning_policy.h b/src/metadata/metadata_cleaning_policy.h
index 38098a1..2efb288 100644
--- a/src/metadata/metadata_cleaning_policy.h
+++ b/src/metadata/metadata_cleaning_policy.h
@@ -26,14 +26,4 @@ ocf_metadata_set_cleaning_policy(struct ocf_cache *cache,
cache->metadata.iface.set_cleaning_policy(cache, line, policy);
}
-/*
- * FLUSH
- */
-static inline void
-ocf_metadata_flush_cleaning_policy(struct ocf_cache *cache,
- ocf_cache_line_t line)
-{
- cache->metadata.iface.flush_cleaning_policy(cache, line);
-}
-
#endif /* METADATA_CLEANING_POLICY_H_ */
diff --git a/src/metadata/metadata_common.h b/src/metadata/metadata_common.h
new file mode 100644
index 0000000..25cddd4
--- /dev/null
+++ b/src/metadata/metadata_common.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_COMMON_H__
+#define __METADATA_COMMON_H__
+
+typedef void (*ocf_metadata_end_t)(void *priv, int error);
+
+#endif /* __METADATA_COMMON_H__ */
+
diff --git a/src/metadata/metadata_eviction_policy.h b/src/metadata/metadata_eviction_policy.h
index 8797edb..436492f 100644
--- a/src/metadata/metadata_eviction_policy.h
+++ b/src/metadata/metadata_eviction_policy.h
@@ -23,13 +23,4 @@ static inline void ocf_metadata_set_evicition_policy(
cache->metadata.iface.set_eviction_policy(cache, line, eviction);
}
-/*
- * FLUSH
- */
-static inline void ocf_metadata_flush_evicition_policy(
- struct ocf_cache *cache, ocf_cache_line_t line)
-{
- cache->metadata.iface.flush_eviction_policy(cache, line);
-}
-
#endif /* METADATA_EVICTION_H_ */
diff --git a/src/metadata/metadata_hash.c b/src/metadata/metadata_hash.c
index b9d336d..4fc8726 100644
--- a/src/metadata/metadata_hash.c
+++ b/src/metadata/metadata_hash.c
@@ -10,6 +10,7 @@
#include "metadata_status.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_cache_line.h"
+#include "../utils/utils_pipeline.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_HASH_DEBUG 0
@@ -876,94 +877,105 @@ static size_t ocf_metadata_hash_size_of(struct ocf_cache *cache)
* Super Block
******************************************************************************/
-/*
- * Super Block - Load, This function has to prevent to pointers overwrite
- */
-static int ocf_metadata_hash_load_superblock(struct ocf_cache *cache)
+struct ocf_metadata_hash_context {
+ ocf_metadata_end_t cmpl;
+ void *priv;
+ ocf_pipeline_t pipeline;
+ ocf_cache_t cache;
+};
+
+static void ocf_metadata_hash_generic_complete(void *priv, int error)
{
- int result = 0;
- uint32_t i = 0;
+ struct ocf_metadata_hash_context *context = priv;
+
+ if (error)
+ ocf_pipeline_finish(context->pipeline, error);
+ else
+ ocf_pipeline_next(context->pipeline);
+}
+
+static void ocf_medatata_hash_load_segment(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ int segment = ocf_pipeline_arg_get_int(arg);
+ struct ocf_metadata_hash_ctrl *ctrl;
+ ocf_cache_t cache = context->cache;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+
+ ocf_metadata_raw_load_all(cache, &ctrl->raw_desc[segment],
+ ocf_metadata_hash_generic_complete, context);
+}
+
+static void ocf_medatata_hash_check_crc_sb_config(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
struct ocf_metadata_hash_ctrl *ctrl;
struct ocf_superblock_config *sb_config;
- struct ocf_superblock_runtime *sb_runtime;
+ ocf_cache_t cache = context->cache;
+ int segment = metadata_segment_sb_config;
+ uint32_t crc;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+
+ crc = env_crc32(0, (void *)sb_config,
+ offsetof(struct ocf_superblock_config, checksum));
+
+ if (crc != sb_config->checksum[segment]) {
+ /* Checksum does not match */
+ ocf_cache_log(cache, log_err,
+ "Loading %s ERROR, invalid checksum",
+ ocf_metadata_hash_raw_names[segment]);
+ ocf_pipeline_finish(pipeline, -OCF_ERR_INVAL);
+ return;
+ }
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_check_crc(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ int segment = ocf_pipeline_arg_get_int(arg);
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *sb_config;
+ ocf_cache_t cache = context->cache;
+ uint32_t crc;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+
+ crc = ocf_metadata_raw_checksum(cache, &(ctrl->raw_desc[segment]));
+
+ if (crc != sb_config->checksum[segment]) {
+ /* Checksum does not match */
+ ocf_cache_log(cache, log_err,
+ "Loading %s ERROR, invalid checksum",
+ ocf_metadata_hash_raw_names[segment]);
+ ocf_pipeline_finish(pipeline, -OCF_ERR_INVAL);
+ return;
+ }
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_load_superblock_post(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *sb_config;
+ ocf_cache_t cache = context->cache;
struct ocf_metadata_uuid *muuid;
struct ocf_volume_uuid uuid;
+ uint32_t i;
- OCF_DEBUG_TRACE(cache);
-
- ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
- ENV_BUG_ON(!ctrl);
-
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
- ENV_BUG_ON(!sb_config);
-
- sb_runtime = METADATA_MEM_POOL(ctrl, metadata_segment_sb_runtime);
- ENV_BUG_ON(!sb_runtime);
-
- /* Load super block main information */
- result |= ocf_metadata_raw_load_all(cache,
- &(ctrl->raw_desc[metadata_segment_sb_config]));
-
- result |= ocf_metadata_raw_load_all(cache,
- &(ctrl->raw_desc[metadata_segment_sb_runtime]));
-
- /* Load core information */
- result |= ocf_metadata_raw_load_all(cache,
- &(ctrl->raw_desc[metadata_segment_core_config]));
- result |= ocf_metadata_raw_load_all(cache,
- &(ctrl->raw_desc[metadata_segment_core_uuid]));
-
- /* Do loading */
- if (result) {
- /* Loading super block failure */
- ocf_cache_log(cache, log_err,
- "Loading metadata of super block ERROR");
- goto ocf_metadata_hash_load_superblock_ERROR;
- }
-
- result = env_crc32(0, (void *)sb_config,
- offsetof(struct ocf_superblock_config, checksum)) !=
- sb_config->checksum[metadata_segment_sb_config];
-
- if (result) {
- /* Checksum does not match */
- ocf_cache_log(cache, log_err,
- "Loading config super block ERROR, invalid checksum");
- goto ocf_metadata_hash_load_superblock_ERROR;
- }
-
- result = ocf_metadata_raw_checksum(cache,
- &(ctrl->raw_desc[metadata_segment_sb_runtime])) !=
- sb_config->checksum[metadata_segment_sb_runtime];
-
- if (result) {
- /* Checksum does not match */
- ocf_cache_log(cache, log_err,
- "Loading runtime super block ERROR, invalid checksum");
- goto ocf_metadata_hash_load_superblock_ERROR;
- }
-
- result = ocf_metadata_raw_checksum(cache,
- &(ctrl->raw_desc[metadata_segment_core_config])) !=
- sb_config->checksum[metadata_segment_core_config];
-
- if (result) {
- /* Checksum does not match */
- ocf_cache_log(cache, log_err,
- "Loading core config section ERROR, invalid checksum");
- goto ocf_metadata_hash_load_superblock_ERROR;
- }
-
- result = ocf_metadata_raw_checksum(cache,
- &(ctrl->raw_desc[metadata_segment_core_uuid])) !=
- sb_config->checksum[metadata_segment_core_uuid];
-
- if (result) {
- /* Checksum does not match */
- ocf_cache_log(cache, log_err,
- "Loading uuid section ERROR, invalid checksum");
- goto ocf_metadata_hash_load_superblock_ERROR;
- }
for (i = 0; i < OCF_CORE_MAX; i++) {
if (!cache->core_conf_meta[i].added)
@@ -985,40 +997,111 @@ static int ocf_metadata_hash_load_superblock(struct ocf_cache *cache)
if (sb_config->core_count > OCF_CORE_MAX) {
ocf_cache_log(cache, log_err,
"Loading cache state ERROR, invalid cores count\n");
- goto ocf_metadata_hash_load_superblock_ERROR;
+ ocf_pipeline_finish(pipeline, -OCF_ERR_INVAL);
+ return;
}
if (sb_config->valid_parts_no > OCF_IO_CLASS_MAX) {
ocf_cache_log(cache, log_err,
"Loading cache state ERROR, invalid partition count\n");
- goto ocf_metadata_hash_load_superblock_ERROR;
+ ocf_pipeline_finish(pipeline, -OCF_ERR_INVAL);
+ return;
}
- return 0;
-
-ocf_metadata_hash_load_superblock_ERROR:
-
- ocf_cache_log(cache, log_err, "Metadata read FAILURE\n");
- ocf_metadata_error(cache);
- return -1;
-
+ ocf_pipeline_next(pipeline);
}
-/*
- * Super Block - FLUSH
- */
-static int ocf_metadata_hash_flush_superblock(struct ocf_cache *cache)
+static void ocf_metadata_hash_load_superblock_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
{
- uint32_t i;
- int result = 0;
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
+ ocf_cache_log(cache, log_err, "Metadata read FAILURE\n");
+ ocf_metadata_error(cache);
+ }
+
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_arg ocf_metadata_hash_load_sb_load_segment_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_sb_config),
+ OCF_PL_ARG_INT(metadata_segment_sb_runtime),
+ OCF_PL_ARG_INT(metadata_segment_core_config),
+ OCF_PL_ARG_INT(metadata_segment_core_uuid),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_arg ocf_metadata_hash_load_sb_check_crc_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_sb_runtime),
+ OCF_PL_ARG_INT(metadata_segment_core_config),
+ OCF_PL_ARG_INT(metadata_segment_core_uuid),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_properties ocf_metadata_hash_load_sb_pipeline_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_load_superblock_finish,
+ .steps = {
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_load_segment,
+ ocf_metadata_hash_load_sb_load_segment_args),
+ OCF_PL_STEP(ocf_medatata_hash_check_crc_sb_config),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_check_crc,
+ ocf_metadata_hash_load_sb_check_crc_args),
+ OCF_PL_STEP(ocf_medatata_hash_load_superblock_post),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+/*
+ * Super Block - Load. This function must prevent pointer overwrite.
+ */
+static void ocf_metadata_hash_load_superblock(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
struct ocf_metadata_hash_ctrl *ctrl;
- struct ocf_superblock_config *superblock;
+ struct ocf_superblock_config *sb_config;
+ struct ocf_superblock_runtime *sb_runtime;
+ int result;
OCF_DEBUG_TRACE(cache);
ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
+ ENV_BUG_ON(!ctrl);
- superblock = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+ ENV_BUG_ON(!sb_config);
+
+ sb_runtime = METADATA_MEM_POOL(ctrl, metadata_segment_sb_runtime);
+ ENV_BUG_ON(!sb_runtime);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_load_sb_pipeline_props);
+ if (result) {
+ cmpl(priv, result);
+ return;
+ }
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_flush_superblock_prepare(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ uint32_t i;
/* Synchronize core objects types */
for (i = 0; i < OCF_CORE_MAX; i++) {
@@ -1026,34 +1109,126 @@ static int ocf_metadata_hash_flush_superblock(struct ocf_cache *cache)
cache->owner, cache->core[i].volume.type);
}
- /* Calculate checksum */
- superblock->checksum[metadata_segment_sb_config] = env_crc32(0,
- (void *)superblock,
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_calculate_crc_sb_config(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *sb_config;
+ ocf_cache_t cache = context->cache;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
+
+ sb_config->checksum[metadata_segment_sb_config] = env_crc32(0,
+ (void *)sb_config,
offsetof(struct ocf_superblock_config, checksum));
- superblock->checksum[metadata_segment_core_config] =
- ocf_metadata_raw_checksum(cache,
- &(ctrl->raw_desc[metadata_segment_core_config]));
+ ocf_pipeline_next(pipeline);
+}
- superblock->checksum[metadata_segment_core_uuid] =
- ocf_metadata_raw_checksum(cache,
- &(ctrl->raw_desc[metadata_segment_core_uuid]));
+static void ocf_medatata_hash_calculate_crc(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ int segment = ocf_pipeline_arg_get_int(arg);
+ struct ocf_metadata_hash_ctrl *ctrl;
+ struct ocf_superblock_config *sb_config;
+ ocf_cache_t cache = context->cache;
- /**
- * Flush RAW container that contains super block
- */
- result = ocf_metadata_raw_flush_all(cache,
- &(ctrl->raw_desc[metadata_segment_sb_config]));
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+ sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
- result |= ocf_metadata_raw_flush_all(cache,
- &(ctrl->raw_desc[metadata_segment_core_config]));
+ sb_config->checksum[segment] = ocf_metadata_raw_checksum(cache,
+ &(ctrl->raw_desc[segment]));
- result |= ocf_metadata_raw_flush_all(cache,
- &(ctrl->raw_desc[metadata_segment_core_uuid]));
- if (result)
+ ocf_pipeline_next(pipeline);
+}
+
+static void ocf_medatata_hash_flush_segment(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ int segment = ocf_pipeline_arg_get_int(arg);
+ struct ocf_metadata_hash_ctrl *ctrl;
+ ocf_cache_t cache = context->cache;
+
+ ctrl = (struct ocf_metadata_hash_ctrl *)cache->metadata.iface_priv;
+
+ ocf_metadata_raw_flush_all(cache, &ctrl->raw_desc[segment],
+ ocf_metadata_hash_generic_complete, context);
+}
+
+static void ocf_metadata_hash_flush_superblock_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error)
ocf_metadata_error(cache);
- return result;
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_arg ocf_metadata_hash_flush_sb_calculate_crc_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_core_config),
+ OCF_PL_ARG_INT(metadata_segment_core_uuid),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_arg ocf_metadata_hash_flush_sb_flush_segment_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_sb_config),
+ OCF_PL_ARG_INT(metadata_segment_core_config),
+ OCF_PL_ARG_INT(metadata_segment_core_uuid),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_properties ocf_metadata_hash_flush_sb_pipeline_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_flush_superblock_finish,
+ .steps = {
+ OCF_PL_STEP(ocf_medatata_hash_flush_superblock_prepare),
+ OCF_PL_STEP(ocf_medatata_hash_calculate_crc_sb_config),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_calculate_crc,
+ ocf_metadata_hash_flush_sb_calculate_crc_args),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_flush_segment,
+ ocf_metadata_hash_flush_sb_flush_segment_args),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+/*
+ * Super Block - FLUSH
+ */
+static void ocf_metadata_hash_flush_superblock(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_flush_sb_pipeline_props);
+ if (result) {
+ cmpl(priv, result);
+ return;
+ }
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
}
/**
@@ -1064,9 +1239,9 @@ static int ocf_metadata_hash_flush_superblock(struct ocf_cache *cache)
*
* @return Operation status (0 success, otherwise error)
*/
-static int ocf_metadata_hash_set_shutdown_status(
- struct ocf_cache *cache,
- enum ocf_metadata_shutdown_status shutdown_status)
+static void ocf_metadata_hash_set_shutdown_status(ocf_cache_t cache,
+ enum ocf_metadata_shutdown_status shutdown_status,
+ ocf_metadata_end_t cmpl, void *priv)
{
struct ocf_metadata_hash_ctrl *ctrl;
struct ocf_superblock_config *superblock;
@@ -1088,7 +1263,7 @@ static int ocf_metadata_hash_set_shutdown_status(
superblock->magic_number = CACHE_MAGIC_NUMBER;
/* Flush superblock */
- return ocf_metadata_hash_flush_superblock(cache);
+ ocf_metadata_hash_flush_superblock(cache, cmpl, priv);
}
/*******************************************************************************
@@ -1111,95 +1286,105 @@ static uint64_t ocf_metadata_hash_get_reserved_lba(
* FLUSH AND LOAD ALL
******************************************************************************/
-/*
- * Flush all metadata
- */
-static int ocf_metadata_hash_flush_all(struct ocf_cache *cache)
+static void ocf_medatata_hash_flush_all_set_status_complete(
+ void *priv, int error)
{
- struct ocf_metadata_hash_ctrl *ctrl;
- struct ocf_superblock_config *superblock;
- int result = 0;
- uint32_t i = 0;
-
- OCF_DEBUG_TRACE(cache);
-
- ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
-
- superblock = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
-
- ocf_metadata_hash_set_shutdown_status(cache,
- ocf_metadata_dirty_shutdown);
-
- /*
- * Flush all RAW metadata container
- */
- for (i = 0; i < metadata_segment_max; i++) {
- if ((metadata_segment_sb_config == i) ||
- (metadata_segment_core_config == i) ||
- (metadata_segment_core_uuid == i)) {
- continue;
- }
-
- result |= ocf_metadata_raw_flush_all(cache,
- &(ctrl->raw_desc[i]));
+ struct ocf_metadata_hash_context *context = priv;
+ if (error) {
+ ocf_pipeline_finish(context->pipeline, error);
+ return;
}
- if (result == 0) {
- for (i = 0; i < metadata_segment_max; i++) {
- if ((metadata_segment_sb_config == i) ||
- (metadata_segment_core_config == i) ||
- (metadata_segment_core_uuid == i)) {
- continue;
- }
+ ocf_pipeline_next(context->pipeline);
+}
- superblock->checksum[i] = ocf_metadata_raw_checksum(
- cache, &(ctrl->raw_desc[i]));
- }
+static void ocf_medatata_hash_flush_all_set_status(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ enum ocf_metadata_shutdown_status shutdown_status =
+ ocf_pipeline_arg_get_int(arg);
- /* Set clean shutdown status (it flushes entire superblock) */
- result = ocf_metadata_hash_set_shutdown_status(cache,
- ocf_metadata_clean_shutdown);
- }
+ ocf_metadata_hash_set_shutdown_status(cache, shutdown_status,
+ ocf_medatata_hash_flush_all_set_status_complete,
+ context);
+}
- if (result) {
- ocf_metadata_error(cache);
+static void ocf_metadata_hash_flush_all_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
ocf_cache_log(cache, log_err, "Metadata Flush ERROR\n");
- return result;
+ ocf_metadata_error(cache);
+ goto out;
}
ocf_cache_log(cache, log_info, "Done saving cache state!\n");
- return result;
+
+out:
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
}
+struct ocf_pipeline_arg ocf_metadata_hash_flush_all_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_sb_runtime),
+ OCF_PL_ARG_INT(metadata_segment_core_runtime),
+ OCF_PL_ARG_INT(metadata_segment_cleaning),
+ OCF_PL_ARG_INT(metadata_segment_eviction),
+ OCF_PL_ARG_INT(metadata_segment_collision),
+ OCF_PL_ARG_INT(metadata_segment_list_info),
+ OCF_PL_ARG_INT(metadata_segment_hash),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_properties ocf_metadata_hash_flush_all_pipeline_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_flush_all_finish,
+ .steps = {
+ OCF_PL_STEP_ARG_INT(ocf_medatata_hash_flush_all_set_status,
+ ocf_metadata_dirty_shutdown),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_flush_segment,
+ ocf_metadata_hash_flush_all_args),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_calculate_crc,
+ ocf_metadata_hash_flush_all_args),
+ OCF_PL_STEP_ARG_INT(ocf_medatata_hash_flush_all_set_status,
+ ocf_metadata_clean_shutdown),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
/*
- * Flush specified cache line
+ * Flush all metadata
*/
-static void ocf_metadata_hash_flush(struct ocf_cache *cache,
- ocf_cache_line_t line)
+static void ocf_metadata_hash_flush_all(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
{
- int result = 0;
- struct ocf_metadata_hash_ctrl *ctrl = NULL;
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
OCF_DEBUG_TRACE(cache);
- ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
-
- /*
- * Flush all required metadata elements to make given metadata cache
- * line persistent in case of recovery
- */
-
- /* Collision table to get mapping cache line to HDD sector*/
- result |= ocf_metadata_raw_flush(cache,
- &(ctrl->raw_desc[metadata_segment_collision]),
- line);
-
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_flush_all_pipeline_props);
if (result) {
- ocf_metadata_error(cache);
- ocf_cache_log(cache, log_err,
- "Metadata Flush ERROR for cache line %u\n", line);
+ cmpl(priv, result);
+ return;
}
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
}
/*
@@ -1256,74 +1441,74 @@ static void ocf_metadata_hash_flush_do_asynch(struct ocf_cache *cache,
}
}
-/*
- * Load all metadata
- */
-static int ocf_metadata_hash_load_all(struct ocf_cache *cache)
+static void ocf_metadata_hash_load_all_finish(ocf_pipeline_t pipeline,
+ void *priv, int error)
{
- struct ocf_metadata_hash_ctrl *ctrl;
- struct ocf_superblock_config *superblock;
- int result = 0, i = 0;
- uint32_t checksum;
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
- OCF_DEBUG_TRACE(cache);
-
- ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
-
- superblock = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config);
-
- /*
- * Load all RAW metadata container
- */
- for (i = 0; i < metadata_segment_max; i++) {
- if ((metadata_segment_sb_config == i) ||
- (metadata_segment_sb_runtime == i) ||
- (metadata_segment_core_config == i) ||
- (metadata_segment_core_uuid == i)) {
- /* Super block and core metadata are loaded separately */
- continue;
- }
-
- result = ocf_metadata_raw_load_all(cache,
- &(ctrl->raw_desc[i]));
- if (result)
- break;
-
- if (i == metadata_segment_reserved) {
- /* Don't check checksum for reserved area */
- continue;
- }
-
- checksum = ocf_metadata_raw_checksum(cache,
- &(ctrl->raw_desc[i]));
-
- if (checksum != superblock->checksum[i]) {
- result = -EINVAL;
- break;
- }
- }
-
- if (result) {
- ocf_metadata_error(cache);
+ if (error) {
ocf_cache_log(cache, log_err, "Metadata read FAILURE\n");
- return -1;
- }
-
- /*
- * TODO(rbaldyga): Is that related to metadata at all? If not, then it
- * should be moved to some better place.
- */
- /* Final error checking */
- if (!env_bit_test(ocf_cache_state_running, &cache->cache_state)
- && !env_bit_test(ocf_cache_state_initializing,
- &cache->cache_state)) {
- ocf_cache_log(cache, log_err,
- "Metadata Read failed! OCF Stopped!\n");
- return -1;
+ ocf_metadata_error(cache);
+ goto out;
}
ocf_cache_log(cache, log_info, "Done loading cache state\n");
- return 0;
+
+out:
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_arg ocf_metadata_hash_load_all_args[] = {
+ OCF_PL_ARG_INT(metadata_segment_core_runtime),
+ OCF_PL_ARG_INT(metadata_segment_cleaning),
+ OCF_PL_ARG_INT(metadata_segment_eviction),
+ OCF_PL_ARG_INT(metadata_segment_collision),
+ OCF_PL_ARG_INT(metadata_segment_list_info),
+ OCF_PL_ARG_INT(metadata_segment_hash),
+ OCF_PL_ARG_TERMINATOR(),
+};
+
+struct ocf_pipeline_properties ocf_metadata_hash_load_all_pipeline_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_load_all_finish,
+ .steps = {
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_load_segment,
+ ocf_metadata_hash_load_all_args),
+ OCF_PL_STEP_FOREACH(ocf_medatata_hash_check_crc,
+ ocf_metadata_hash_load_all_args),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+/*
+ * Load all metadata
+ */
+static void ocf_metadata_hash_load_all(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_load_all_pipeline_props);
+ if (result) {
+ cmpl(priv, result);
+ return;
+ }
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
}
static void _recovery_rebuild_cline_metadata(struct ocf_cache *cache,
@@ -1393,9 +1578,12 @@ static void _recovery_reset_cline_metadata(struct ocf_cache *cache,
init_cache_block(cache, cline);
}
-static void _recovery_rebuild_metadata(struct ocf_cache *cache,
- bool dirty_only)
+static void _recovery_rebuild_metadata(ocf_pipeline_t pipeline,
+ void *priv, ocf_pipeline_arg_t arg)
{
+ struct ocf_metadata_hash_context *context = priv;
+ bool dirty_only = ocf_pipeline_arg_get_int(arg);
+ ocf_cache_t cache = context->cache;
ocf_cache_line_t cline;
ocf_core_id_t core_id;
uint64_t core_line;
@@ -1422,30 +1610,66 @@ static void _recovery_rebuild_metadata(struct ocf_cache *cache,
}
OCF_METADATA_UNLOCK_WR();
+
+ ocf_pipeline_next(pipeline);
}
-static int _ocf_metadata_hash_load_recovery_legacy(
- struct ocf_cache *cache)
+static void ocf_metadata_hash_load_recovery_legacy_finish(
+ ocf_pipeline_t pipeline, void *priv, int error)
{
- int result = 0;
- struct ocf_metadata_hash_ctrl *ctrl = NULL;
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
+ ocf_cache_log(cache, log_err,
+ "Metadata read for recovery FAILURE\n");
+ ocf_metadata_error(cache);
+ goto out;
+ }
+
+ ocf_cache_log(cache, log_info, "Done loading cache state\n");
+
+out:
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_properties
+ocf_metadata_hash_load_recovery_legacy_pl_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_load_recovery_legacy_finish,
+ .steps = {
+ OCF_PL_STEP_ARG_INT(ocf_medatata_hash_load_segment,
+ metadata_segment_collision),
+ OCF_PL_STEP_ARG_INT(_recovery_rebuild_metadata, true),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+static void _ocf_metadata_hash_load_recovery_legacy(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
OCF_DEBUG_TRACE(cache);
- ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
-
- /* Collision table to get mapping cache line to HDD sector*/
- result |= ocf_metadata_raw_load_all(cache,
- &(ctrl->raw_desc[metadata_segment_collision]));
-
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_load_recovery_legacy_pl_props);
if (result) {
- ocf_metadata_error(cache);
- ocf_cache_log(cache, log_err,
- "Metadata read for recovery FAILURE\n");
- return result;
+ cmpl(priv, result);
+ return;
}
- return result;
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
}
static ocf_core_id_t _ocf_metadata_hash_find_core_by_seq(
@@ -1462,11 +1686,25 @@ static ocf_core_id_t _ocf_metadata_hash_find_core_by_seq(
return i;
}
-static int _ocf_metadata_hash_load_atomic(struct ocf_cache *cache,
- uint64_t sector_addr, uint32_t sector_no,
- ctx_data_t *data)
+
+static void ocf_metadata_hash_load_atomic_metadata_complete(
+ ocf_cache_t cache, void *priv, int error)
{
- uint32_t i;
+ struct ocf_metadata_hash_context *context = priv;
+
+ if (error) {
+ ocf_pipeline_finish(context->pipeline, error);
+ return;
+ }
+
+ ocf_pipeline_next(context->pipeline);
+}
+
+static int ocf_metadata_hash_load_atomic_metadata_drain(void *priv,
+ uint64_t sector_addr, uint32_t sector_no, ctx_data_t *data)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
struct ocf_atomic_metadata meta;
ocf_cache_line_t line = 0;
uint8_t pos = 0;
@@ -1474,6 +1712,7 @@ static int _ocf_metadata_hash_load_atomic(struct ocf_cache *cache,
ocf_core_id_t core_id = OCF_CORE_ID_INVALID;
uint64_t core_line = 0;
bool core_line_ok = false;
+ uint32_t i;
for (i = 0; i < sector_no; i++) {
ctx_data_rd_check(cache->owner, &meta, data, sizeof(meta));
@@ -1508,53 +1747,92 @@ static int _ocf_metadata_hash_load_atomic(struct ocf_cache *cache,
return 0;
}
-/*
- * RAM Implementation - Load all metadata elements from SSD
- */
-static int _ocf_metadata_hash_load_recovery_atomic(
- struct ocf_cache *cache)
+static void ocf_medatata_hash_load_atomic_metadata(
+ ocf_pipeline_t pipeline, void *priv, ocf_pipeline_arg_t arg)
{
- int result = 0;
-
- OCF_DEBUG_TRACE(cache);
-
- /* Collision table to get mapping cache line to HDD sector*/
- result |= metadata_io_read_i_atomic(cache,
- _ocf_metadata_hash_load_atomic);
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+ int result;
+ result = metadata_io_read_i_atomic(cache, cache->mngt_queue,
+ context, ocf_metadata_hash_load_atomic_metadata_drain,
+ ocf_metadata_hash_load_atomic_metadata_complete);
if (result) {
ocf_metadata_error(cache);
ocf_cache_log(cache, log_err,
"Metadata read for recovery FAILURE\n");
- return result;
+ ocf_pipeline_finish(pipeline, result);
+ }
+}
+
+static void ocf_metadata_hash_load_recovery_atomic_finish(
+ ocf_pipeline_t pipeline, void *priv, int error)
+{
+ struct ocf_metadata_hash_context *context = priv;
+ ocf_cache_t cache = context->cache;
+
+ if (error) {
+ ocf_cache_log(cache, log_err,
+ "Metadata read for recovery FAILURE\n");
+ ocf_metadata_error(cache);
}
- return result;
+ context->cmpl(context->priv, error);
+ ocf_pipeline_destroy(pipeline);
+}
+
+struct ocf_pipeline_properties
+ocf_metadata_hash_load_recovery_atomic_pl_props = {
+ .priv_size = sizeof(struct ocf_metadata_hash_context),
+ .finish = ocf_metadata_hash_load_recovery_atomic_finish,
+ .steps = {
+ OCF_PL_STEP(ocf_medatata_hash_load_atomic_metadata),
+ OCF_PL_STEP_ARG_INT(_recovery_rebuild_metadata, false),
+ OCF_PL_STEP_TERMINATOR(),
+ },
+};
+
+/*
+ * Load for recovery - read metadata from the atomic metadata area
+ */
+static void _ocf_metadata_hash_load_recovery_atomic(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct ocf_metadata_hash_context *context;
+ ocf_pipeline_t pipeline;
+ int result;
+
+ OCF_DEBUG_TRACE(cache);
+
+ result = ocf_pipeline_create(&pipeline, cache,
+ &ocf_metadata_hash_load_recovery_atomic_pl_props);
+ if (result) {
+ cmpl(priv, result);
+ return;
+ }
+
+ context = ocf_pipeline_get_priv(pipeline);
+
+ context->cmpl = cmpl;
+ context->priv = priv;
+ context->pipeline = pipeline;
+ context->cache = cache;
+
+ ocf_pipeline_next(pipeline);
}
/*
* Load for recovery - Load only data that is required for recovery procedure
*/
-static int ocf_metadata_hash_load_recovery(struct ocf_cache *cache)
+static void ocf_metadata_hash_load_recovery(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv)
{
- int result = 0;
- bool rebuild_dirty_only;
-
OCF_DEBUG_TRACE(cache);
-
- if (ocf_volume_is_atomic(&cache->device->volume)) {
- result = _ocf_metadata_hash_load_recovery_atomic(cache);
- rebuild_dirty_only = false;
- } else {
- result = _ocf_metadata_hash_load_recovery_legacy(cache);
- rebuild_dirty_only = true;
- }
-
- if (!result)
- _recovery_rebuild_metadata(cache, rebuild_dirty_only);
-
- return result;
+ if (ocf_volume_is_atomic(&cache->device->volume))
+ _ocf_metadata_hash_load_recovery_atomic(cache, cmpl, priv);
+ else
+ _ocf_metadata_hash_load_recovery_legacy(cache, cmpl, priv);
}
/*******************************************************************************
@@ -1736,23 +2014,6 @@ static void ocf_metadata_hash_set_hash(struct ocf_cache *cache,
ocf_metadata_error(cache);
}
-/*
- * Hash Table - Flush
- */
-static void ocf_metadata_hash_flush_hash(struct ocf_cache *cache,
- ocf_cache_line_t index)
-{
- int result = 0;
- struct ocf_metadata_hash_ctrl *ctrl
- = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
-
- result = ocf_metadata_raw_flush(cache,
- &(ctrl->raw_desc[metadata_segment_hash]), index);
-
- if (result)
- ocf_metadata_error(cache);
-}
-
/*
* Hash Table - Get Entries
*/
@@ -1807,23 +2068,6 @@ static void ocf_metadata_hash_set_cleaning_policy(
ocf_metadata_error(cache);
}
-/*
- * Cleaning policy - Flush
- */
-static void ocf_metadata_hash_flush_cleaning_policy(
- struct ocf_cache *cache, ocf_cache_line_t line)
-{
- int result = 0;
- struct ocf_metadata_hash_ctrl *ctrl
- = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
-
- result = ocf_metadata_raw_flush(cache,
- &(ctrl->raw_desc[metadata_segment_cleaning]), line);
-
- if (result)
- ocf_metadata_error(cache);
-}
-
/*******************************************************************************
* Eviction policy
******************************************************************************/
@@ -1866,23 +2110,6 @@ static void ocf_metadata_hash_set_eviction_policy(
ocf_metadata_error(cache);
}
-/*
- * Cleaning policy - Flush
- */
-static void ocf_metadata_hash_flush_eviction_policy(
- struct ocf_cache *cache, ocf_cache_line_t line)
-{
- int result = 0;
- struct ocf_metadata_hash_ctrl *ctrl
- = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv;
-
- result = ocf_metadata_raw_flush(cache,
- &(ctrl->raw_desc[metadata_segment_eviction]), line);
-
- if (result)
- ocf_metadata_error(cache);
-}
-
/*******************************************************************************
* Collision
******************************************************************************/
@@ -2242,7 +2469,6 @@ static const struct ocf_metadata_iface metadata_hash_iface = {
* Load all, flushing all, etc...
*/
.flush_all = ocf_metadata_hash_flush_all,
- .flush = ocf_metadata_hash_flush,
.flush_mark = ocf_metadata_hash_flush_mark,
.flush_do_asynch = ocf_metadata_hash_flush_do_asynch,
.load_all = ocf_metadata_hash_load_all,
@@ -2301,7 +2527,6 @@ static const struct ocf_metadata_iface metadata_hash_iface = {
*/
.get_hash = ocf_metadata_hash_get_hash,
.set_hash = ocf_metadata_hash_set_hash,
- .flush_hash = ocf_metadata_hash_flush_hash,
.entries_hash = ocf_metadata_hash_entries_hash,
/*
@@ -2309,14 +2534,12 @@ static const struct ocf_metadata_iface metadata_hash_iface = {
*/
.get_cleaning_policy = ocf_metadata_hash_get_cleaning_policy,
.set_cleaning_policy = ocf_metadata_hash_set_cleaning_policy,
- .flush_cleaning_policy = ocf_metadata_hash_flush_cleaning_policy,
/*
* Eviction Policy
*/
.get_eviction_policy = ocf_metadata_hash_get_eviction_policy,
.set_eviction_policy = ocf_metadata_hash_set_eviction_policy,
- .flush_eviction_policy = ocf_metadata_hash_flush_eviction_policy,
};
/*******************************************************************************
diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c
index 020694e..f1fb315 100644
--- a/src/metadata/metadata_io.c
+++ b/src/metadata/metadata_io.c
@@ -32,7 +32,7 @@
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
-static void metadata_io_write_i_asynch_end(struct metadata_io_request *request,
+static void metadata_io_i_asynch_end(struct metadata_io_request *request,
int error);
static int ocf_restart_meta_io(struct ocf_request *req);
@@ -64,9 +64,12 @@ static void metadata_io_read_i_atomic_end(struct ocf_io *io, int error)
/*
* Iterative read request
+ * TODO: Make this function asynchronous to enable async recovery
+ * in atomic mode.
*/
-int metadata_io_read_i_atomic(ocf_cache_t cache,
- ocf_metadata_atomic_io_event_t hndl)
+int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue,
+ void *context, ocf_metadata_atomic_io_event_t drain_hndl,
+ ocf_metadata_io_end_t compl_hndl)
{
uint64_t i;
uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE;
@@ -84,7 +87,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache,
/* Allocate one 4k page for metadata*/
data = ctx_data_alloc(cache->owner, 1);
if (!data)
- return -ENOMEM;
+ return -OCF_ERR_NO_MEM;
count = io_sectors_count;
for (i = 0; i < io_sectors_count; i += curr_count) {
@@ -100,7 +103,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache,
/* Allocate new IO */
io = ocf_new_cache_io(cache);
if (!io) {
- result = -ENOMEM;
+ result = -OCF_ERR_NO_MEM;
break;
}
@@ -131,7 +134,7 @@ int metadata_io_read_i_atomic(ocf_cache_t cache,
break;
}
- result |= hndl(cache, i, curr_count, data);
+ result |= drain_hndl(cache, i, curr_count, data);
if (result)
break;
@@ -143,41 +146,59 @@ int metadata_io_read_i_atomic(ocf_cache_t cache,
/* Memory free */
ctx_data_free(cache->owner, data);
- return result;
+ compl_hndl(cache, context, result);
+
+ return 0;
}
-static void metadata_io_write_i_asynch_cmpl(struct ocf_io *io, int error)
+static void metadata_io_i_asynch_cmpl(struct ocf_io *io, int error)
{
struct metadata_io_request *request = io->priv1;
- metadata_io_write_i_asynch_end(request, error);
+ metadata_io_i_asynch_end(request, error);
ocf_io_put(io);
}
-static int ocf_restart_meta_io(struct ocf_request *req)
+static void metadata_io_req_fill(struct metadata_io_request *meta_io_req)
{
- struct ocf_io *io;
- struct metadata_io_request *meta_io_req;
- ocf_cache_t cache;
+ ocf_cache_t cache = meta_io_req->cache;
int i;
- int ret;
- cache = req->cache;
- meta_io_req = req->priv;
-
- /* Fill with the latest metadata. */
- OCF_METADATA_LOCK_RD();
for (i = 0; i < meta_io_req->count; i++) {
meta_io_req->on_meta_fill(cache, meta_io_req->data,
meta_io_req->page + i, meta_io_req->context);
-
}
+}
+
+static void metadata_io_req_drain(struct metadata_io_request *meta_io_req)
+{
+ ocf_cache_t cache = meta_io_req->cache;
+ int i;
+
+ for (i = 0; i < meta_io_req->count; i++) {
+ meta_io_req->on_meta_drain(cache, meta_io_req->data,
+ meta_io_req->page + i, meta_io_req->context);
+ }
+}
+
+static int ocf_restart_meta_io(struct ocf_request *req)
+{
+ struct metadata_io_request *meta_io_req = req->priv;
+	ocf_cache_t cache;
+ struct ocf_io *io;
+ int ret;
+
+ cache = req->cache;
+
+ /* Fill with the latest metadata. */
+ OCF_METADATA_LOCK_RD();
+ metadata_io_req_fill(meta_io_req);
OCF_METADATA_UNLOCK_RD();
io = ocf_new_cache_io(cache);
if (!io) {
- metadata_io_write_i_asynch_end(meta_io_req, -ENOMEM);
+ metadata_io_i_asynch_end(meta_io_req, -OCF_ERR_NO_MEM);
return 0;
}
@@ -187,11 +208,11 @@ static int ocf_restart_meta_io(struct ocf_request *req)
PAGES_TO_BYTES(meta_io_req->count),
OCF_WRITE, 0, 0);
- ocf_io_set_cmpl(io, meta_io_req, NULL, metadata_io_write_i_asynch_cmpl);
+ ocf_io_set_cmpl(io, meta_io_req, NULL, metadata_io_i_asynch_cmpl);
ret = ocf_io_set_data(io, meta_io_req->data, 0);
if (ret) {
ocf_io_put(io);
- metadata_io_write_i_asynch_end(meta_io_req, ret);
+ metadata_io_i_asynch_end(meta_io_req, ret);
return ret;
}
ocf_volume_submit_io(io);
@@ -201,7 +222,7 @@ static int ocf_restart_meta_io(struct ocf_request *req)
/*
* Iterative asynchronous write callback
*/
-static void metadata_io_write_i_asynch_end(struct metadata_io_request *request,
+static void metadata_io_i_asynch_end(struct metadata_io_request *request,
int error)
{
struct metadata_io_request_asynch *a_req;
@@ -218,6 +239,9 @@ static void metadata_io_write_i_asynch_end(struct metadata_io_request *request,
if (error) {
request->error |= error;
request->asynch->error |= error;
+ } else {
+ if (request->fl_req.rw == OCF_READ)
+ metadata_io_req_drain(request);
}
if (env_atomic_dec_return(&request->req_remaining))
@@ -262,15 +286,15 @@ static void metadata_io_req_error(ocf_cache_t cache,
/*
* Iterative write request asynchronously
*/
-int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
+static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
void *context, uint32_t page, uint32_t count,
- ocf_metadata_io_event_t fill_hndl,
- ocf_metadata_io_hndl_on_write_t compl_hndl)
+ ocf_metadata_io_event_t io_hndl,
+ ocf_metadata_io_end_t compl_hndl)
{
uint32_t curr_count, written;
uint32_t max_count = metadata_io_max_page(cache);
uint32_t io_count = OCF_DIV_ROUND_UP(count, max_count);
- uint32_t i, i_fill;
+ uint32_t i;
int error = 0, ret;
struct ocf_io *io;
@@ -322,12 +346,14 @@ int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
a_req->reqs[i].context = context;
a_req->reqs[i].page = page + written;
a_req->reqs[i].count = curr_count;
- a_req->reqs[i].on_meta_fill = fill_hndl;
+ a_req->reqs[i].on_meta_fill = io_hndl;
+ a_req->reqs[i].on_meta_drain = io_hndl;
a_req->reqs[i].fl_req.io_if = &meta_restart_if;
a_req->reqs[i].fl_req.io_queue = queue;
a_req->reqs[i].fl_req.cache = cache;
a_req->reqs[i].fl_req.priv = &a_req->reqs[i];
a_req->reqs[i].fl_req.info.internal = true;
+ a_req->reqs[i].fl_req.rw = dir;
/*
* We don't want allocate map for this request in
@@ -355,20 +381,17 @@ int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
break;
}
- for (i_fill = 0; i_fill < curr_count; i_fill++) {
- fill_hndl(cache, a_req->reqs[i].data,
- page + written + i_fill,
- context);
- }
+ if (dir == OCF_WRITE)
+ metadata_io_req_fill(&a_req->reqs[i]);
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(a_req->reqs[i].page),
PAGES_TO_BYTES(a_req->reqs[i].count),
- OCF_WRITE, 0, 0);
+ dir, 0, 0);
ocf_io_set_cmpl(io, &a_req->reqs[i], NULL,
- metadata_io_write_i_asynch_cmpl);
+ metadata_io_i_asynch_cmpl);
error = ocf_io_set_data(io, a_req->reqs[i].data, 0);
if (error) {
ocf_io_put(io);
@@ -423,6 +446,24 @@ int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
return error;
}
+int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
+ void *context, uint32_t page, uint32_t count,
+ ocf_metadata_io_event_t fill_hndl,
+ ocf_metadata_io_end_t compl_hndl)
+{
+ return metadata_io_i_asynch(cache, queue, OCF_WRITE, context,
+ page, count, fill_hndl, compl_hndl);
+}
+
+int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
+ void *context, uint32_t page, uint32_t count,
+ ocf_metadata_io_event_t drain_hndl,
+ ocf_metadata_io_end_t compl_hndl)
+{
+ return metadata_io_i_asynch(cache, queue, OCF_READ, context,
+ page, count, drain_hndl, compl_hndl);
+}
+
int ocf_metadata_io_init(ocf_cache_t cache)
{
return ocf_metadata_updater_init(cache);
@@ -432,206 +473,3 @@ void ocf_metadata_io_deinit(ocf_cache_t cache)
{
ocf_metadata_updater_stop(cache);
}
-
-static void metadata_io_end(struct ocf_io *io, int error)
-{
- struct metadata_io *mio = io->priv1;
- ctx_data_t *data = ocf_io_get_data(io);
- uint32_t page = BYTES_TO_PAGES(io->addr);
- uint32_t count = BYTES_TO_PAGES(io->bytes);
- ocf_cache_t cache = mio->cache;
- uint32_t i = 0;
-
- if (error) {
- mio->error |= error;
- goto out;
- }
-
- for (i = 0; mio->dir == OCF_READ && i < count; i++) {
- mio->error |= mio->hndl_fn(cache, data, page + i,
- mio->hndl_cntx);
- }
-
-out:
- ctx_data_free(cache->owner, data);
- ocf_io_put(io);
-
- if (env_atomic_dec_return(&mio->req_remaining))
- return;
-
- env_completion_complete(&mio->completion);
-}
-
-static int metadata_submit_io(
- ocf_cache_t cache,
- struct metadata_io *mio,
- uint32_t count,
- uint32_t written)
-{
- ctx_data_t *data;
- struct ocf_io *io;
- int err;
- int i;
-
- /* Allocate IO */
- io = ocf_new_cache_io(cache);
- if (!io) {
- err = -ENOMEM;
- goto error;
- }
-
- /* Allocate data buffer for this IO */
- data = ctx_data_alloc(cache->owner, count);
- if (!data) {
- err = -ENOMEM;
- goto put_io;
- }
-
- /* Fill data */
- for (i = 0; mio->dir == OCF_WRITE && i < count; i++) {
- err = mio->hndl_fn(cache, data,
- mio->page + written + i, mio->hndl_cntx);
- if (err)
- goto free_data;
- }
-
- /* Setup IO */
- ocf_io_configure(io,
- PAGES_TO_BYTES(mio->page + written),
- PAGES_TO_BYTES(count),
- mio->dir, 0, 0);
- ocf_io_set_cmpl(io, mio, NULL, metadata_io_end);
- err = ocf_io_set_data(io, data, 0);
- if (err)
- goto free_data;
-
- /* Submit IO */
- env_atomic_inc(&mio->req_remaining);
- ocf_volume_submit_io(io);
-
- return 0;
-
-free_data:
- ctx_data_free(cache->owner, data);
-put_io:
- ocf_io_put(io);
-error:
- mio->error = err;
- return err;
-}
-
-
-/*
- *
- */
-static int metadata_io(struct metadata_io *mio)
-{
- uint32_t max_count = metadata_io_max_page(mio->cache);
- uint32_t this_count, written = 0;
- uint32_t count = mio->count;
- unsigned char step = 0;
- int err;
-
- ocf_cache_t cache = mio->cache;
-
- /* Check direction value correctness */
- switch (mio->dir) {
- case OCF_WRITE:
- case OCF_READ:
- break;
- default:
- return -EINVAL;
- }
-
- env_atomic_set(&mio->req_remaining, 1);
- env_completion_init(&mio->completion);
-
- while (count) {
- this_count = OCF_MIN(count, max_count);
-
- err = metadata_submit_io(cache, mio, this_count, written);
- if (err)
- break;
-
- /* Update counters */
- count -= this_count;
- written += this_count;
-
- OCF_COND_RESCHED(step, 128);
- }
-
- if (env_atomic_dec_return(&mio->req_remaining) == 0)
- env_completion_complete(&mio->completion);
-
- /* Wait for all IO to be finished */
- env_completion_wait(&mio->completion);
-
- return mio->error;
-}
-
-/*
- *
- */
-int metadata_io_write_i(ocf_cache_t cache,
- uint32_t page, uint32_t count,
- ocf_metadata_io_event_t hndl_fn, void *hndl_cntx)
-{
- struct metadata_io mio = {
- .dir = OCF_WRITE,
- .cache = cache,
- .page = page,
- .count = count,
- .hndl_fn = hndl_fn,
- .hndl_cntx = hndl_cntx,
- };
-
- return metadata_io(&mio);
-}
-
-/*
- *
- */
-int metadata_io_read_i(ocf_cache_t cache,
- uint32_t page, uint32_t count,
- ocf_metadata_io_event_t hndl_fn, void *hndl_cntx)
-{
- struct metadata_io mio = {
- .dir = OCF_READ,
- .cache = cache,
- .page = page,
- .count = count,
- .hndl_fn = hndl_fn,
- .hndl_cntx = hndl_cntx,
- };
-
- return metadata_io(&mio);
-}
-
-/*
- *
- */
-static int metadata_io_write_fill(ocf_cache_t cache,
- ctx_data_t *data, uint32_t page, void *context)
-{
- ctx_data_wr_check(cache->owner, data, context, PAGE_SIZE);
- return 0;
-}
-
-/*
- * Write request
- */
-int metadata_io_write(ocf_cache_t cache,
- void *data, uint32_t page)
-{
- struct metadata_io mio = {
- .dir = OCF_WRITE,
- .cache = cache,
- .page = page,
- .count = 1,
- .hndl_fn = metadata_io_write_fill,
- .hndl_cntx = data,
- };
-
-
- return metadata_io(&mio);
-}
diff --git a/src/metadata/metadata_io.h b/src/metadata/metadata_io.h
index fb2d355..13544f8 100644
--- a/src/metadata/metadata_io.h
+++ b/src/metadata/metadata_io.h
@@ -36,7 +36,7 @@ typedef int (*ocf_metadata_io_event_t)(ocf_cache_t cache,
* @param error - error
* @param page - page that was written
*/
-typedef void (*ocf_metadata_io_hndl_on_write_t)(ocf_cache_t cache,
+typedef void (*ocf_metadata_io_end_t)(ocf_cache_t cache,
void *context, int error);
struct metadata_io_request_asynch;
@@ -50,9 +50,9 @@ struct metadata_io_request {
uint32_t page;
uint32_t count;
ocf_metadata_io_event_t on_meta_fill;
+ ocf_metadata_io_event_t on_meta_drain;
env_atomic req_remaining;
ctx_data_t *data;
- env_completion completion;
int error;
struct metadata_io_request_asynch *asynch;
env_atomic finished;
@@ -69,21 +69,6 @@ struct metadata_io_request_atomic {
int error;
};
-/*
- *
- */
-struct metadata_io {
- int error;
- int dir;
- ocf_cache_t cache;
- uint32_t page;
- uint32_t count;
- env_completion completion;
- env_atomic req_remaining;
- ocf_metadata_io_event_t hndl_fn;
- void *hndl_cntx;
-};
-
/*
* Asynchronous IO request context
*/
@@ -96,7 +81,7 @@ struct metadata_io_request_asynch {
env_atomic req_remaining;
env_atomic req_active;
uint32_t page;
- ocf_metadata_io_hndl_on_write_t on_complete;
+ ocf_metadata_io_end_t on_complete;
};
/**
@@ -110,70 +95,59 @@ struct metadata_io_request_asynch {
* @retval 0 Success
* @retval Non-zero Error which will bee finally returned to the caller
*/
-typedef int (*ocf_metadata_atomic_io_event_t)(
- ocf_cache_t cache, uint64_t sector_addr,
+typedef int (*ocf_metadata_atomic_io_event_t)(void *priv, uint64_t sector_addr,
uint32_t sector_no, ctx_data_t *data);
/**
- * @brief Write page request
+ * @brief Iterative asynchronous read of atomic metadata
*
* @param cache - Cache instance
- * @param data - Data to be written for specified page
- * @param page - Page of SSD (cache device) where data has to be placed
- * @return 0 - No errors, otherwise error occurred
- */
-int metadata_io_write(ocf_cache_t cache,
- void *data, uint32_t page);
-
-int metadata_io_read_i_atomic(ocf_cache_t cache,
- ocf_metadata_atomic_io_event_t hndl);
-
-/**
- * @brief Iterative pages write
- *
- * @param cache - Cache instance
- * @param page - Start page of SSD (cache device) where data will be written
- * @param count - Counts of page to be processed
- * @param hndl_fn - Fill callback is called to fill each pages with data
- * @param hndl_cntx - Caller context which is passed on fill callback request
+ * @param queue - Queue to be used for IO
+ * @param context - Read context
+ * @param drain_hndl - Drain callback
+ * @param compl_hndl - All IOs completed callback
*
* @return 0 - No errors, otherwise error occurred
*/
-int metadata_io_write_i(ocf_cache_t cache,
- uint32_t page, uint32_t count,
- ocf_metadata_io_event_t hndl_fn, void *hndl_cntx);
-
-/**
- * * @brief Iterative pages read
- *
- * @param cache - Cache instance
- * @param page - Start page of SSD (cache device) of data will be read
- * @param count - Counts of page to be processed
- * @param hndl_fn - Callback function is called on each page read completion
- * @param hndl_cntx - Caller context passed during handle function call
- *
- * @return 0 - No errors, otherwise error occurred
- */
-int metadata_io_read_i(ocf_cache_t cache,
- uint32_t page, uint32_t count,
- ocf_metadata_io_event_t hndl_fn, void *hndl_cntx);
+int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue,
+ void *context, ocf_metadata_atomic_io_event_t drain_hndl,
+ ocf_metadata_io_end_t compl_hndl);
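
As an illustration, a drain/completion pair for metadata_io_read_i_atomic()
might look like the sketch below. The callback signatures are the ones
declared above; the context struct and handler names are hypothetical, and
the per-sector layout mirrors the recovery drain in metadata_hash.c:

/* Sketch only - hypothetical handlers for metadata_io_read_i_atomic() */
struct my_atomic_read_ctx {
	ocf_cache_t cache;
	uint64_t sectors_seen;
};

static int my_atomic_drain(void *priv, uint64_t sector_addr,
		uint32_t sector_no, ctx_data_t *data)
{
	struct my_atomic_read_ctx *ctx = priv;
	struct ocf_atomic_metadata meta;
	uint32_t i;

	/* The buffer holds one atomic metadata entry per sector */
	for (i = 0; i < sector_no; i++) {
		ctx_data_rd_check(ctx->cache->owner, &meta, data, sizeof(meta));
		ctx->sectors_seen++;
	}

	return 0;
}

static void my_atomic_complete(ocf_cache_t cache, void *priv, int error)
{
	/* All sectors drained (or an error occurred) - resume the caller here */
}

Both handlers are then passed to metadata_io_read_i_atomic() together with
cache->mngt_queue and a pointer to the context, as
ocf_medatata_hash_load_atomic_metadata() does in metadata_hash.c.
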
/**
* @brief Iterative asynchronous pages write
*
* @param cache - Cache instance
+ * @param queue - Queue to be used for IO
* @param context - Read context
* @param page - Start page of SSD (cache device) where data will be written
* @param count - Counts of page to be processed
- * @param fill - Fill callback
- * @param complete - All IOs completed callback
+ * @param fill_hndl - Fill callback
+ * @param compl_hndl - All IOs completed callback
*
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
void *context, uint32_t page, uint32_t count,
ocf_metadata_io_event_t fill_hndl,
- ocf_metadata_io_hndl_on_write_t compl_hndl);
+ ocf_metadata_io_end_t compl_hndl);
+
+/**
+ * @brief Iterative asynchronous pages read
+ *
+ * @param cache - Cache instance
+ * @param queue - Queue to be used for IO
+ * @param context - Read context
+ * @param page - Start page of SSD (cache device) from which data will be read
+ * @param count - Counts of page to be processed
+ * @param drain_hndl - Drain callback
+ * @param compl_hndl - All IOs completed callback
+ *
+ * @return 0 - No errors, otherwise error occurred
+ */
+int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
+ void *context, uint32_t page, uint32_t count,
+ ocf_metadata_io_event_t drain_hndl,
+ ocf_metadata_io_end_t compl_hndl);
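
A page-granularity drain handler for metadata_io_read_i_asynch() follows the
ocf_metadata_io_event_t signature. The sketch below uses hypothetical names
and consumes one full page per call, the same way the RAW drain in
metadata_raw.c does:

/* Sketch only - hypothetical caller of metadata_io_read_i_asynch() */
struct my_page_read_ctx {
	uint8_t page_buf[PAGE_SIZE];
};

static int my_page_drain(ocf_cache_t cache, ctx_data_t *data,
		uint32_t page, void *context)
{
	struct my_page_read_ctx *ctx = context;

	/* Consume exactly one page of the IO buffer per callback */
	ctx_data_rd_check(cache->owner, ctx->page_buf, data, PAGE_SIZE);

	return 0;
}

static void my_pages_read_complete(ocf_cache_t cache, void *context, int error)
{
	/* All requested pages were drained (or an error occurred) */
}

The pair is passed as drain_hndl and compl_hndl, e.g.
metadata_io_read_i_asynch(cache, cache->mngt_queue, &ctx, first_page, count,
my_page_drain, my_pages_read_complete), matching _raw_ram_load_all() in
metadata_raw.c.
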
/**
* Function for initializing metadata io.
diff --git a/src/metadata/metadata_raw.c b/src/metadata/metadata_raw.c
index db172ba..d3b9292 100644
--- a/src/metadata/metadata_raw.c
+++ b/src/metadata/metadata_raw.c
@@ -197,31 +197,23 @@ static int _raw_ram_set(ocf_cache_t cache,
return _RAW_RAM_SET(raw, line, data);
}
-/*
- * RAM Implementation - Flush specified element from SSD
- */
-static int _raw_ram_flush(ocf_cache_t cache,
- struct ocf_metadata_raw *raw, ocf_cache_line_t line)
-{
- OCF_DEBUG_PARAM(cache, "Line = %u", line);
- OCF_DEBUG_PARAM(cache, "Page = %llu", _RAW_RAM_PAGE(raw, line));
-
- ENV_BUG_ON(!_raw_is_valid(raw, line, raw->entry_size));
-
- return metadata_io_write(cache, _RAW_RAM_ADDR_PAGE(raw, line),
- _RAW_RAM_PAGE_SSD(raw, line));
-}
+struct _raw_ram_load_all_context {
+ struct ocf_metadata_raw *raw;
+ ocf_metadata_end_t cmpl;
+ void *priv;
+};
/*
* RAM Implementation - Load all IO callback
*/
-static int _raw_ram_load_all_io(ocf_cache_t cache,
- ctx_data_t *data, uint32_t page, void *context)
+static int _raw_ram_load_all_drain(ocf_cache_t cache,
+ ctx_data_t *data, uint32_t page, void *priv)
{
+ struct _raw_ram_load_all_context *context = priv;
+ struct ocf_metadata_raw *raw = context->raw;
+ uint32_t size = raw->entry_size * raw->entries_in_page;
ocf_cache_line_t line;
uint32_t raw_page;
- struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *) context;
- uint32_t size = raw->entry_size * raw->entries_in_page;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
ENV_BUG_ON(size > PAGE_SIZE);
@@ -238,28 +230,60 @@ static int _raw_ram_load_all_io(ocf_cache_t cache,
return 0;
}
+static void _raw_ram_load_all_complete(ocf_cache_t cache,
+ void *priv, int error)
+{
+ struct _raw_ram_load_all_context *context = priv;
+
+ context->cmpl(context->priv, error);
+ env_vfree(context);
+}
+
/*
* RAM Implementation - Load all metadata elements from SSD
*/
-static int _raw_ram_load_all(ocf_cache_t cache,
- struct ocf_metadata_raw *raw)
+static void _raw_ram_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
{
+ struct _raw_ram_load_all_context *context;
+ int result;
+
OCF_DEBUG_TRACE(cache);
- return metadata_io_read_i(cache, raw->ssd_pages_offset,
- raw->ssd_pages, _raw_ram_load_all_io, raw);
+ context = env_vmalloc(sizeof(*context));
+ if (!context) {
+ cmpl(priv, -OCF_ERR_NO_MEM);
+ return;
+ }
+
+ context->raw = raw;
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ result = metadata_io_read_i_asynch(cache, cache->mngt_queue, context,
+ raw->ssd_pages_offset, raw->ssd_pages,
+ _raw_ram_load_all_drain, _raw_ram_load_all_complete);
+ if (result)
+ _raw_ram_load_all_complete(cache, context, result);
}
+struct _raw_ram_flush_all_context {
+ struct ocf_metadata_raw *raw;
+ ocf_metadata_end_t cmpl;
+ void *priv;
+};
+
/*
* RAM Implementation - Flush IO callback - Fill page
*/
static int _raw_ram_flush_all_fill(ocf_cache_t cache,
- ctx_data_t *data, uint32_t page, void *context)
+ ctx_data_t *data, uint32_t page, void *priv)
{
+ struct _raw_ram_flush_all_context *context = priv;
+ struct ocf_metadata_raw *raw = context->raw;
+ uint32_t size = raw->entry_size * raw->entries_in_page;
ocf_cache_line_t line;
uint32_t raw_page;
- struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *)context;
- uint32_t size = raw->entry_size * raw->entries_in_page;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
ENV_BUG_ON(size > PAGE_SIZE);
@@ -275,16 +299,41 @@ static int _raw_ram_flush_all_fill(ocf_cache_t cache,
return 0;
}
+static void _raw_ram_flush_all_complete(ocf_cache_t cache,
+ void *priv, int error)
+{
+ struct _raw_ram_flush_all_context *context = priv;
+
+ context->cmpl(context->priv, error);
+ env_vfree(context);
+}
+
/*
* RAM Implementation - Flush all elements
*/
-static int _raw_ram_flush_all(ocf_cache_t cache,
- struct ocf_metadata_raw *raw)
+static void _raw_ram_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
{
+ struct _raw_ram_flush_all_context *context;
+ int result;
+
OCF_DEBUG_TRACE(cache);
- return metadata_io_write_i(cache, raw->ssd_pages_offset,
- raw->ssd_pages, _raw_ram_flush_all_fill, raw);
+ context = env_vmalloc(sizeof(*context));
+ if (!context) {
+ cmpl(priv, -OCF_ERR_NO_MEM);
+ return;
+ }
+
+ context->raw = raw;
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
+ raw->ssd_pages_offset, raw->ssd_pages,
+ _raw_ram_flush_all_fill, _raw_ram_flush_all_complete);
+ if (result)
+ _raw_ram_flush_all_complete(cache, context, result);
}
/*
@@ -515,7 +564,6 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.set = _raw_ram_set,
.rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access,
- .flush = _raw_ram_flush,
.load_all = _raw_ram_load_all,
.flush_all = _raw_ram_flush_all,
.flush_mark = _raw_ram_flush_mark,
@@ -531,7 +579,6 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.set = raw_dynamic_set,
.rd_access = raw_dynamic_rd_access,
.wr_access = raw_dynamic_wr_access,
- .flush = raw_dynamic_flush,
.load_all = raw_dynamic_load_all,
.flush_all = raw_dynamic_flush_all,
.flush_mark = raw_dynamic_flush_mark,
@@ -547,7 +594,6 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.set = _raw_ram_set,
.rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access,
- .flush = raw_volatile_flush,
.load_all = raw_volatile_load_all,
.flush_all = raw_volatile_flush_all,
.flush_mark = raw_volatile_flush_mark,
@@ -563,7 +609,6 @@ static const struct raw_iface IRAW[metadata_raw_type_max] = {
.set = _raw_ram_set,
.rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access,
- .flush = _raw_ram_flush,
.load_all = _raw_ram_load_all,
.flush_all = _raw_ram_flush_all,
.flush_mark = raw_atomic_flush_mark,
diff --git a/src/metadata/metadata_raw.h b/src/metadata/metadata_raw.h
index c562b19..f665605 100644
--- a/src/metadata/metadata_raw.h
+++ b/src/metadata/metadata_raw.h
@@ -81,13 +81,13 @@ struct ocf_metadata_raw {
* RAW container interface
*/
struct raw_iface {
- int (*init)(struct ocf_cache *cache,
+ int (*init)(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
- int (*deinit)(struct ocf_cache *cache,
+ int (*deinit)(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
- size_t (*size_of)(struct ocf_cache *cache,
+ size_t (*size_of)(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
/**
@@ -98,43 +98,40 @@ struct raw_iface {
*
* @return Number of pages (4 kiB) on cache device
*/
- uint32_t (*size_on_ssd)(struct ocf_cache *cache,
+ uint32_t (*size_on_ssd)(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
- uint32_t (*checksum)(struct ocf_cache *cache,
+ uint32_t (*checksum)(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
- int (*get)(struct ocf_cache *cache,
+ int (*get)(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size);
- int (*set)(struct ocf_cache *cache,
+ int (*set)(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size);
- const void* (*rd_access)(struct ocf_cache *cache,
+ const void* (*rd_access)(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size);
- void* (*wr_access)(struct ocf_cache *cache,
+ void* (*wr_access)(ocf_cache_t cache,
struct ocf_metadata_raw *raw,
ocf_cache_line_t line, uint32_t size);
- int (*flush)(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw, ocf_cache_line_t line);
+ void (*load_all)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
- int (*load_all)(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw);
+ void (*flush_all)(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
- int (*flush_all)(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw);
-
- void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *req,
+ void (*flush_mark)(ocf_cache_t cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start,
uint8_t stop);
- int (*flush_do_asynch)(struct ocf_cache *cache, struct ocf_request *req,
+ int (*flush_do_asynch)(ocf_cache_t cache, struct ocf_request *req,
struct ocf_metadata_raw *raw,
ocf_req_end_t complete);
};
@@ -146,7 +143,7 @@ struct raw_iface {
* @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error
*/
-int ocf_metadata_raw_init(struct ocf_cache *cache,
+int ocf_metadata_raw_init(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
/**
@@ -156,7 +153,7 @@ int ocf_metadata_raw_init(struct ocf_cache *cache,
* @param raw - RAW descriptor
* @return 0 - Operation success, otherwise error
*/
-int ocf_metadata_raw_deinit(struct ocf_cache *cache,
+int ocf_metadata_raw_deinit(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
/**
@@ -166,7 +163,7 @@ int ocf_metadata_raw_deinit(struct ocf_cache *cache,
* @param raw RAW descriptor
* @return Memory footprint
*/
-static inline size_t ocf_metadata_raw_size_of(struct ocf_cache *cache,
+static inline size_t ocf_metadata_raw_size_of(ocf_cache_t cache,
struct ocf_metadata_raw *raw)
{
if (!raw->iface)
@@ -208,7 +205,7 @@ static inline uint32_t ocf_metadata_raw_checksum(struct ocf_cache* cache,
* @param size - Size of data
* @return 0 - Operation success, otherwise error
*/
-static inline int ocf_metadata_raw_get(struct ocf_cache *cache,
+static inline int ocf_metadata_raw_get(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data,
uint32_t size)
{
@@ -225,7 +222,7 @@ static inline int ocf_metadata_raw_get(struct ocf_cache *cache,
* @param size - Size of data
* @return 0 - Point to accessed data, in case of error NULL
*/
-static inline void *ocf_metadata_raw_wr_access(struct ocf_cache *cache,
+static inline void *ocf_metadata_raw_wr_access(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size)
{
@@ -243,7 +240,7 @@ static inline void *ocf_metadata_raw_wr_access(struct ocf_cache *cache,
* @return 0 - Point to accessed data, in case of error NULL
*/
static inline const void *ocf_metadata_raw_rd_access(
- struct ocf_cache *cache, struct ocf_metadata_raw *raw,
+ ocf_cache_t cache, struct ocf_metadata_raw *raw,
ocf_cache_line_t line, uint32_t size)
{
return raw->iface->rd_access(cache, raw, line, size);
@@ -259,38 +256,26 @@ static inline const void *ocf_metadata_raw_rd_access(
* @param size - Size of data
* @return 0 - Operation success, otherwise error
*/
-static inline int ocf_metadata_raw_set(struct ocf_cache *cache,
+static inline int ocf_metadata_raw_set(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data,
uint32_t size)
{
return raw->iface->set(cache, raw, line, data, size);
}
-/**
- * @brief Flush specified element of metadata into SSD
- *
- * @param cache - Cache instance
- * @param raw - RAW descriptor
- * @param line - Cache line to be flushed
- * @return 0 - Operation success, otherwise error
- */
-static inline int ocf_metadata_raw_flush(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw, ocf_cache_line_t line)
-{
- return raw->iface->flush(cache, raw, line);
-}
-
/**
 * @brief Load all entries from SSD cache (cache device)
*
* @param cache - Cache instance
* @param raw - RAW descriptor
- * @return 0 - Operation success, otherwise error
+ * @param cmpl - Completion callback
+ * @param priv - Completion callback context
*/
-static inline int ocf_metadata_raw_load_all(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw)
+static inline void ocf_metadata_raw_load_all(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
{
- return raw->iface->load_all(cache, raw);
+ raw->iface->load_all(cache, raw, cmpl, priv);
}
/**
@@ -298,23 +283,25 @@ static inline int ocf_metadata_raw_load_all(struct ocf_cache *cache,
*
* @param cache - Cache instance
* @param raw - RAW descriptor
- * @return 0 - Operation success, otherwise error
+ * @param cmpl - Completion callback
+ * @param priv - Completion callback context
*/
-static inline int ocf_metadata_raw_flush_all(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw)
+static inline void ocf_metadata_raw_flush_all(ocf_cache_t cache,
+ struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
{
- return raw->iface->flush_all(cache, raw);
+ raw->iface->flush_all(cache, raw, cmpl, priv);
}
-static inline void ocf_metadata_raw_flush_mark(struct ocf_cache *cache,
+static inline void ocf_metadata_raw_flush_mark(ocf_cache_t cache,
struct ocf_metadata_raw *raw, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
raw->iface->flush_mark(cache, req, map_idx, to_state, start, stop);
}
-static inline int ocf_metadata_raw_flush_do_asynch(struct ocf_cache *cache,
+static inline int ocf_metadata_raw_flush_do_asynch(ocf_cache_t cache,
struct ocf_request *req, struct ocf_metadata_raw *raw,
ocf_req_end_t complete)
{
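
Usage note (not part of the patch): callers of the wrappers above no longer branch on a return value; the status arrives through the completion callback. A minimal, hypothetical illustration (my_loader, my_load_section and my_load_section_end are illustrative names):

struct my_loader {			/* hypothetical caller state */
	ocf_metadata_end_t cmpl;
	void *priv;
};

static void my_load_section_end(void *priv, int error)
{
	struct my_loader *loader = priv;

	/* propagate the result to whoever started the load; on success
	 * this is where the next section would be loaded */
	loader->cmpl(loader->priv, error);
}

/* old style:  if (ocf_metadata_raw_load_all(cache, raw)) goto err;
 * new style:  the status is reported in the completion callback */
static void my_load_section(ocf_cache_t cache, struct ocf_metadata_raw *raw,
		struct my_loader *loader)
{
	ocf_metadata_raw_load_all(cache, raw, my_load_section_end, loader);
}
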
diff --git a/src/metadata/metadata_raw_dynamic.c b/src/metadata/metadata_raw_dynamic.c
index f2b54e7..65573d5 100644
--- a/src/metadata/metadata_raw_dynamic.c
+++ b/src/metadata/metadata_raw_dynamic.c
@@ -8,7 +8,10 @@
#include "metadata_raw.h"
#include "metadata_raw_dynamic.h"
#include "metadata_io.h"
+#include "../engine/cache_engine.h"
+#include "../engine/engine_common.h"
#include "../utils/utils_io.h"
+#include "../utils/utils_req.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_RAW_DEBUG 0
@@ -59,7 +62,7 @@ struct _raw_ctrl {
void *pages[];
};
-static void *_raw_dynamic_get_item(struct ocf_cache *cache,
+static void *_raw_dynamic_get_item(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line, uint32_t size)
{
void *new = NULL;
@@ -110,7 +113,7 @@ _raw_dynamic_get_item_SKIP:
/*
* RAM DYNAMIC Implementation - De-Initialize
*/
-int raw_dynamic_deinit(struct ocf_cache *cache,
+int raw_dynamic_deinit(ocf_cache_t cache,
struct ocf_metadata_raw *raw)
{
uint32_t i;
@@ -133,7 +136,7 @@ int raw_dynamic_deinit(struct ocf_cache *cache,
/*
* RAM DYNAMIC Implementation - Initialize
*/
-int raw_dynamic_init(struct ocf_cache *cache,
+int raw_dynamic_init(ocf_cache_t cache,
struct ocf_metadata_raw *raw)
{
struct _raw_ctrl *ctrl;
@@ -163,7 +166,7 @@ int raw_dynamic_init(struct ocf_cache *cache,
/*
* RAW DYNAMIC Implementation - Size of
*/
-size_t raw_dynamic_size_of(struct ocf_cache *cache,
+size_t raw_dynamic_size_of(ocf_cache_t cache,
struct ocf_metadata_raw *raw)
{
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
@@ -185,7 +188,7 @@ size_t raw_dynamic_size_of(struct ocf_cache *cache,
/*
* RAW DYNAMIC Implementation - Size on SSD
*/
-uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache,
+uint32_t raw_dynamic_size_on_ssd(ocf_cache_t cache,
struct ocf_metadata_raw *raw)
{
const size_t alignment = 128 * KiB / PAGE_SIZE;
@@ -196,7 +199,7 @@ uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache,
/*
* RAM DYNAMIC Implementation - Checksum
*/
-uint32_t raw_dynamic_checksum(struct ocf_cache *cache,
+uint32_t raw_dynamic_checksum(ocf_cache_t cache,
struct ocf_metadata_raw *raw)
{
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
@@ -216,7 +219,7 @@ uint32_t raw_dynamic_checksum(struct ocf_cache *cache,
/*
* RAM DYNAMIC Implementation - Get
*/
-int raw_dynamic_get(struct ocf_cache *cache,
+int raw_dynamic_get(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size)
{
@@ -234,7 +237,7 @@ int raw_dynamic_get(struct ocf_cache *cache,
/*
* RAM DYNAMIC Implementation - Set
*/
-int raw_dynamic_set(struct ocf_cache *cache,
+int raw_dynamic_set(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size)
{
@@ -251,7 +254,7 @@ int raw_dynamic_set(struct ocf_cache *cache,
/*
* RAM DYNAMIC Implementation - access
*/
-const void *raw_dynamic_rd_access(struct ocf_cache *cache,
+const void *raw_dynamic_rd_access(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size)
{
@@ -261,144 +264,240 @@ const void *raw_dynamic_rd_access(struct ocf_cache *cache,
/*
* RAM DYNAMIC Implementation - access
*/
-void *raw_dynamic_wr_access(struct ocf_cache *cache,
+void *raw_dynamic_wr_access(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size)
{
return _raw_dynamic_get_item(cache, raw, line, size);
}
-int raw_dynamic_flush(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw, ocf_cache_line_t line)
-{
- uint32_t page = _RAW_DYNAMIC_PAGE(raw, line);
- struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
-
- OCF_DEBUG_PARAM(cache, "Line %u, page = %u", line, page);
-
- ENV_BUG_ON(!ctrl->pages[page]);
-
- return metadata_io_write(cache, ctrl->pages[page],
- raw->ssd_pages_offset + page);
-}
-
/*
* RAM DYNAMIC Implementation - Load all
*/
#define RAW_DYNAMIC_LOAD_PAGES 128
-int raw_dynamic_load_all(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw)
-{
- struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
- uint64_t i = 0, i_page = 0;
- uint64_t count = RAW_DYNAMIC_LOAD_PAGES;
- int error = 0, cmp;
-
+struct raw_dynamic_load_all_context {
+ struct ocf_metadata_raw *raw;
+ struct ocf_request *req;
+ ocf_cache_t cache;
struct ocf_io *io;
- ctx_data_t *data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES);
- char *page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
- char *zpage = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
+ ctx_data_t *data;
+ uint8_t *zpage;
+ uint8_t *page;
+ uint64_t i;
+ int error;
- if (!data || !page || !zpage) {
- ctx_data_free(cache->owner, data);
- env_free(page);
- env_free(zpage);
- return -ENOMEM;
+ ocf_metadata_end_t cmpl;
+ void *priv;
+};
+
+static void raw_dynamic_load_all_complete(
+ struct raw_dynamic_load_all_context *context, int error)
+{
+ context->cmpl(context->priv, error);
+
+ ocf_req_put(context->req);
+ env_free(context->page);
+ env_free(context->zpage);
+ ctx_data_free(context->cache->owner, context->data);
+ env_vfree(context);
+}
+
+static int raw_dynamic_load_all_update(struct ocf_request *req);
+
+static const struct ocf_io_if _io_if_raw_dynamic_load_all_update = {
+ .read = raw_dynamic_load_all_update,
+ .write = raw_dynamic_load_all_update,
+};
+
+static void raw_dynamic_load_all_read_end(struct ocf_io *io, int error)
+{
+ struct raw_dynamic_load_all_context *context = io->priv1;
+
+ ocf_io_put(io);
+
+ if (error) {
+ raw_dynamic_load_all_complete(context, error);
+ return;
}
+ context->req->io_if = &_io_if_raw_dynamic_load_all_update;
+ ocf_engine_push_req_front(context->req, true);
+}
+
+static int raw_dynamic_load_all_read(struct ocf_request *req)
+{
+ struct raw_dynamic_load_all_context *context = req->priv;
+ struct ocf_metadata_raw *raw = context->raw;
+ uint64_t count;
+ int result;
+
+ count = OCF_MIN(RAW_DYNAMIC_LOAD_PAGES, raw->ssd_pages - context->i);
+
+ /* Allocate IO */
+ context->io = ocf_new_cache_io(context->cache);
+ if (!context->io) {
+ raw_dynamic_load_all_complete(context, -OCF_ERR_NO_MEM);
+ return 0;
+ }
+
+ /* Setup IO */
+ result = ocf_io_set_data(context->io, context->data, 0);
+ if (result) {
+ ocf_io_put(context->io);
+ raw_dynamic_load_all_complete(context, result);
+ return 0;
+ }
+ ocf_io_configure(context->io,
+ PAGES_TO_BYTES(raw->ssd_pages_offset + context->i),
+ PAGES_TO_BYTES(count), OCF_READ, 0, 0);
+
+ ocf_io_set_queue(context->io, req->io_queue);
+ ocf_io_set_cmpl(context->io, context, NULL,
+ raw_dynamic_load_all_read_end);
+
+ /* Submit IO */
+ ocf_volume_submit_io(context->io);
+
+ return 0;
+}
+
+static const struct ocf_io_if _io_if_raw_dynamic_load_all_read = {
+ .read = raw_dynamic_load_all_read,
+ .write = raw_dynamic_load_all_read,
+};
+
+static int raw_dynamic_load_all_update(struct ocf_request *req)
+{
+ struct raw_dynamic_load_all_context *context = req->priv;
+ struct ocf_metadata_raw *raw = context->raw;
+ struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
+ ocf_cache_t cache = context->cache;
+ uint64_t count = OCF_MIN(RAW_DYNAMIC_LOAD_PAGES, raw->ssd_pages - context->i);
+ uint64_t i_page;
+ int result = 0;
+ int cmp;
+
+ /* Reset head of data buffer */
+ ctx_data_seek_check(context->cache->owner, context->data,
+ ctx_data_seek_begin, 0);
+
+ for (i_page = 0; i_page < count; i_page++, context->i++) {
+ if (!context->page) {
+ context->page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
+ if (!context->page) {
+ /* Allocation error */
+ result = -OCF_ERR_NO_MEM;
+ break;
+ }
+ }
+
+ ctx_data_rd_check(cache->owner, context->page,
+ context->data, PAGE_SIZE);
+
+ result = env_memcmp(context->zpage, PAGE_SIZE, context->page,
+ PAGE_SIZE, &cmp);
+ if (result)
+ break;
+
+ /* When page is zero set, no need to allocate space for it */
+ if (cmp == 0) {
+ OCF_DEBUG_PARAM(cache, "Zero loaded %llu", i);
+ continue;
+ }
+
+ OCF_DEBUG_PARAM(cache, "Non-zero loaded %llu", i);
+
+ ctrl->pages[context->i] = context->page;
+ context->page = NULL;
+
+ env_atomic_inc(&ctrl->count);
+ }
+
+ if (result || context->i >= raw->ssd_pages) {
+ raw_dynamic_load_all_complete(context, result);
+ return 0;
+ }
+
+ context->req->io_if = &_io_if_raw_dynamic_load_all_read;
+ ocf_engine_push_req_front(context->req, true);
+
+ return 0;
+}
+
+void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct raw_dynamic_load_all_context *context;
+ int result;
+
OCF_DEBUG_TRACE(cache);
- /* Loading, need to load all metadata, when page is zero set, no need
- * to allocate space for it
- */
-
- while (i < raw->ssd_pages) {
- if (i + count > raw->ssd_pages)
- count = raw->ssd_pages - i;
-
- /* Allocate IO */
- io = ocf_new_cache_io(cache);
- if (!io) {
- error = -ENOMEM;
- break;
- }
-
- /* Setup IO */
- error = ocf_io_set_data(io, data, 0);
- if (error) {
- ocf_io_put(io);
- break;
- }
- ocf_io_configure(io,
- PAGES_TO_BYTES(raw->ssd_pages_offset + i),
- PAGES_TO_BYTES(count), OCF_READ, 0, 0);
-
- /* Submit IO */
- error = ocf_submit_io_wait(io);
- ocf_io_put(io);
- io = NULL;
-
- if (error)
- break;
-
- /* Reset head of data buffer */
- ctx_data_seek_check(cache->owner, data,
- ctx_data_seek_begin, 0);
-
- for (i_page = 0; i_page < count; i_page++, i++) {
- if (!page) {
- page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
- if (!page) {
- /* Allocation error */
- error = -ENOMEM;
- break;
- }
- }
-
- ctx_data_rd_check(cache->owner, page, data, PAGE_SIZE);
-
- error = env_memcmp(zpage, PAGE_SIZE, page,
- PAGE_SIZE, &cmp);
- if (error)
- break;
-
- if (cmp == 0) {
- OCF_DEBUG_PARAM(cache, "Zero loaded %llu", i);
- continue;
- }
-
- OCF_DEBUG_PARAM(cache, "Non-zero loaded %llu", i);
-
- ctrl->pages[i] = page;
- page = NULL;
-
- env_atomic_inc(&ctrl->count);
- }
-
- if (error)
- break;
+ context = env_vzalloc(sizeof(*context));
+ if (!context) {
+ cmpl(priv, -OCF_ERR_NO_MEM);
+ return;
}
- env_free(zpage);
- env_free(page);
- ctx_data_free(cache->owner, data);
+ context->raw = raw;
+ context->cache = cache;
+ context->cmpl = cmpl;
+ context->priv = priv;
- return error;
+ context->data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES);
+ if (!context->data) {
+ result = -OCF_ERR_NO_MEM;
+ goto err_data;
+ }
+
+ context->zpage = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
+ if (!context->zpage) {
+ result = -OCF_ERR_NO_MEM;
+ goto err_zpage;
+ }
+
+ context->req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0);
+ if (!context->req) {
+ result = -OCF_ERR_NO_MEM;
+ goto err_req;
+ }
+
+ context->req->info.internal = true;
+ context->req->priv = context;
+ context->req->io_if = &_io_if_raw_dynamic_load_all_read;
+
+ ocf_engine_push_req_front(context->req, true);
+ return;
+
+err_req:
+ env_free(context->zpage);
+err_zpage:
+ ctx_data_free(cache->owner, context->data);
+err_data:
+ env_vfree(context);
+ cmpl(priv, result);
}
/*
-* RAM DYNAMIC Implementation - Flush all
-*/
+ * RAM DYNAMIC Implementation - Flush all
+ */
+
+struct raw_dynamic_flush_all_context {
+ struct ocf_metadata_raw *raw;
+ ocf_metadata_end_t cmpl;
+ void *priv;
+};
+
/*
* RAM Implementation - Flush IO callback - Fill page
*/
-static int _raw_dynamic_flush_all_fill(struct ocf_cache *cache,
- ctx_data_t *data, uint32_t page, void *context)
+static int raw_dynamic_flush_all_fill(ocf_cache_t cache,
+ ctx_data_t *data, uint32_t page, void *priv)
{
- uint32_t raw_page;
- struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *)context;
+ struct raw_dynamic_flush_all_context *context = priv;
+ struct ocf_metadata_raw *raw = context->raw;
struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
+ uint32_t raw_page;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
@@ -417,18 +516,45 @@ static int _raw_dynamic_flush_all_fill(struct ocf_cache *cache,
return 0;
}
-int raw_dynamic_flush_all(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw)
+static void raw_dynamic_flush_all_complete(ocf_cache_t cache,
+ void *priv, int error)
{
+ struct raw_dynamic_flush_all_context *context = priv;
+
+ context->cmpl(context->priv, error);
+ env_vfree(context);
+}
+
+void raw_dynamic_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
+{
+ struct raw_dynamic_flush_all_context *context;
+ int result;
+
OCF_DEBUG_TRACE(cache);
- return metadata_io_write_i(cache, raw->ssd_pages_offset,
- raw->ssd_pages, _raw_dynamic_flush_all_fill, raw);
+
+ context = env_vmalloc(sizeof(*context));
+ if (!context) {
+ cmpl(priv, -OCF_ERR_NO_MEM);
+ return;
+ }
+
+ context->raw = raw;
+ context->cmpl = cmpl;
+ context->priv = priv;
+
+ result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
+ raw->ssd_pages_offset, raw->ssd_pages,
+ raw_dynamic_flush_all_fill,
+ raw_dynamic_flush_all_complete);
+ if (result)
+ raw_dynamic_flush_all_complete(cache, context, result);
}
/*
* RAM DYNAMIC Implementation - Mark to Flush
*/
-void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+void raw_dynamic_flush_mark(ocf_cache_t cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
ENV_BUG();
@@ -437,7 +563,7 @@ void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
/*
* RAM DYNAMIC Implementation - Do flushing asynchronously
*/
-int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
+int raw_dynamic_flush_do_asynch(ocf_cache_t cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, ocf_req_end_t complete)
{
ENV_BUG();
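
The load path above replaces the old blocking read loop with a small state machine driven by the request engine: an IO completion only re-queues the request with a different io_if, so the per-page processing always runs in queue context. A stripped-down sketch of that pattern, with hypothetical names (my_ctx, my_read_end, my_step_process, my_process_if) and assuming the same internal helpers used in the patch:

struct my_ctx {				/* hypothetical per-operation state */
	struct ocf_request *req;
	ocf_metadata_end_t cmpl;
	void *priv;
};

static int my_step_process(struct ocf_request *req);

static const struct ocf_io_if my_process_if = {
	.read = my_step_process,
	.write = my_step_process,
};

/* IO completion - do no real work here, just hand the request back
 * to the engine so the next step runs in queue context */
static void my_read_end(struct ocf_io *io, int error)
{
	struct my_ctx *ctx = io->priv1;

	ocf_io_put(io);

	if (error) {
		ctx->cmpl(ctx->priv, error);
		return;
	}

	ctx->req->io_if = &my_process_if;
	ocf_engine_push_req_front(ctx->req, true);
}

static int my_step_process(struct ocf_request *req)
{
	struct my_ctx *ctx = req->priv;

	/* consume the data read in the previous step here; either report
	 * completion via ctx->cmpl() or switch req->io_if back to the
	 * read step and push the request again to loop */
	return 0;
}
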
diff --git a/src/metadata/metadata_raw_dynamic.h b/src/metadata/metadata_raw_dynamic.h
index c56e7d6..cd02d83 100644
--- a/src/metadata/metadata_raw_dynamic.h
+++ b/src/metadata/metadata_raw_dynamic.h
@@ -14,91 +14,85 @@
/*
* RAW DYNAMIC - Initialize
*/
-int raw_dynamic_init(struct ocf_cache *cache,
+int raw_dynamic_init(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - De-Initialize
*/
-int raw_dynamic_deinit(struct ocf_cache *cache,
+int raw_dynamic_deinit(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - Get size of memory footprint of this RAW metadata container
*/
-size_t raw_dynamic_size_of(struct ocf_cache *cache,
+size_t raw_dynamic_size_of(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC Implementation - Size on SSD
*/
-uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache,
+uint32_t raw_dynamic_size_on_ssd(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC Implementation - Checksum
*/
-uint32_t raw_dynamic_checksum(struct ocf_cache *cache,
+uint32_t raw_dynamic_checksum(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
/*
* RAW DYNAMIC - Get specified entry
*/
-int raw_dynamic_get(struct ocf_cache *cache,
+int raw_dynamic_get(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size);
/*
* RAW DYNAMIC - Set specified entry
*/
-int raw_dynamic_set(struct ocf_cache *cache,
+int raw_dynamic_set(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size);
/*
* RAW DYNAMIC - Read only access for specified entry
*/
-const void *raw_dynamic_rd_access(struct ocf_cache *cache,
+const void *raw_dynamic_rd_access(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size);
/*
* RAW DYNAMIC - Write access for specified entry
*/
-void *raw_dynamic_wr_access(struct ocf_cache *cache,
+void *raw_dynamic_wr_access(ocf_cache_t cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size);
-/*
- * RAW DYNAMIC - Flush specified entry
- */
-int raw_dynamic_flush(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw, ocf_cache_line_t line);
-
/*
* RAW DYNAMIC - Load all metadata of this RAW metadata container
* from cache device
*/
-int raw_dynamic_load_all(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw);
+void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
/*
* RAW DYNAMIC - Flush all metadata of this RAW metadata container
* to cache device
*/
-int raw_dynamic_flush_all(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw);
+void raw_dynamic_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
/*
* RAW DYNAMIC - Mark specified entry to be flushed
*/
-void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+void raw_dynamic_flush_mark(ocf_cache_t cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
/*
* DYNAMIC Implementation - Do Flush Asynchronously
*/
-int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, struct ocf_request *req,
+int raw_dynamic_flush_do_asynch(ocf_cache_t cache, struct ocf_request *req,
struct ocf_metadata_raw *raw, ocf_req_end_t complete);
diff --git a/src/metadata/metadata_raw_volatile.c b/src/metadata/metadata_raw_volatile.c
index f87b2b8..2b7d8ee 100644
--- a/src/metadata/metadata_raw_volatile.c
+++ b/src/metadata/metadata_raw_volatile.c
@@ -12,7 +12,7 @@
/*
* RAW volatile Implementation - Size on SSD
*/
-uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache,
+uint32_t raw_volatile_size_on_ssd(ocf_cache_t cache,
struct ocf_metadata_raw *raw)
{
return 0;
@@ -21,43 +21,34 @@ uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache,
/*
* RAW volatile Implementation - Checksum
*/
-uint32_t raw_volatile_checksum(struct ocf_cache *cache,
+uint32_t raw_volatile_checksum(ocf_cache_t cache,
struct ocf_metadata_raw *raw)
{
return 0;
}
-/*
- * RAW volatile Implementation - Flush specified element to SSD
- */
-int raw_volatile_flush(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw, ocf_cache_line_t line)
-{
- return 0;
-}
-
/*
* RAW volatile Implementation - Load all metadata elements from SSD
*/
-int raw_volatile_load_all(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw)
+void raw_volatile_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
{
- return -ENOTSUP;
+ cmpl(priv, -ENOTSUP);
}
/*
* RAM Implementation - Flush all elements
*/
-int raw_volatile_flush_all(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw)
+void raw_volatile_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv)
{
- return 0;
+ cmpl(priv, 0);
}
/*
* RAM RAM Implementation - Mark to Flush
*/
-void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+void raw_volatile_flush_mark(ocf_cache_t cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
}
@@ -65,7 +56,7 @@ void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
/*
* RAM RAM Implementation - Do Flush asynchronously
*/
-int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
+int raw_volatile_flush_do_asynch(ocf_cache_t cache,
struct ocf_request *req, struct ocf_metadata_raw *raw,
ocf_req_end_t complete)
{
diff --git a/src/metadata/metadata_raw_volatile.h b/src/metadata/metadata_raw_volatile.h
index 416fc9a..db8b276 100644
--- a/src/metadata/metadata_raw_volatile.h
+++ b/src/metadata/metadata_raw_volatile.h
@@ -9,43 +9,37 @@
/*
* RAW volatile Implementation - Size on SSD
*/
-uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache,
+uint32_t raw_volatile_size_on_ssd(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
/*
* RAW volatile Implementation - Checksum
*/
-uint32_t raw_volatile_checksum(struct ocf_cache *cache,
+uint32_t raw_volatile_checksum(ocf_cache_t cache,
struct ocf_metadata_raw *raw);
-/*
- * RAW volatile Implementation - Flush specified element to SSD
- */
-int raw_volatile_flush(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw, ocf_cache_line_t line);
-
/*
* RAW volatile Implementation - Load all metadata elements from SSD
*/
-int raw_volatile_load_all(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw);
+void raw_volatile_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
/*
* RAW volatile Implementation - Flush all elements
*/
-int raw_volatile_flush_all(struct ocf_cache *cache,
- struct ocf_metadata_raw *raw);
+void raw_volatile_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
+ ocf_metadata_end_t cmpl, void *priv);
/*
* RAM RAW volatile Implementation - Mark to Flush
*/
-void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *req,
+void raw_volatile_flush_mark(ocf_cache_t cache, struct ocf_request *req,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
/*
* RAM RAW volatile Implementation - Do Flush asynchronously
*/
-int raw_volatile_flush_do_asynch(struct ocf_cache *cache,
+int raw_volatile_flush_do_asynch(ocf_cache_t cache,
struct ocf_request *req, struct ocf_metadata_raw *raw,
ocf_req_end_t complete);
diff --git a/src/metadata/metadata_structs.h b/src/metadata/metadata_structs.h
index c44e753..81a9998 100644
--- a/src/metadata/metadata_structs.h
+++ b/src/metadata/metadata_structs.h
@@ -6,6 +6,7 @@
#ifndef __METADATA_STRUCTS_H__
#define __METADATA_STRUCTS_H__
+#include "metadata_common.h"
#include "../eviction/eviction.h"
#include "../cleaning/cleaning.h"
#include "../ocf_request.h"
@@ -161,33 +162,31 @@ struct ocf_metadata_iface {
* @brief Load metadata from cache device
*
* @param[in] cache - Cache instance
- * @return 0 - Operation success otherwise failure
+ * @param[in] cmpl - Completion callback
+ * @param[in] priv - Completion callback context
*/
- int (*load_all)(struct ocf_cache *cache);
+ void (*load_all)(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
/**
* @brief Load metadata from recovery procedure
- * recovery
+ *
* @param[in] cache - Cache instance
- * @return 0 - Operation success otherwise failure
+ * @param[in] cmpl - Completion callback
+ * @param[in] priv - Completion callback context
*/
- int (*load_recovery)(struct ocf_cache *cache);
+ void (*load_recovery)(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
/**
 * @brief Flush metadata into cache device
*
* @param[in] cache - Cache instance
- * @return 0 - Operation success otherwise failure
+ * @param[in] cmpl - Completion callback
+ * @param[in] priv - Completion callback context
*/
- int (*flush_all)(struct ocf_cache *cache);
-
- /**
- * @brief Flush metadata for specified cache line
- *
- * @param[in] cache - Cache instance
- * @param[in] line - cache line which to be flushed
- */
- void (*flush)(struct ocf_cache *cache, ocf_cache_line_t line);
+ void (*flush_all)(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
/**
* @brief Mark specified cache line to be flushed
@@ -217,12 +216,15 @@ struct ocf_metadata_iface {
enum ocf_metadata_shutdown_status (*get_shutdown_status)(
struct ocf_cache *cache);
- int (*set_shutdown_status)(struct ocf_cache *cache,
- enum ocf_metadata_shutdown_status shutdown_status);
+ void (*set_shutdown_status)(ocf_cache_t cache,
+ enum ocf_metadata_shutdown_status shutdown_status,
+ ocf_metadata_end_t cmpl, void *priv);
- int (*load_superblock)(struct ocf_cache *cache);
+ void (*load_superblock)(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
- int (*flush_superblock)(struct ocf_cache *cache);
+ void (*flush_superblock)(ocf_cache_t cache,
+ ocf_metadata_end_t cmpl, void *priv);
uint64_t (*get_reserved_lba)(struct ocf_cache *cache);
@@ -249,16 +251,6 @@ struct ocf_metadata_iface {
ocf_cache_line_t line,
union eviction_policy_meta *eviction_policy);
- /**
- * @brief Flush eviction policy for given cache line
- *
- * @param[in] cache - Cache instance
- * @param[in] line - Cache line for which flushing has to be performed
- */
- void (*flush_eviction_policy)(struct ocf_cache *cache,
- ocf_cache_line_t line);
-
-
/**
* @brief Get cleaning policy
*
@@ -282,15 +274,6 @@ struct ocf_metadata_iface {
ocf_cache_line_t line,
struct cleaning_policy_meta *cleaning_policy);
- /**
- * @brief Flush cleaning policy for given cache line
- *
- * @param[in] cache - Cache instance
- * @param[in] line - Cache line for which flushing has to be performed
- */
- void (*flush_cleaning_policy)(struct ocf_cache *cache,
- ocf_cache_line_t line);
-
/**
* @brief Get hash table for specified index
*
@@ -312,15 +295,6 @@ struct ocf_metadata_iface {
void (*set_hash)(struct ocf_cache *cache,
ocf_cache_line_t index, ocf_cache_line_t line);
- /**
- * @brief Flush has table for specified index
- *
- * @param[in] cache - Cache instance
- * @param[in] index - Hash table index
- */
- void (*flush_hash)(struct ocf_cache *cache,
- ocf_cache_line_t index);
-
/**
* @brief Get hash table entries
*
diff --git a/src/metadata/metadata_superblock.h b/src/metadata/metadata_superblock.h
index 9bbd3dc..2af7c47 100644
--- a/src/metadata/metadata_superblock.h
+++ b/src/metadata/metadata_superblock.h
@@ -66,36 +66,26 @@ static inline void ocf_metadata_set_shutdown_status(ocf_cache_t cache,
enum ocf_metadata_shutdown_status shutdown_status,
ocf_metadata_end_t cmpl, void *priv)
{
- int result;
-
- result = cache->metadata.iface.set_shutdown_status(cache,
- shutdown_status);
- cmpl(priv, result);
+ cache->metadata.iface.set_shutdown_status(cache, shutdown_status,
+ cmpl, priv);
}
static inline void ocf_metadata_load_superblock(ocf_cache_t cache,
ocf_metadata_end_t cmpl, void *priv)
{
- int result;
-
- result = cache->metadata.iface.load_superblock(cache);
- cmpl(priv, result);
+ cache->metadata.iface.load_superblock(cache, cmpl, priv);
}
static inline void ocf_metadata_flush_superblock(ocf_cache_t cache,
ocf_metadata_end_t cmpl, void *priv)
{
- int result = 0;
+ /* TODO: Shouldn't it be checked by the caller? */
+ if (!cache->device) {
+ cmpl(priv, 0);
+ return;
+ }
- if (cache->device)
- result = cache->metadata.iface.flush_superblock(cache);
-
- cmpl(priv, result);
-}
-
-static inline uint64_t ocf_metadata_get_reserved_lba(ocf_cache_t cache)
-{
- return cache->metadata.iface.get_reserved_lba(cache);
+ cache->metadata.iface.flush_superblock(cache, cmpl, priv);
}
#endif /* METADATA_SUPERBLOCK_H_ */
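
Illustration (not part of the patch): with every wrapper above taking a completion callback, management code chains the steps through callbacks rather than return codes. A hypothetical two-step sequence (my_stop_ctx and the my_stop_* functions are illustrative only):

struct my_stop_ctx {			/* hypothetical stop-sequence state */
	ocf_cache_t cache;
	enum ocf_metadata_shutdown_status status;
};

static void my_stop_sb_flushed(void *priv, int error)
{
	struct my_stop_ctx *ctx = priv;

	/* last step of the hypothetical sequence; error carries the
	 * status of the superblock flush */
	(void)ctx;
	(void)error;
}

static void my_stop_status_set(void *priv, int error)
{
	struct my_stop_ctx *ctx = priv;

	if (error) {
		my_stop_sb_flushed(ctx, error);
		return;
	}

	ocf_metadata_flush_superblock(ctx->cache, my_stop_sb_flushed, ctx);
}

static void my_stop_metadata(struct my_stop_ctx *ctx)
{
	ocf_metadata_set_shutdown_status(ctx->cache, ctx->status,
			my_stop_status_set, ctx);
}
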
diff --git a/src/utils/utils_io.c b/src/utils/utils_io.c
index 7ae5ce5..a7092af 100644
--- a/src/utils/utils_io.c
+++ b/src/utils/utils_io.c
@@ -369,31 +369,3 @@ void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
}
ocf_volume_submit_io(io);
}
-
-struct ocf_submit_io_wait_context {
- env_completion complete;
- int error;
- env_atomic req_remaining;
-};
-
-static void ocf_submit_io_wait_end(struct ocf_io *io, int error)
-{
- struct ocf_submit_io_wait_context *context = io->priv1;
-
- context->error |= error;
- env_completion_complete(&context->complete);
-}
-
-int ocf_submit_io_wait(struct ocf_io *io)
-{
- struct ocf_submit_io_wait_context context;
-
- ENV_BUG_ON(env_memset(&context, sizeof(context), 0));
- env_completion_init(&context.complete);
- context.error = 0;
-
- ocf_io_set_cmpl(io, &context, NULL, ocf_submit_io_wait_end);
- ocf_volume_submit_io(io);
- env_completion_wait(&context.complete);
- return context.error;
-}
diff --git a/src/utils/utils_io.h b/src/utils/utils_io.h
index 7254ee5..2a31f16 100644
--- a/src/utils/utils_io.h
+++ b/src/utils/utils_io.h
@@ -45,8 +45,6 @@ static inline int ocf_io_overlaps(uint32_t start1, uint32_t count1,
typedef void (*ocf_submit_end_t)(void *priv, int error);
-int ocf_submit_io_wait(struct ocf_io *io);
-
void ocf_submit_volume_flush(ocf_volume_t volume,
ocf_submit_end_t cmpl, void *priv);
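
With ocf_submit_io_wait() removed, the remaining pattern is fully asynchronous: set a completion, set a queue, submit, and continue in the callback, as raw_dynamic_load_all_read() does in this patch. A minimal hypothetical sketch, reusing only calls already present above (my_submit_read and my_io_end are illustrative names):

static void my_io_end(struct ocf_io *io, int error)
{
	void *ctx = io->priv1;		/* whatever the submitter stored */

	ocf_io_put(io);

	/* continue processing here - no thread blocks waiting for the IO */
	(void)ctx;
	(void)error;
}

static int my_submit_read(ocf_cache_t cache, ocf_queue_t queue,
		ctx_data_t *data, uint64_t addr, uint32_t bytes, void *ctx)
{
	struct ocf_io *io;
	int result;

	io = ocf_new_cache_io(cache);
	if (!io)
		return -OCF_ERR_NO_MEM;

	result = ocf_io_set_data(io, data, 0);
	if (result) {
		ocf_io_put(io);
		return result;
	}

	ocf_io_configure(io, addr, bytes, OCF_READ, 0, 0);
	ocf_io_set_queue(io, queue);
	ocf_io_set_cmpl(io, ctx, NULL, my_io_end);

	ocf_volume_submit_io(io);

	return 0;
}
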
diff --git a/tests/unit/tests/metadata/metadata_io.c/metadata_io.c b/tests/unit/tests/metadata/metadata_io.c/metadata_io.c
deleted file mode 100644
index 73e9fec..0000000
--- a/tests/unit/tests/metadata/metadata_io.c/metadata_io.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright(c) 2012-2018 Intel Corporation
- * SPDX-License-Identifier: BSD-3-Clause-Clear
- */
-
-//src/metadata/metadata_io.c
-//metadata_io
-
-#undef static
-#undef inline
-
-/*
- * This headers must be in test source file. It's important that cmocka.h is
- * last.
- */
-#include
-#include
-#include
-#include
-#include "print_desc.h"
-
-/*
- * Headers from tested target.
- */
-#include "metadata.h"
-#include "metadata_io.h"
-#include "../engine/cache_engine.h"
-#include "../engine/engine_common.h"
-#include "../engine/engine_bf.h"
-#include "../utils/utils_cache_line.h"
-#include "../utils/utils_io.h"
-#include "../utils/utils_allocator.h"
-#include "../ocf_def_priv.h"
-
-uint32_t __wrap_metadata_io_max_page(struct ocf_cache *cache)
-{
- function_called();
- return mock();
-}
-
-void __wrap_env_cond_resched(void)
-{
-}
-
-void __wrap_ocf_engine_push_req_front(struct ocf_request *req)
-{
-}
-
-int __wrap_ocf_realloc(void **mem, size_t size, size_t count, size_t *limit)
-{
-}
-
-int __wrap_ocf_realloc_cp(void **mem, size_t size, size_t count, size_t *limit)
-{
-}
-
-ocf_ctx_t __wrap_ocf_cache_get_ctx(ocf_cache_t cache)
-{
-}
-
-int __wrap_ocf_log_raw(ocf_logger_t logger, ocf_logger_lvl_t lvl,
- const char *fmt, ...)
-{
-}
-
-int __wrap_metadata_submit_io(
- struct ocf_cache *cache,
- struct metadata_io *mio,
- uint32_t count,
- uint32_t written)
-{
-}
-
-int __wrap_ocf_restart_meta_io(struct ocf_request *req)
-{
-}
-
-static void metadata_io_test01(void **state)
-{
- int result;
- struct metadata_io mio;
- struct ocf_cache cache;
-
- print_test_description("Check error no. when invalid operation is given");
-
- mio.dir = -1;
- mio.cache = &cache;
-
- expect_function_call(__wrap_metadata_io_max_page);
- will_return(__wrap_metadata_io_max_page, 256);
-
- result = metadata_io(&mio);
-
- assert_int_equal(result, -EINVAL);
-}
-
-
-int main(void)
-{
- const struct CMUnitTest tests[] = {
- cmocka_unit_test(metadata_io_test01)
- };
-
- return cmocka_run_group_tests(tests, NULL, NULL);
-}
diff --git a/tests/unit/tests/metadata/metadata_io.c/metadata_submit_io.c b/tests/unit/tests/metadata/metadata_io.c/metadata_submit_io.c
deleted file mode 100644
index 36faeab..0000000
--- a/tests/unit/tests/metadata/metadata_io.c/metadata_submit_io.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright(c) 2012-2018 Intel Corporation
- * SPDX-License-Identifier: BSD-3-Clause-Clear
- */
-
-//src/metadata/metadata_io.c
-//metadata_submit_io
-
-#undef static
-#undef inline
-
-/*
- * This headers must be in test source file. It's important that cmocka.h is
- * last.
- */
-#include
-#include
-#include
-#include
-#include "print_desc.h"
-
-/*
- * Headers from tested target.
- */
-#include "metadata.h"
-#include "metadata_io.h"
-#include "../engine/cache_engine.h"
-#include "../engine/engine_common.h"
-#include "../engine/engine_bf.h"
-#include "../utils/utils_cache_line.h"
-#include "../utils/utils_allocator.h"
-#include "../ocf_def_priv.h"
-
-struct ocf_io *__wrap_ocf_new_cache_io(struct ocf_cache *cache)
-{
- function_called();
- return mock_ptr_type(struct ocf_io *);
-}
-
-int __wrap_metadata_io_write_fill(struct ocf_cache *cache,
- ctx_data_t *data, uint32_t page, void *context)
-{
- function_called();
- return mock();
-}
-
-void *__wrap_ctx_data_alloc(ocf_ctx_t ctx, uint32_t pages)
-{
- function_called();
- return mock_ptr_type(void*);
-}
-
-void __wrap_ocf_io_configure(struct ocf_io *io, uint64_t addr,
- uint32_t bytes, uint32_t dir, uint32_t class, uint64_t flags)
-{
- function_called();
-}
-
-void __wrap_metadata_io_end(struct ocf_io *io, int error)
-{
-}
-
-void __wrap_ocf_io_set_cmpl(struct ocf_io *io, void *context,
- void *context2, ocf_end_io_t fn)
-{
- function_called();
-}
-
-int __wrap_ocf_io_set_data(struct ocf_io *io, ctx_data_t *data,
- uint32_t offset)
-{
- function_called();
- return mock();
-}
-
-void __wrap_ocf_volume_submit_io(struct ocf_io *io)
-{
- function_called();
-}
-
-void __wrap_ctx_data_free(ocf_ctx_t ctx, ctx_data_t *data)
-{
- function_called();
-}
-
-void __wrap_ocf_io_put(struct ocf_io *io)
-{
- function_called();
-}
-
-int __wrap_ocf_restart_meta_io(struct ocf_request *req)
-{
-}
-
-void __wrap_env_atomic_inc(env_atomic *a)
-{
- function_called();
-}
-
-static void metadata_submit_io_test01(void **state)
-{
- int result;
- struct metadata_io mio;
- struct ocf_cache cache;
- uint32_t count;
- uint32_t written;
-
- print_test_description("Couldn't allocate new IO");
-
- expect_function_call(__wrap_ocf_new_cache_io);
- will_return(__wrap_ocf_new_cache_io, 0);
-
- result = metadata_submit_io(&cache, &mio, count, written);
-
- assert_int_equal(result, -ENOMEM);
- assert_int_equal(mio.error, -ENOMEM);
-}
-
-static void metadata_submit_io_test02(void **state)
-{
- int result;
- struct metadata_io mio;
- struct ocf_cache cache;
- uint32_t count;
- uint32_t written;
-
- print_test_description("Couldn't allocate data buffer for IO");
-
- expect_function_call(__wrap_ocf_new_cache_io);
- will_return(__wrap_ocf_new_cache_io, 1);
-
- expect_function_call(__wrap_ctx_data_alloc);
- will_return(__wrap_ctx_data_alloc, 0);
-
- expect_function_call(__wrap_ocf_io_put);
-
- result = metadata_submit_io(&cache, &mio, count, written);
-
- assert_int_equal(result, -ENOMEM);
- assert_int_equal(mio.error, -ENOMEM);
-}
-
-static void metadata_submit_io_test03(void **state)
-{
- int result;
- struct metadata_io mio;
- struct ocf_cache cache;
- uint32_t count;
- uint32_t written;
- int mio_err = 0;
-
- print_test_description("Write operation is performed successfully");
-
- mio.hndl_fn = __wrap_metadata_io_write_fill;
-
- mio.dir = OCF_WRITE;
- mio.error = mio_err;
- count = 1;
-
- expect_function_call(__wrap_ocf_new_cache_io);
- will_return(__wrap_ocf_new_cache_io, 1);
-
- expect_function_call(__wrap_ctx_data_alloc);
- will_return(__wrap_ctx_data_alloc, 1);
-
- expect_function_call(__wrap_metadata_io_write_fill);
- will_return(__wrap_metadata_io_write_fill, 0);
-
- expect_function_call(__wrap_ocf_io_configure);
-
- expect_function_call(__wrap_ocf_io_set_cmpl);
-
- expect_function_call(__wrap_ocf_io_set_data);
- will_return(__wrap_ocf_io_set_data, 0);
-
- expect_function_call(__wrap_env_atomic_inc);
-
- expect_function_call(__wrap_ocf_volume_submit_io);
-
- result = metadata_submit_io(&cache, &mio, count, written);
-
- assert_int_equal(result, 0);
- assert_int_equal(mio.error, mio_err);
-}
-
-static void metadata_submit_io_test04(void **state)
-{
- int result;
- int i;
- int interations_before_fail;
- struct metadata_io mio;
- struct ocf_cache cache;
- uint32_t count;
- uint32_t written;
-
- print_test_description("Write operation is performed, but if fails at 3rd iteration");
-
- mio.hndl_fn = __wrap_metadata_io_write_fill;
-
- mio.dir = OCF_WRITE;
- count = 3;
- interations_before_fail = 2;
-
- expect_function_call(__wrap_ocf_new_cache_io);
- will_return(__wrap_ocf_new_cache_io, 1);
-
- expect_function_call(__wrap_ctx_data_alloc);
- will_return(__wrap_ctx_data_alloc, 1);
-
- for (i = 0; i < interations_before_fail; i++) {
- expect_function_call(__wrap_metadata_io_write_fill);
- will_return(__wrap_metadata_io_write_fill, 0);
- }
-
- expect_function_call(__wrap_metadata_io_write_fill);
- will_return(__wrap_metadata_io_write_fill, 1);
-
- expect_function_call(__wrap_ctx_data_free);
-
- expect_function_call(__wrap_ocf_io_put);
-
- result = metadata_submit_io(&cache, &mio, count, written);
-
- assert_int_equal(result, 1);
- assert_int_equal(mio.error, 1);
-}
-
-
-/*
- * Main function. It runs tests.
- */
-int main(void)
-{
- const struct CMUnitTest tests[] = {
- cmocka_unit_test(metadata_submit_io_test01),
- cmocka_unit_test(metadata_submit_io_test02),
- cmocka_unit_test(metadata_submit_io_test03),
- cmocka_unit_test(metadata_submit_io_test04)
- };
-
- print_message("Example template for tests\n");
-
- return cmocka_run_group_tests(tests, NULL, NULL);
-}
-