Initial commit

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Robert Baldyga 2018-11-29 15:14:21 +01:00
commit a8e1ce8cc5
178 changed files with 35378 additions and 0 deletions

100
Makefile Normal file

@ -0,0 +1,100 @@
#
# Copyright(c) 2012-2018 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
PWD:=$(shell pwd)
ifneq ($(strip $(O)),)
OUTDIR:=$(shell cd $(O) && pwd)
endif
validate:
ifeq ($(strip $(OUTDIR)),)
$(error No output specified for installing sources or headers)
endif
ifeq ($(strip $(CMD)),)
INSTALL=ln -s
else ifeq ($(strip $(CMD)),cp)
INSTALL=cp
else ifeq ($(strip $(CMD)),install)
INSTALL=install
else
$(error Invalid CMD value - allowed values are cp and install)
endif
#
# Installing headers
#
INC_IN=$(shell find $(PWD)/inc -name '*.[h]' -type f)
INC_OUT=$(patsubst $(PWD)/inc/%,$(OUTDIR)/include/ocf/%,$(INC_IN))
INC_RM=$(shell find $(OUTDIR)/include/ocf -name '*.[h]' -xtype l 2>/dev/null)
inc: $(INC_OUT) $(INC_RM)
@$(MAKE) distcleandir
$(INC_OUT):
ifeq ($(strip $(OUTDIR)),)
$(error No output specified for installing headers)
endif
@echo " INSTALL $@"
@mkdir -p $(dir $@)
@$(INSTALL) $(subst $(OUTDIR)/include/ocf,$(PWD)/inc,$@) $@
$(INC_RM): validate
$(if $(shell readlink $@ | grep $(PWD)/inc), \
@echo " RM $@"; rm $@,)
#
# Installing sources
#
SRC_IN=$(shell find $(PWD)/src -name '*.[c|h]' -type f)
SRC_OUT=$(patsubst $(PWD)/src/%,$(OUTDIR)/src/ocf/%,$(SRC_IN))
SRC_RM=$(shell find $(OUTDIR)/src/ocf -name '*.[c|h]' -xtype l 2>/dev/null)
src: $(SRC_OUT) $(SRC_RM)
@$(MAKE) distcleandir
$(SRC_OUT):
ifeq ($(strip $(OUTDIR)),)
$(error No output specified for installing sources)
endif
@echo " INSTALL $@"
@mkdir -p $(dir $@)
@$(INSTALL) $(subst $(OUTDIR)/src/ocf,$(PWD)/src,$@) $@
$(SRC_RM): validate
$(if $(shell readlink $@ | grep $(PWD)/src), \
@echo " RM $@"; rm $@,)
#
# Distclean
#
dist_dir=$(foreach dir,$(shell find $(OUTDIR) -type d -empty), \
$(if $(wildcard $(subst $(OUTDIR)/src/ocf,$(PWD)/src,$(dir))),$(dir),))
distclean: validate
@rm -f $(SRC_OUT) $(INC_OUT)
@$(MAKE) distcleandir
distcleandir:
$(if $(strip $(dist_dir)), rm -r $(dist_dir),)
#
# Printing help
#
help:
$(info Available targets:)
$(info inc O=<OUTDIR> [CMD=cp|install] Install include files into specified directory)
$(info src O=<OUTDIR> [CMD=cp|install] Install source files into specified directory)
$(info distclean O=<OUTDIR> Uninstall source and headers from specified directory)
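# Example invocations (illustrative; <OUTDIR> stands for your chosen output directory):
#   make inc O=<OUTDIR>
#   make src O=<OUTDIR> CMD=install
#   make distclean O=<OUTDIR>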
doc: validate
@cd doc && rm -rf html
@cd doc && doxygen doxygen.cfg
@mkdir -p $(OUTDIR)/doc
@cd doc && mv html $(OUTDIR)/doc/ocf
.PHONY: inc src validate help distclean distcleandir doc \
$(INC_RM) $(SRC_RM) $(DIST_DIR)

294
doc/README.md Normal file

@ -0,0 +1,294 @@
# Open CAS Framework
# Contents:
- [Architecture overview](#architecture-overview)
- [Management interface](#management-interface)
- [IO path](#io-path)
# Architecture overview
Intel(R) Cache Acceleration Software (CAS) consists of:
- A platform-independent library called Open CAS Framework (OCF)
- Platform-dependent adaptation layers that enable OCF to work in different
environments, such as the Linux kernel
An example usage of OCF is the Linux kernel (see picture below).
In this case OCF operates as a block-level cache for block devices.
For this usage model OCF comes with the following adaptation layers:
- <b>Library client (top adapter)</b> - its main responsibility is creating
a cache volume representing the primary storage device. Applications can
read from and write to the cache volume block device as if it were a regular
primary storage device.
- <b>Block device data object (bottom adapter)</b> - responsible for issuing
IO operations to the underlying block device.
A system administrator can manage cache instances via the Intel CAS CLI management
utility called "casadm".
![OCF Linux deployment view](deployment-1.png)
Another example of OCF usage is a user space block-level cache for QEMU
(see picture below). In this example the following adaptation layers may exist:
- <b>CAS virtIO-blk driver for QEMU (top adapter)</b> - exposes the
primary storage device (another virtIO driver) to the guest OS via the OCF library
- <b>virtIO-blk data object (bottom adapter)</b> - enables OCF to access
data on the primary storage device or cache device via the original virtIO driver
Please note that the actual adapters depend on the environment in which OCF is
meant to run. Different bottom adapters can be delivered for the cache device
and the primary storage device. For example, the bottom adapter for the caching device may
be implemented using kernel bypass techniques, providing low-latency access to
the cache media.
![OCF deployment in QEMU example](deployment-2.png)
# Management interface
The management interface delivered with OCF enables a system administrator to:
- Configure the OCF caching library for the target environment, which includes
installation of the required platform-dependent adapters.
- Start, stop and manage existing cache instances.
- Perform observability functions (e.g. retrieving performance counters).
For more details please see the examples below:
## Library initialization example
OCF makes it possible to use it simultaneously from two independent libraries linked
into the same executable, by means of the concept of contexts. Each context has its own
set of operations which handle the specific data types used by data objects
within this context.
```c
#include "ocf.h"
/* Handle to library context */
ocf_ctx_t ctx;
/* Your context interface */
const struct ocf_ctx_ops ctx_ops = {
/* Fill your interface functions */
};
/* Your unique data object type IDs */
enum my_data_obj_type {
my_data_obj_type_1,
my_data_obj_type_2
};
/* Your data objects interface declaration */
const struct ocf_data_obj_ops my_data_obj_ops1 = {
.name = "My data object 1",
/* Fill your data object interface functions */
};
const struct ocf_data_obj_ops my_data_obj_ops2 = {
.name = "My data object 2"
/* Fill your data object interface functions */
};
int my_cache_init(void)
{
	int result;

	result = ocf_ctx_init(&ctx, &ctx_ops);
	if (result) {
		/* Cannot initialize context of OCF library */
		return result;
	}

	/* Initialization successful */
	/* Now we can register data objects */
	result |= ocf_ctx_register_data_obj_ops(ctx, &my_data_obj_ops1,
			my_data_obj_type_1);
	if (result) {
		/* Cannot register data object interface */
		goto err;
	}

	result |= ocf_ctx_register_data_obj_ops(ctx, &my_data_obj_ops2,
			my_data_obj_type_2);
	if (result) {
		/* Cannot register data object interface */
		goto err;
	}

	return 0;

err:
	/* In case of failure we destroy context and propagate error code */
	ocf_ctx_exit(ctx);
	return result;
}
```
## Cache management
The OCF library API provides management functions (@ref ocf_mngt.h). This
interface enables the user to manage cache instances. Examples:
- Start cache
```c
int result;
ocf_cache_t cache; /* Handle to your cache */
struct ocf_mngt_cache_config cfg; /* Your cache configuration */
/* Prepare your cache configuration */
/* Configure cache mode */
cfg.cache_mode = ocf_cache_mode_wt;
/* Now tell how your cache will be initialized. Select warm or cold cache */
cfg.init_mode = ocf_init_mode_init;
cfg.uuid.data = "/path/to/your/cache/or/unique/id";
/* Specify cache data object type */
cfg.data_obj_type = my_data_obj_type_1;
/* Other cache configuration */
...
/* Start cache. */
result = ocf_mngt_cache_start(ctx, &cache, &cfg);
if (!result) {
/* Your cache was created successfully */
}
```
- Add core (primary storage device) to cache
```c
int result;
ocf_core_t core; /* Handle to your core */
struct ocf_mngt_core_config cfg; /* Your core configuration */
/* Prepare core configuration */
/* Select core data object type */
cfg.data_obj_type = my_data_obj_type_2;
/* Set UUID or path of your core */
cfg.uuid.data = "/path/to/your/core/or/unique/id";
result = ocf_mngt_cache_add_core(cache, &core, &cfg);
if (!result) {
/* Your core was added successfully */
}
```
## Management interface considerations
Each device (cache or core) is assigned an ID, either automatically by OCF or
explicitly specified by the user. The ID of a cache instance can be retrieved
via @ref ocf_cache_get_id, and the ID of a core instance via @ref ocf_core_get_id
(see the snippet below).
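A minimal sketch (assuming valid cache and core handles obtained as in the other examples) of reading these IDs back:
```c
ocf_cache_id_t cache_id = ocf_cache_get_id(cache);
ocf_core_id_t core_id = ocf_core_get_id(core);
```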
Cache management operations are thread safe - it is possible to perform
cache management from many threads at a time. It is also possible to "batch"
several cache management operations and execute them under the cache management
lock. To do this the user needs to first obtain the cache management lock, perform
the management operations and finally release the lock. For reference see the example below.
```c
int my_complex_work(ocf_cache_id_t cache_id,
		ocf_core_id_t core_id)
{
	int result;
	ocf_cache_t cache; /* Handle to your cache */
	ocf_core_t core; /* Handle to your core */

	/* Get cache handle */
	result = ocf_mngt_cache_get(ctx, cache_id, &cache);
	if (result)
		return result;

	/* Lock cache */
	result = ocf_mngt_cache_lock(cache);
	if (result) {
		ocf_mngt_cache_put(cache);
		return result;
	}

	/* Get core handle */
	result = ocf_core_get(cache, core_id, &core);
	if (result) {
		result = -1;
		goto END;
	}

	/* Cache is locked, you can perform your activities */

	/* 1. Flush your core */
	result = ocf_mngt_core_flush(cache, core_id, true);
	if (result) {
		goto END;
	}

	/* 2. Your other operations, including internal actions */

	/* 3. Remove core from cache */
	result = ocf_mngt_cache_remove_core(cache, core_id, true);

END:
	ocf_mngt_cache_unlock(cache); /* Remember to unlock cache */
	ocf_mngt_cache_put(cache); /* Release cache reference */

	return result;
}
```
# IO path
Please refer to the sequence diagram below for the detailed IO flow. A typical IO
path includes:
- <b>IO allocation</b> - creating a new IO instance that will be submitted to OCF
for processing
- <b>IO configuration</b> - specifying address and length, IO class, flags and
the completion function
- <b>IO submission</b> - actual IO submission to OCF. OCF will perform a cache
lookup and, based on its results, will return data from the cache or the primary
storage device
- <b>IO completion</b> - signalled by calling the completion function specified
in the IO configuration phase
![An example of IO flow](io-path.png)
## IO submission example
```c
#include "ocf.h"
void read_end(struct ocf_io *io, int error)
{
	/* Your IO has been finished. Check the result and inform upper
	 * layers.
	 */

	/* Release IO */
	ocf_io_put(io);
}

int read(ocf_core_t core, void *data, uint64_t addr, uint32_t length)
{
	/* Allocate IO */
	struct ocf_io *io = ocf_new_io(core);

	if (!io) {
		/* Cannot allocate IO */
		return -ENOMEM;
	}

	/* Configure IO: set address, length, direction, IO class and flags */
	ocf_io_configure(io, addr, length, OCF_READ, 0, 0);

	/* Set completion context and function */
	ocf_io_set_cmpl(io, NULL, NULL, read_end);

	/* Set data */
	if (ocf_io_set_data(io, data, 0)) {
		ocf_io_put(io);
		return -EINVAL;
	}

	/* Send IO request to the cache */
	ocf_submit_io(io);

	/* That's it */
	return 0;
}
```

329
doc/doxygen.cfg Normal file

@ -0,0 +1,329 @@
# Doxyfile 1.8.6
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = "Open CAS Framework"
PROJECT_NUMBER =
PROJECT_BRIEF = OCF
PROJECT_LOGO = img/logo.png
OUTPUT_DIRECTORY = .
CREATE_SUBDIRS = NO
ALLOW_UNICODE_NAMES = NO
OUTPUT_LANGUAGE = English
BRIEF_MEMBER_DESC = YES
REPEAT_BRIEF = YES
ABBREVIATE_BRIEF = "The $name class" \
"The $name widget" \
"The $name file" \
is \
provides \
specifies \
contains \
represents \
a \
an \
the
ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
FULL_PATH_NAMES = NO
STRIP_FROM_PATH =
STRIP_FROM_INC_PATH =
SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = NO
QT_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
INHERIT_DOCS = YES
SEPARATE_MEMBER_PAGES = NO
TAB_SIZE = 8
ALIASES =
TCL_SUBST =
OPTIMIZE_OUTPUT_FOR_C = YES
OPTIMIZE_OUTPUT_JAVA = NO
OPTIMIZE_FOR_FORTRAN = NO
OPTIMIZE_OUTPUT_VHDL = NO
EXTENSION_MAPPING =
MARKDOWN_SUPPORT = YES
AUTOLINK_SUPPORT = YES
BUILTIN_STL_SUPPORT = NO
CPP_CLI_SUPPORT = NO
SIP_SUPPORT = NO
IDL_PROPERTY_SUPPORT = YES
DISTRIBUTE_GROUP_DOC = NO
GROUP_NESTED_COMPOUNDS = NO
SUBGROUPING = YES
INLINE_GROUPED_CLASSES = NO
INLINE_SIMPLE_STRUCTS = NO
TYPEDEF_HIDES_STRUCT = NO
LOOKUP_CACHE_SIZE = 0
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
EXTRACT_ALL = NO
EXTRACT_PRIVATE = NO
EXTRACT_PACKAGE = NO
EXTRACT_STATIC = NO
EXTRACT_LOCAL_CLASSES = YES
EXTRACT_LOCAL_METHODS = NO
EXTRACT_ANON_NSPACES = NO
HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
HIDE_FRIEND_COMPOUNDS = NO
HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = NO
CASE_SENSE_NAMES = NO
HIDE_SCOPE_NAMES = YES
HIDE_COMPOUND_REFERENCE= NO
SHOW_INCLUDE_FILES = YES
SHOW_GROUPED_MEMB_INC = NO
FORCE_LOCAL_INCLUDES = NO
INLINE_INFO = YES
SORT_MEMBER_DOCS = YES
SORT_BRIEF_DOCS = NO
SORT_MEMBERS_CTORS_1ST = NO
SORT_GROUP_NAMES = NO
SORT_BY_SCOPE_NAME = NO
STRICT_PROTO_MATCHING = NO
GENERATE_TODOLIST = YES
GENERATE_TESTLIST = YES
GENERATE_BUGLIST = YES
GENERATE_DEPRECATEDLIST= YES
ENABLED_SECTIONS =
MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
SHOW_FILES = YES
SHOW_NAMESPACES = YES
FILE_VERSION_FILTER =
LAYOUT_FILE =
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
QUIET = NO
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
WARN_IF_DOC_ERROR = YES
WARN_NO_PARAMDOC = NO
WARN_AS_ERROR = NO
WARN_FORMAT = "$file:$line: $text"
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
INPUT = ../inc README.md
INPUT_ENCODING = UTF-8
FILE_PATTERNS = *.c \
*.h \
*.md
RECURSIVE = YES
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS =
EXCLUDE_SYMBOLS =
EXAMPLE_PATH =
EXAMPLE_PATTERNS = *
EXAMPLE_RECURSIVE = NO
IMAGE_PATH = ./img/
INPUT_FILTER =
FILTER_PATTERNS =
FILTER_SOURCE_FILES = NO
FILTER_SOURCE_PATTERNS =
USE_MDFILE_AS_MAINPAGE = README.md
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
SOURCE_BROWSER = NO
INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = YES
REFERENCED_BY_RELATION = NO
REFERENCES_RELATION = NO
REFERENCES_LINK_SOURCE = YES
SOURCE_TOOLTIPS = YES
USE_HTAGS = NO
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
ALPHABETICAL_INDEX = YES
COLS_IN_ALPHA_INDEX = 5
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
GENERATE_HTML = YES
HTML_OUTPUT = html
HTML_FILE_EXTENSION = .html
#HTML_HEADER = header.html
#HTML_FOOTER = footer.html
HTML_STYLESHEET =
HTML_EXTRA_STYLESHEET =
HTML_EXTRA_FILES =
HTML_COLORSTYLE_HUE = 220
HTML_COLORSTYLE_SAT = 100
HTML_COLORSTYLE_GAMMA = 80
HTML_TIMESTAMP = NO
HTML_DYNAMIC_SECTIONS = NO
HTML_INDEX_NUM_ENTRIES = 100
GENERATE_DOCSET = NO
DOCSET_FEEDNAME = "Doxygen generated docs"
DOCSET_BUNDLE_ID = org.doxygen.Project
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
DOCSET_PUBLISHER_NAME = Publisher
GENERATE_HTMLHELP = NO
CHM_FILE =
HHC_LOCATION =
GENERATE_CHI = NO
CHM_INDEX_ENCODING =
BINARY_TOC = NO
TOC_EXPAND = NO
GENERATE_QHP = NO
QCH_FILE =
QHP_NAMESPACE = org.doxygen.Project
QHP_VIRTUAL_FOLDER = doc
QHP_CUST_FILTER_NAME =
QHP_CUST_FILTER_ATTRS =
QHP_SECT_FILTER_ATTRS =
QHG_LOCATION =
GENERATE_ECLIPSEHELP = NO
ECLIPSE_DOC_ID = org.doxygen.Project
DISABLE_INDEX = NO
GENERATE_TREEVIEW = NO
ENUM_VALUES_PER_LINE = 4
TREEVIEW_WIDTH = 250
EXT_LINKS_IN_WINDOW = NO
FORMULA_FONTSIZE = 10
FORMULA_TRANSPARENT = YES
USE_MATHJAX = NO
MATHJAX_FORMAT = HTML-CSS
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
MATHJAX_EXTENSIONS =
MATHJAX_CODEFILE =
SEARCHENGINE = YES
SERVER_BASED_SEARCH = NO
EXTERNAL_SEARCH = NO
SEARCHENGINE_URL =
SEARCHDATA_FILE = searchdata.xml
EXTERNAL_SEARCH_ID =
EXTRA_SEARCH_MAPPINGS =
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
GENERATE_LATEX = NO
LATEX_OUTPUT = latex
LATEX_CMD_NAME = latex
MAKEINDEX_CMD_NAME = makeindex
COMPACT_LATEX = NO
PAPER_TYPE = a4
EXTRA_PACKAGES =
LATEX_HEADER =
LATEX_FOOTER =
LATEX_EXTRA_STYLESHEET =
LATEX_EXTRA_FILES =
PDF_HYPERLINKS = YES
USE_PDFLATEX = YES
LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
LATEX_SOURCE_CODE = NO
LATEX_BIB_STYLE = plain
LATEX_TIMESTAMP = NO
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
GENERATE_RTF = NO
RTF_OUTPUT = rtf
COMPACT_RTF = NO
RTF_HYPERLINKS = NO
RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
RTF_SOURCE_CODE = NO
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
GENERATE_MAN = NO
MAN_OUTPUT = man
MAN_EXTENSION = .3
MAN_SUBDIR =
MAN_LINKS = NO
#---------------------------------------------------------------------------
# Configuration options related to the XML output
#---------------------------------------------------------------------------
GENERATE_XML = NO
XML_OUTPUT = xml
XML_PROGRAMLISTING = YES
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
GENERATE_DOCBOOK = NO
DOCBOOK_OUTPUT = docbook
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
PERLMOD_PRETTY = YES
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = NO
EXPAND_ONLY_PREDEF = NO
SEARCH_INCLUDES = YES
INCLUDE_PATH =
INCLUDE_FILE_PATTERNS =
PREDEFINED =
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration options related to external references
#---------------------------------------------------------------------------
TAGFILES =
GENERATE_TAGFILE =
ALLEXTERNALS = NO
EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
CLASS_DIAGRAMS = YES
MSCGEN_PATH =
DIA_PATH =
HIDE_UNDOC_RELATIONS = YES
HAVE_DOT = NO
DOT_NUM_THREADS = 0
DOT_FONTNAME = Helvetica
DOT_FONTSIZE = 10
DOT_FONTPATH =
CLASS_GRAPH = YES
COLLABORATION_GRAPH = YES
GROUP_GRAPHS = YES
UML_LOOK = NO
UML_LIMIT_NUM_FIELDS = 10
TEMPLATE_RELATIONS = NO
INCLUDE_GRAPH = YES
INCLUDED_BY_GRAPH = YES
CALL_GRAPH = NO
CALLER_GRAPH = NO
GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
DOT_IMAGE_FORMAT = png
INTERACTIVE_SVG = NO
DOT_PATH =
DOTFILE_DIRS =
MSCFILE_DIRS =
DIAFILE_DIRS =
PLANTUML_JAR_PATH =
PLANTUML_INCLUDE_PATH =
DOT_GRAPH_MAX_NODES = 50
MAX_DOT_GRAPH_DEPTH = 0
DOT_TRANSPARENT = NO
DOT_MULTI_TARGETS = NO
GENERATE_LEGEND = YES
DOT_CLEANUP = YES

BIN
doc/img/deployment-1.png Normal file

Binary file not shown (18 KiB).

BIN
doc/img/deployment-2.png Normal file

Binary file not shown (19 KiB).

BIN
doc/img/io-path.png Normal file

Binary file not shown (65 KiB).

BIN
doc/img/logo.png Normal file

Binary file not shown (7.2 KiB).

49
inc/cleaning/acp.h Normal file

@ -0,0 +1,49 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_CLEANING_ACP_H__
#define __OCF_CLEANING_ACP_H__
/**
* @file
* @brief ACP cleaning policy API
*/
enum ocf_cleaning_acp_parameters {
ocf_acp_wake_up_time,
ocf_acp_flush_max_buffers,
};
/**
* @name ACP cleaning policy parameters
* @{
*/
/**
* ACP cleaning policy time between flushing cycles (in ms)
*/
/** Wake up time minimum value */
#define OCF_ACP_MIN_WAKE_UP 0
/** Wake up time maximum value */
#define OCF_ACP_MAX_WAKE_UP 10000
/** Wake up time default value */
#define OCF_ACP_DEFAULT_WAKE_UP 10
/**
* ACP cleaning thread number of dirty cache lines to be flushed in one cycle
*/
/** Dirty cache lines to be flushed in one cycle minimum value */
#define OCF_ACP_MIN_FLUSH_MAX_BUFFERS 1
/** Dirty cache lines to be flushed in one cycle maximum value */
#define OCF_ACP_MAX_FLUSH_MAX_BUFFERS 10000
/** Dirty cache lines to be flushed in one cycle default value */
#define OCF_ACP_DEFAULT_FLUSH_MAX_BUFFERS 128
/**
* @}
*/
#endif /* __OCF_CLEANING_ACP_H__ */

74
inc/cleaning/alru.h Normal file

@ -0,0 +1,74 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_CLEANING_ALRU_H__
#define __OCF_CLEANING_ALRU_H__
/**
* @file
* @brief ALRU cleaning policy API
*/
enum ocf_cleaning_alru_parameters {
ocf_alru_wake_up_time,
ocf_alru_stale_buffer_time,
ocf_alru_flush_max_buffers,
ocf_alru_activity_threshold,
};
/**
* @name ALRU cleaning policy parameters
* @{
*/
/**
* ALRU cleaning thread wake up time
*/
/** Wake up time minimum value */
#define OCF_ALRU_MIN_WAKE_UP 1
/** Wake up time maximum value */
#define OCF_ALRU_MAX_WAKE_UP 3600
/** Wake up time default value */
#define OCF_ALRU_DEFAULT_WAKE_UP 20
/**
* ALRU cleaning thread staleness time
*/
/** Staleness time minimum value */
#define OCF_ALRU_MIN_STALENESS_TIME 1
/** Staleness time maximum value */
#define OCF_ALRU_MAX_STALENESS_TIME 3600
/** Staleness time default value*/
#define OCF_ALRU_DEFAULT_STALENESS_TIME 120
/**
* ALRU cleaning thread number of dirty cache lines to be flushed in one cycle
*/
/** Dirty cache lines to be flushed in one cycle minimum value */
#define OCF_ALRU_MIN_FLUSH_MAX_BUFFERS 1
/** Dirty cache lines to be flushed in one cycle maximum value */
#define OCF_ALRU_MAX_FLUSH_MAX_BUFFERS 10000
/** Dirty cache lines to be flushed in one cycle default value */
#define OCF_ALRU_DEFAULT_FLUSH_MAX_BUFFERS 100
/**
* ALRU cleaning thread cache idle time before flushing thread can start
*/
/** Idle time before flushing thread can start minimum value */
#define OCF_ALRU_MIN_ACTIVITY_THRESHOLD 500
/** Idle time before flushing thread can start maximum value */
#define OCF_ALRU_MAX_ACTIVITY_THRESHOLD 1000000
/** Idle time before flushing thread can start default value */
#define OCF_ALRU_DEFAULT_ACTIVITY_THRESHOLD 10000
/**
* @}
*/
#endif /* __OCF_CLEANING_ALRU_H__ */

37
inc/ocf.h Normal file

@ -0,0 +1,37 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_H__
#define __OCF_H__
/**
* @file
* @brief Main OCF header
* This file doesn't contain any functions or structures.
* It's simply a collective include file that allows the OCF user to include
* everything at once.
*/
#include "ocf_def.h"
#include "ocf_types.h"
#include "ocf_utilities.h"
#include "ocf_io.h"
#include "ocf_data_obj.h"
#include "ocf_cache.h"
#include "ocf_core.h"
#include "ocf_queue.h"
#include "ocf_cleaner.h"
#include "cleaning/alru.h"
#include "cleaning/acp.h"
#include "ocf_metadata.h"
#include "ocf_metadata_updater.h"
#include "ocf_io_class.h"
#include "ocf_stats.h"
#include "ocf_stats_builder.h"
#include "ocf_mngt.h"
#include "ocf_ctx.h"
#include "ocf_err.h"
#endif /* __OCF_H__ */

250
inc/ocf_cache.h Normal file

@ -0,0 +1,250 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_CACHE_H__
#define __OCF_CACHE_H__
/**
* @file
* @brief OCF cache API
*/
#include "ocf_types.h"
#include "ocf_data_obj.h"
#include "ocf_ctx.h"
#include "ocf_def.h"
/**
* @brief Cache info: configuration, status
*/
struct ocf_cache_info {
bool attached;
/*!< True if caching device is attached to cache */
uint8_t data_obj_type;
/*!< Cache data object type */
uint32_t size;
/*!< Actual cache size (in cache lines) */
/* Statistics of inactive cores */
struct {
uint32_t occupancy;
/*!< Cache occupancy (in cache lines) */
uint32_t dirty;
/*!< Dirty blocks within cache (in cache lines) */
} inactive;
uint32_t occupancy;
/*!< Actual cache occupancy (in cache lines) */
uint32_t dirty;
/*!< Dirty blocks within cache (in cache lines) */
uint32_t dirty_initial;
/*!< Dirty blocks within cache that were there when switching
* out of WB mode
*/
uint32_t dirty_for;
/*!< How long there are dirty cache lines (in seconds) */
ocf_cache_mode_t cache_mode;
/*!< Current cache mode */
/* Statistics of fallback Pass Through */
struct {
int error_counter;
/*!< How many requests to cache failed because of IO error */
bool status;
/*!< Current cache mode is PT,
set as a result of reaching IO error threshold */
} fallback_pt;
uint8_t state;
/*!< Cache state (running/flushing/stopping etc...) */
ocf_eviction_t eviction_policy;
/*!< Eviction policy selected */
ocf_cleaning_t cleaning_policy;
/*!< Cleaning policy selected (alru/nop) */
ocf_cache_line_size_t cache_line_size;
/*!< Cache line size in KiB */
uint32_t flushed;
/*!< Number of block flushed in ongoing flush operation */
uint32_t core_count;
/*!< Number of core devices associated with this cache */
uint64_t metadata_footprint;
/*!< Metadata memory footprint (in bytes) */
uint32_t metadata_end_offset;
/*!< LBA offset where metadata ends (in 4KiB blocks) */
};
/**
* @brief Obtain data object from cache
*
* @param[in] cache Cache object
*
* @retval Data object, NULL if detached.
*/
ocf_data_obj_t ocf_cache_get_data_object(ocf_cache_t cache);
/**
* @brief Get ID of given cache object
*
* @param[in] cache Cache object
*
* @retval Cache ID
*/
ocf_cache_id_t ocf_cache_get_id(ocf_cache_t cache);
/**
* @brief Get queue object associated with cache
*
* @param[in] cache Cache object
* @param[in] id Queue id
* @param[out] q Queue object
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_cache_get_queue(ocf_cache_t cache, unsigned id, ocf_queue_t *q);
/**
* @brief Set name of given cache object
*
* @param[in] cache Cache object
* @param[in] src Source of Cache name
* @param[in] src_size Size of src
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_cache_set_name(ocf_cache_t cache, const char *src, size_t src_size);
/**
* @brief Get name of given cache object
*
* @param[in] cache Cache object
*
* @retval Cache name
*/
const char *ocf_cache_get_name(ocf_cache_t cache);
/**
* @brief Check if cache is in incomplete state
*
* @param[in] cache Cache object
*
* @retval 1 Cache is in incomplete state
* @retval 0 Cache is in complete state
*/
bool ocf_cache_is_incomplete(ocf_cache_t cache);
/**
* @brief Check if caching device is attached
*
* @param[in] cache Cache object
*
* @retval 1 Caching device is attached
* @retval 0 Caching device is detached
*/
bool ocf_cache_is_device_attached(ocf_cache_t cache);
/**
* @brief Check if cache object is running
*
* @param[in] cache Cache object
*
* @retval 1 Cache object is running
* @retval 0 Cache object is not running (e.g. being stopped)
*/
bool ocf_cache_is_running(ocf_cache_t cache);
/**
* @brief Get cache mode of given cache object
*
* @param[in] cache Cache object
*
* @retval Cache mode
*/
ocf_cache_mode_t ocf_cache_get_mode(ocf_cache_t cache);
/**
* @brief Get cache line size of given cache object
*
* @param[in] cache Cache object
*
* @retval Cache line size
*/
ocf_cache_line_size_t ocf_cache_get_line_size(ocf_cache_t cache);
/**
* @brief Convert bytes to cache lines
*
* @param[in] cache Cache object
* @param[in] bytes Number of bytes
*
* @retval Cache lines count
*/
uint64_t ocf_cache_bytes_2_lines(ocf_cache_t cache, uint64_t bytes);
/**
* @brief Get core count of given cache object
*
* @param[in] cache Cache object
*
* @retval Core count
*/
uint32_t ocf_cache_get_core_count(ocf_cache_t cache);
/**
* @brief Get cache mode of given cache object
*
* @param[in] cache Cache object
* @param[out] info Cache info structure
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info);
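/*
 * Usage sketch (illustrative, assuming a valid ocf_cache_t handle named cache):
 * query the info structure and read a couple of its fields.
 *
 *	struct ocf_cache_info info;
 *
 *	if (!ocf_cache_get_info(cache, &info))
 *		printf("occupancy: %u dirty: %u\n", info.occupancy, info.dirty);
 */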
/**
* @brief Get UUID of data object associated with cache
*
* @param[in] cache Cache object
*
* @retval Data object UUID, NULL if detached.
*/
const struct ocf_data_obj_uuid *ocf_cache_get_uuid(ocf_cache_t cache);
/**
* @brief Get OCF context of given cache object
*
* @param[in] cache Cache object
*
* @retval OCF context
*/
ocf_ctx_t ocf_cache_get_ctx(ocf_cache_t cache);
/**
* @brief Get data object type id of given cache object
*
* @param[in] cache Cache object
*
* @retval data object type id, -1 if device detached
*/
uint8_t ocf_cache_get_type_id(ocf_cache_t cache);
#endif /* __OCF_CACHE_H__ */

36
inc/ocf_cfg.h Normal file

@ -0,0 +1,36 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_CFG_H__
#define __OCF_CFG_H__
/**
* @file
* @brief OCF configuration file
*/
/**
* Configure maximum numbers of cores in cache instance
*/
#ifndef OCF_CONFIG_MAX_CORES
#define OCF_CONFIG_MAX_CORES 4096
#endif
/** Maximum number of IO classes that can be configured */
#ifndef OCF_CONFIG_MAX_IO_CLASSES
#define OCF_CONFIG_MAX_IO_CLASSES 33
#endif
#if OCF_CONFIG_MAX_IO_CLASSES > 256
#error "Limit of maximum number of IO classes exceeded"
#endif
/** Enabling debug statistics */
#ifndef OCF_CONFIG_DEBUG_STATS
#define OCF_CONFIG_DEBUG_STATS 0
#endif
#endif /* __OCF_CFG_H__ */

51
inc/ocf_cleaner.h Normal file

@ -0,0 +1,51 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef OCF_CLEANER_H_
#define OCF_CLEANER_H_
/**
* @file
* @brief OCF cleaner API for synchronizing dirty data
*
*/
/**
* @brief Run cleaner
*
* @param[in] c Cleaner instance to run
* @param[in] io_queue I/O queue to which cleaner requests should be submitted
*
* @retval Hint when to run cleaner next time. Value expressed in milliseconds.
*/
uint32_t ocf_cleaner_run(ocf_cleaner_t c, uint32_t io_queue);
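/*
 * Usage sketch (illustrative, assuming valid cleaner and queue id variables):
 * an adaptation layer may reschedule its cleaning worker based on the
 * returned hint, e.g.:
 *
 *	uint32_t sleep_ms = ocf_cleaner_run(cleaner, queue_id);
 *	// sleep for sleep_ms milliseconds, then call ocf_cleaner_run() again
 */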
/**
* @brief Set cleaner private data
*
* @param[in] c Cleaner handle
* @param[in] priv Private data
*/
void ocf_cleaner_set_priv(ocf_cleaner_t c, void *priv);
/**
* @brief Get cleaner private data
*
* @param[in] c Cleaner handle
*
* @retval Cleaner private data
*/
void *ocf_cleaner_get_priv(ocf_cleaner_t c);
/**
* @brief Get cache instance to which cleaner belongs
*
* @param[in] c Cleaner handle
*
* @retval Cache instance
*/
ocf_cache_t ocf_cleaner_get_cache(ocf_cleaner_t c);
#endif

242
inc/ocf_core.h Normal file

@ -0,0 +1,242 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
/**
* @file
* @brief OCF core API
*/
#ifndef __OCF_CORE_H__
#define __OCF_CORE_H__
#include "ocf_types.h"
#include "ocf_data_obj.h"
#include "ocf_io.h"
#include "ocf_mngt.h"
/**
* @brief Obtain cache object from core
*
* @param[in] core Core object
*
* @retval Cache object
*/
ocf_cache_t ocf_core_get_cache(ocf_core_t core);
/**
* @brief Obtain data object associated with core
*
* @param[in] core Core object
*
* @retval Data object
*/
ocf_data_obj_t ocf_core_get_data_object(ocf_core_t core);
/**
* @brief Get UUID of data object associated with core
*
* @param[in] core Core object
*
* @retval Data object UUID
*/
static inline const struct ocf_data_obj_uuid *ocf_core_get_uuid(ocf_core_t core)
{
return ocf_data_obj_get_uuid(ocf_core_get_data_object(core));
}
/**
* @brief Associate new UUID value with given core
*
* @param[in] core Core object
* @param[in] uuid new core uuid
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_core_set_uuid(ocf_core_t core, const struct ocf_data_obj_uuid *uuid);
/**
* @brief Get sequential cutoff threshold of given core object
*
* @param[in] core Core object
*
* @retval Sequential cutoff threshold [B]
*/
uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core);
/**
* @brief Get sequential cutoff policy of given core object
*
* @param[in] core Core object
*
* @retval Sequential cutoff policy
*/
ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core);
/**
* @brief Get ID of given core object
*
* @param[in] core Core object
*
* @retval Core ID
*/
ocf_core_id_t ocf_core_get_id(ocf_core_t core);
/**
* @brief Set name of given core object
*
* @param[in] core Core object
* @param[in] src Source of Core name
* @param[in] src_size Size of src
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_core_set_name(ocf_core_t core, const char *src, size_t src_size);
/**
* @brief Get name of given core object
*
* @param[in] core Core object
*
* @retval Core name
*/
const char *ocf_core_get_name(ocf_core_t core);
/**
* @brief Get core state
*
* @param[in] core Core object
*
* @retval Core state
*/
ocf_core_state_t ocf_core_get_state(ocf_core_t core);
/**
* @brief Obtain core object of given ID from cache
*
* @param[in] cache Cache object
* @param[in] id Core ID
* @param[out] core Core object
*
* @retval 0 Success
* @retval Non-zero Core getting failed
*/
int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core);
/**
* @brief Set persistent user metadata for given core
*
* @param[in] core Core object
* @param[in] data User data buffer
* @param[in] size Size of user data buffer
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_core_set_user_metadata(ocf_core_t core, void *data, size_t size);
/**
* @brief Get persistent user metadata from given core
*
* @param[in] core Core object
* @param[out] data User data buffer
* @param[in] size Size of user data buffer
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_core_get_user_metadata(ocf_core_t core, void *data, size_t size);
/**
* @brief Allocate new ocf_io
*
* @param[in] core Core object
*
* @retval ocf_io object
*/
struct ocf_io *ocf_new_io(ocf_core_t core);
/**
* @brief Submit ocf_io
*
* @param[in] io IO to be submitted
* @param[in] cache_mode Cache mode to be enforced
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode);
/**
* @brief Submit ocf_io
*
* @param[in] io IO to be submitted
*
* @retval 0 Success
* @retval Non-zero Fail
*/
static inline int ocf_submit_io(struct ocf_io *io)
{
return ocf_submit_io_mode(io, ocf_cache_mode_none);
}
/**
* @brief Fast path for submitting IO. If possible, request is processed
* immediately without adding to internal request queue
*
* @param[in] io IO to be submitted
*
* @retval 0 IO has been submitted successfully
* @retval Non-zero Fast submit failed. Try to submit IO with ocf_submit_io()
*/
int ocf_submit_io_fast(struct ocf_io *io);
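/*
 * Usage sketch (illustrative, assuming io is a configured struct ocf_io *):
 * try the fast path first and fall back to regular submission otherwise.
 *
 *	if (ocf_submit_io_fast(io))
 *		ocf_submit_io(io);
 */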
/**
* @brief Submit ocf_io with flush command
*
* @param[in] io IO to be submitted
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_submit_flush(struct ocf_io *io);
/**
* @brief Submit ocf_io with discard command
*
* @param[in] io IO to be submitted
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_submit_discard(struct ocf_io *io);
/**
* @brief Core visitor function type which is called back when iterating over
* cores.
*
* @param[in] core Core which is currently iterated (visited)
* @param[in] cntx Visitor context
*
* @retval 0 continue visiting cores
* @retval Non-zero stop iterating and return result
*/
typedef int (*ocf_core_visitor_t)(ocf_core_t core, void *cntx);
/**
* @brief Run visitor function for each core of given cache
*
* @param[in] cache OCF cache instance
* @param[in] visitor Visitor function
* @param[in] cntx Visitor context
* @param[in] only_opened Visit only opened cores
*
* @retval 0 Success
* @retval Non-zero Fail
*/
int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
bool only_opened);
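/*
 * Usage sketch (illustrative; count_cores is a hypothetical visitor callback):
 *
 *	static int count_cores(ocf_core_t core, void *cntx)
 *	{
 *		(*(unsigned *)cntx)++;
 *		return 0;
 *	}
 *
 *	unsigned count = 0;
 *	ocf_core_visit(cache, count_cores, &count, false);
 */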
#endif /* __OCF_CORE_H__ */

356
inc/ocf_ctx.h Normal file

@ -0,0 +1,356 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_CTX_H__
#define __OCF_CTX_H__
/**
* @file
* @brief OCF library context API
*/
#include "ocf_types.h"
#include "ocf_data_obj.h"
#include "ocf_logger.h"
/**
* @brief Seeking start position in environment data buffer
*/
typedef enum {
ctx_data_seek_begin,
/*!< Seeking from the beginning of environment data buffer */
ctx_data_seek_current,
/*!< Seeking from current position in environment data buffer */
} ctx_data_seek_t;
/**
* @brief OCF context specific operation
*/
struct ocf_ctx_ops {
/**
* @brief The name of the environment which provides platform
* interface for cache engine
*/
const char *name;
/**
* @name Context data buffer operations
* @{
*/
/**
* @brief Allocate context data buffer
*
* @param[in] pages The size of data buffer in pages
*
* @return Context data buffer
*/
ctx_data_t *(*data_alloc)(uint32_t pages);
/**
* @brief Free context data buffer
*
* @param[in] data Context data buffer which shall be freed
*/
void (*data_free)(ctx_data_t *data);
/**
* @brief Lock context data buffer to disable swap-out
*
* @param[in] data Context data buffer which shall be locked
*
* @retval 0 Memory locked successfully
* @retval Non-zero Memory locking failure
*/
int (*data_mlock)(ctx_data_t *data);
/**
* @brief Unlock context data buffer
*
* @param[in] data Context data buffer which shall be unlocked
*/
void (*data_munlock)(ctx_data_t *data);
/**
* @brief Read from environment data buffer into raw data buffer
*
* @param[in,out] dst Destination raw memory buffer
* @param[in] src Source context data buffer
* @param[in] size Number of bytes to be read
*
* @return Number of read bytes
*/
uint32_t (*data_rd)(void *dst, ctx_data_t *src, uint32_t size);
/**
* @brief Write raw data buffer into context data buffer
*
* @param[in,out] dst Destination context data buffer
* @param[in] src Source raw memory buffer
* @param[in] size Number of bytes to be written
*
* @return Number of written bytes
*/
uint32_t (*data_wr)(ctx_data_t *dst, const void *src, uint32_t size);
/**
* @brief Zero context data buffer
*
* @param[in,out] dst Destination context data buffer to be zeroed
* @param[in] size Number of bytes to be zeroed
*
* @return Number of zeroed bytes
*/
uint32_t (*data_zero)(ctx_data_t *dst, uint32_t size);
/**
* @brief Seek read/write head in context data buffer for specified
* offset
*
* @param[in,out] dst Destination context data buffer to seek in
* @param[in] seek Seek beginning offset
* @param[in] size Number of bytes to seek
*
* @return Number of bytes sought
*/
uint32_t (*data_seek)(ctx_data_t *dst,
ctx_data_seek_t seek, uint32_t size);
/**
* @brief Copy context data buffer content
*
* @param[in,out] dst Destination context data buffer
* @param[in] src Source context data buffer
* @param[in] to Starting offset in destination buffer
* @param[in] from Starting offset in source buffer
* @param[in] bytes Number of bytes to be copied
*
* @return Number of bytes copied
*/
uint64_t (*data_cpy)(ctx_data_t *dst, ctx_data_t *src,
uint64_t to, uint64_t from, uint64_t bytes);
/**
* @brief Erase content of data buffer
*
* @param[in] dst Context data buffer which shall be erased
*/
void (*data_secure_erase)(ctx_data_t *dst);
/**
* @}
*/
/**
* @name I/O queue operations
* @{
*/
/**
* @brief Initialize I/O queue.
*
* This function should create worker, thread or any other queue
* processing related stuff specific to given environment.
*
* @param[in] q I/O queue to be initialized
*
* @retval 0 I/O queue has been initialized successfully
* @retval Non-zero I/O queue initialization failure
*/
int (*queue_init)(ocf_queue_t q);
/**
* @brief Kick I/O queue processing
*
* This function should inform worker, thread or any other queue
* processing mechanism, that there are new requests in queue to
* be processed. Processing requests inside current call is not allowed.
*
* @param[in] q I/O queue to be kicked
*/
void (*queue_kick)(ocf_queue_t q);
/**
* @brief Kick I/O queue processing
*
* This function should inform worker, thread or any other queue
* processing mechanism, that there are new requests in queue to
* be processed. Kick function is allowed to process requests in current
* call
*
* @param[in] q I/O queue to be kicked
*/
void (*queue_kick_sync)(ocf_queue_t q);
/**
* @brief Stop I/O queue
*
* @param[in] q I/O queue being stopped
*/
void (*queue_stop)(ocf_queue_t q);
/**
* @}
*/
/**
* @name Cleaner operations
* @{
*/
/**
* @brief Initialize cleaner.
*
* This function should create worker, thread, timer or any other
* mechanism responsible for calling cleaner routine.
*
* @param[in] c Descriptor of cleaner to be initialized
*
* @retval 0 Cleaner has been initialized successfully
* @retval Non-zero Cleaner initialization failure
*/
int (*cleaner_init)(ocf_cleaner_t c);
/**
* @brief Stop cleaner
*
* @param[in] c Descriptor of cleaner being stopped
*/
void (*cleaner_stop)(ocf_cleaner_t c);
/**
* @}
*/
/**
* @name Metadata updater operations
* @{
*/
/**
* @brief Initialize metadata updater.
*
* This function should create worker, thread, timer or any other
* mechanism responsible for calling metadata updater routine.
*
* @param[in] mu Handle to metadata updater to be initialized
*
* @retval 0 Metadata updater has been initialized successfully
* @retval Non-zero Metadata updater initialization failure
*/
int (*metadata_updater_init)(ocf_metadata_updater_t mu);
/**
* @brief Kick metadata updater processing
*
* This function should inform worker, thread or any other mechanism,
* that there are new metadata requests to be processed.
*
* @param[in] mu Metadata updater to be kicked
*/
void (*metadata_updater_kick)(ocf_metadata_updater_t mu);
/**
* @brief Stop metadata updater
*
* @param[in] mu Metadata updater being stopped
*/
void (*metadata_updater_stop)(ocf_metadata_updater_t mu);
/**
* @}
*/
};
/**
* @brief Register data object interface
*
* @note Type of data object operations is unique and cannot be repeated.
*
* @param[in] ctx OCF context
* @param[in] properties Reference to data object properties
* @param[in] type_id Type id of data object operations
*
* @retval 0 Data object operations registered successfully
* @retval Non-zero Data object registration failure
*/
int ocf_ctx_register_data_obj_type(ocf_ctx_t ctx, uint8_t type_id,
const struct ocf_data_obj_properties *properties);
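/*
 * Usage sketch (illustrative; MY_OBJ_TYPE and my_obj_properties are
 * hypothetical names defined by the adaptation layer):
 *
 *	#define MY_OBJ_TYPE 1
 *	result = ocf_ctx_register_data_obj_type(ctx, MY_OBJ_TYPE,
 *			&my_obj_properties);
 */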
/**
* @brief Unregister data object interface
*
* @param[in] ctx OCF context
* @param[in] type_id Type id of data object operations
*/
void ocf_ctx_unregister_data_obj_type(ocf_ctx_t ctx, uint8_t type_id);
/**
* @brief Get data object type operations by type id
*
* @param[in] ctx OCF context
* @param[in] type_id Type id of data object operations which were registered
*
* @return Data object type
* @retval NULL When data object operations were not registered
* for requested type
*/
ocf_data_obj_type_t ocf_ctx_get_data_obj_type(ocf_ctx_t ctx, uint8_t type_id);
/**
* @brief Get data object type id by type
*
* @param[in] ctx OCF context
* @param[in] type Type of data object operations which were registered
*
* @return Data object type id
* @retval -1 When data object operations were not registered
* for requested type
*/
int ocf_ctx_get_data_obj_type_id(ocf_ctx_t ctx, ocf_data_obj_type_t type);
/**
* @brief Create data object of given type
*
* @param[in] ctx handle to object designating ocf context
* @param[out] obj data object handle
* @param[in] uuid OCF data object UUID
* @param[in] type_id cache/core object type id
*
* @return Zero when success, otherwise an error
*/
int ocf_ctx_data_obj_create(ocf_ctx_t ctx, ocf_data_obj_t *obj,
struct ocf_data_obj_uuid *uuid, uint8_t type_id);
/**
* @brief Set OCF context logger
*
* @param[in] ctx OCF context
* @param[in] logger Structure describing logger
*
* @return Zero when success, otherwise an error
*/
int ocf_ctx_set_logger(ocf_ctx_t ctx, const struct ocf_logger *logger);
/**
* @brief Initialize OCF context
*
* @param[out] ctx OCF context
* @param[in] ops OCF context operations
*
* @return Zero when success, otherwise an error
*/
int ocf_ctx_init(ocf_ctx_t *ctx, const struct ocf_ctx_ops *ops);
/**
* @brief De-Initialize OCF context
*
* @param[in] ctx OCF context
*
* @note Precondition is stopping all cache instances
*
* @return Zero when success, otherwise an error
*/
int ocf_ctx_exit(ocf_ctx_t ctx);
#endif /* __OCF_CTX_H__ */

253
inc/ocf_data_obj.h Normal file

@ -0,0 +1,253 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_DATA_OBJ_H__
#define __OCF_DATA_OBJ_H__
/**
* @file
* @brief OCF data object API
*/
#include "ocf_types.h"
struct ocf_io;
/**
* @brief OCF data object UUID maximum allowed size
*/
#define OCF_DATA_OBJ_UUID_MAX_SIZE (4096UL - sizeof(uint32_t))
/**
* @brief OCF data object UUID
*/
struct ocf_data_obj_uuid {
size_t size;
/*!< UUID data size */
const void *data;
/*!< UUID data content */
};
/**
* @brief This structure describes data object capabilities
*/
struct ocf_data_obj_caps {
uint32_t atomic_writes : 1;
/*!< Data object supports atomic writes */
};
/**
* @brief OCF data object interface declaration
*/
struct ocf_data_obj_ops {
/**
* @brief Allocate new IO for this data object
*
* @param[in] obj Data object for which IO is created
* @return IO On success
* @return NULL On failure
*/
struct ocf_io *(*new_io)(ocf_data_obj_t obj);
/**
* @brief Submit IO on this data object
*
* @param[in] io IO to be submitted
*/
void (*submit_io)(struct ocf_io *io);
/**
* @brief Submit IO with flush command
*
* @param[in] io IO to be submitted
*/
void (*submit_flush)(struct ocf_io *io);
/**
* @brief Submit IO with metadata
*
* @param[in] io IO to be submitted
*/
void (*submit_metadata)(struct ocf_io *io);
/**
* @brief Submit IO with discard command
*
* @param[in] io IO to be submitted
*/
void (*submit_discard)(struct ocf_io *io);
/**
* @brief Submit operation to write zeroes to target address (including
* metadata extended LBAs in atomic mode)
*
* @param[in] io IO description (addr, size)
*/
void (*submit_write_zeroes)(struct ocf_io *io);
/**
* @brief Open data object
*
* @note This function performs data object initialization and should
* be called before any other operation on data object
*
* @param[in] obj Data object
*/
int (*open)(ocf_data_obj_t obj);
/**
* @brief Close data object
*
* @param[in] obj Data object
*/
void (*close)(ocf_data_obj_t obj);
/**
* @brief Get maximum io size of data object
*
* @param[in] obj Data object
*/
unsigned int (*get_max_io_size)(ocf_data_obj_t obj);
/**
* @brief Get length of data object in bytes
*
* @param[in] obj Data object
*/
uint64_t (*get_length)(ocf_data_obj_t obj);
};
/**
* @brief This structure describes data object properties
*/
struct ocf_data_obj_properties {
const char *name;
/*!< The name of data object operations */
uint32_t io_context_size;
/*!< Size of io context structure */
struct ocf_data_obj_caps caps;
/*!< Data object capabilities */
struct ocf_data_obj_ops ops;
/*!< Data object operations */
};
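/*
 * Sketch of a minimal properties definition an adaptation layer might provide
 * (illustrative; all my_* identifiers are hypothetical):
 *
 *	static const struct ocf_data_obj_properties my_obj_properties = {
 *		.name = "My data object",
 *		.io_context_size = sizeof(struct my_io_context),
 *		.caps = {
 *			.atomic_writes = 0,
 *		},
 *		.ops = {
 *			.new_io = my_new_io,
 *			.submit_io = my_submit_io,
 *			.open = my_open,
 *			.close = my_close,
 *			.get_max_io_size = my_get_max_io_size,
 *			.get_length = my_get_length,
 *		},
 *	};
 */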
/**
* @brief Get data object type
*
* @param[in] obj Data object
*
* @return Data object type
*/
ocf_data_obj_type_t ocf_data_obj_get_type(ocf_data_obj_t obj);
/**
* @brief Get private context of data object
*
* @param[in] obj Data object
*
* @return Data object private context
*/
void *ocf_data_obj_get_priv(ocf_data_obj_t obj);
/**
* @brief Set private context for data object
*
* @param[in] obj Data object
* @param[in] priv Data object private context to be set
*/
void ocf_data_obj_set_priv(ocf_data_obj_t obj, void *priv);
/**
* @brief Get data object UUID
*
* @param[in] obj Data object
*
* @return UUID of data object
*/
const struct ocf_data_obj_uuid *ocf_data_obj_get_uuid(ocf_data_obj_t obj);
/**
* @brief Get data object length
*
* @param[in] obj Data object
*
* @return Length of data object in bytes
*/
uint64_t ocf_data_obj_get_length(ocf_data_obj_t obj);
/**
* @brief Get cache handle for given data object
*
* @param obj data object handle
*
* @return Handle to cache for which data object belongs to
*/
ocf_cache_t ocf_data_obj_get_cache(ocf_data_obj_t obj);
/**
* @brief Initialize data object
*
* @param[in] obj data object handle
* @param[in] type cache/core object type
* @param[in] uuid OCF data object UUID
* @param[in] uuid_copy create a copy of uuid data
*
* @return Zero when success, otherwise an error
*/
int ocf_data_obj_init(ocf_data_obj_t obj, ocf_data_obj_type_t type,
struct ocf_data_obj_uuid *uuid, bool uuid_copy);
/**
* @brief Deinitialize data object
*
* @param[in] obj data object handle
*/
void ocf_data_obj_deinit(ocf_data_obj_t obj);
/**
* @brief Allocate and initialize data object
*
* @param[out] obj pointer to data object handle
* @param[in] type cache/core object type
* @param[in] uuid OCF data object UUID
*
* @return Zero when success, otherwise an error
*/
int ocf_data_obj_create(ocf_data_obj_t *obj, ocf_data_obj_type_t type,
struct ocf_data_obj_uuid *uuid);
/**
* @brief Deinitialize and free data object
*
* @param[in] obj data object handle
*/
void ocf_data_obj_destroy(ocf_data_obj_t obj);
/**
* @brief Allocate new io from data object allocator
*
* @param[in] obj data object handle
*/
struct ocf_io *ocf_data_obj_new_io(ocf_data_obj_t obj);
/**
* @brief Delete io from data object allocator
*
* @param[in] io handle to previously allocated io
*/
void ocf_data_obj_del_io(struct ocf_io* io);
/**
* @brief Return io context data
*
* @param[in] io ocf io handle
*/
void *ocf_data_obj_get_data_from_io(struct ocf_io* io);
#endif /* __OCF_DATA_OBJ_H__ */

325
inc/ocf_def.h Normal file

@ -0,0 +1,325 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_DEF_H__
#define __OCF_DEF_H__
#include "ocf_cfg.h"
/**
* @file
* @brief OCF definitions
*/
/**
* @name OCF cache definitions
*/
/**
* Minimum value of a valid cache ID
*/
#define OCF_CACHE_ID_MIN 1
/**
* Maximum value of a valid cache ID
*/
#define OCF_CACHE_ID_MAX 16384
/**
* Invalid value of cache id
*/
#define OCF_CACHE_ID_INVALID 0
/**
* Minimum cache size in bytes
*/
#define OCF_CACHE_SIZE_MIN (100 * MiB)
/**
* Size of cache name
*/
#define OCF_CACHE_NAME_SIZE 32
/**
* Value to turn off fallback pass through
*/
#define OCF_CACHE_FALLBACK_PT_INACTIVE 0
/**
* Minimum value of io error threshold
*/
#define OCF_CACHE_FALLBACK_PT_MIN_ERROR_THRESHOLD \
OCF_CACHE_FALLBACK_PT_INACTIVE
/**
* Maximum value of io error threshold
*/
#define OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD 1000000
/**
* @}
*/
/**
* @name OCF cores definitions
*/
/**
* Maximum numbers of cores per cache instance
*/
#define OCF_CORE_MAX OCF_CONFIG_MAX_CORES
/**
* Minimum value of a valid core ID
*/
#define OCF_CORE_ID_MIN 0
/**
* Maximum value of a valid core ID
*/
#define OCF_CORE_ID_MAX (OCF_CORE_MAX - 1)
/**
* Invalid value of core id
*/
#define OCF_CORE_ID_INVALID OCF_CORE_MAX
/**
* Size of core name
*/
#define OCF_CORE_NAME_SIZE 32
/**
* Minimum value of valid core sequence number
*/
#define OCF_SEQ_NO_MIN 1
/**
* Maximum value of a valid core sequence number
*/
#define OCF_SEQ_NO_MAX (65535UL)
/*
* Invalid value of core sequence number
*/
#define OCF_SEQ_NO_INVALID 0
/**
* @}
*/
/**
* @name Miscellaneous defines
* @{
*/
#define KiB (1ULL << 10)
#define MiB (1ULL << 20)
#define GiB (1ULL << 30)
#if OCF_CONFIG_DEBUG_STATS == 1
/** Macro which indicates that extended debug statistics shall be on*/
#define OCF_DEBUG_STATS
#endif
/**
* @}
*/
/**
* This Enumerator describes OCF cache instance state
*/
typedef enum {
ocf_cache_state_running = 0, //!< ocf_cache_state_running
/*!< OCF is currently running */
ocf_cache_state_stopping = 1, //!< ocf_cache_state_stopping
/*!< OCF cache instance is stopping */
ocf_cache_state_initializing = 2, //!< ocf_cache_state_initializing
/*!< OCF cache instance during initialization */
ocf_cache_state_incomplete = 3, //!< ocf_cache_state_incomplete
/*!< OCF cache has at least one inactive core */
ocf_cache_state_max //!< ocf_cache_state_max
/*!< Stopper of cache state enumerator */
} ocf_cache_state_t;
/**
* This Enumerator describes OCF core instance state
*/
typedef enum {
ocf_core_state_active = 0,
/*!< Core is active */
ocf_core_state_inactive,
/*!< Core is inactive (not attached) */
ocf_core_state_max,
/*!< Stopper of core state enumerator */
} ocf_core_state_t;
/**
* OCF supported cache modes
*/
typedef enum {
ocf_cache_mode_wt = 0,
/*!< Write-through cache mode */
ocf_cache_mode_wb,
/*!< Write-back cache mode */
ocf_cache_mode_wa,
/*!< Write-around cache mode */
ocf_cache_mode_pt,
/*!< Pass-through cache mode */
ocf_cache_mode_wi,
/*!< Write invalidate cache mode */
ocf_cache_mode_max,
/*!< Stopper of cache mode enumerator */
ocf_cache_mode_default = ocf_cache_mode_wt,
/*!< Default cache mode */
ocf_cache_mode_none = -1,
/*!< Current cache mode of given cache instance */
} ocf_cache_mode_t;
typedef enum {
ocf_seq_cutoff_policy_always = 0,
/*!< Sequential cutoff always on */
ocf_seq_cutoff_policy_full,
/*!< Sequential cutoff when occupancy is 100% */
ocf_seq_cutoff_policy_never,
/*!< Sequential cutoff disabled */
ocf_seq_cutoff_policy_max,
/*!< Stopper of sequential cutoff policy enumerator */
ocf_seq_cutoff_policy_default = ocf_seq_cutoff_policy_full,
/*!< Default sequential cutoff policy*/
} ocf_seq_cutoff_policy;
/**
* OCF supported eviction types
*/
typedef enum {
ocf_eviction_lru = 0,
/*!< Least recently used eviction policy */
ocf_eviction_max,
/*!< Stopper of enumerator */
ocf_eviction_default = ocf_eviction_lru,
/*!< Default eviction policy */
} ocf_eviction_t;
/**
* OCF supported Write-Back cleaning policies type
*/
typedef enum {
ocf_cleaning_nop = 0,
/*!< Cleaning won't happen in background. Only on eviction or
* during cache stop
*/
ocf_cleaning_alru,
/*!< Approximately least recently used. A cleaning thread running in the
* background cleans dirty data during IO
* inactivity.
*/
ocf_cleaning_acp,
/*!< Cleaning algorithm attempts to reduce core device seek
* distance. Cleaning thread runs concurrently with I/O.
*/
ocf_cleaning_max,
/*!< Stopper of enumerator */
ocf_cleaning_default = ocf_cleaning_alru,
/*!< Default cleaning policy type */
} ocf_cleaning_t;
/**
* OCF supported cache line sizes in bytes
*/
typedef enum {
ocf_cache_line_size_4 = 4 * KiB,
/*!< 4 kiB */
ocf_cache_line_size_8 = 8 * KiB,
/*!< 8 kiB */
ocf_cache_line_size_16 = 16 * KiB,
/*!< 16 kiB */
ocf_cache_line_size_32 = 32 * KiB,
/*!< 32 kiB */
ocf_cache_line_size_64 = 64 * KiB,
/*!< 64 kiB */
ocf_cache_line_size_default = ocf_cache_line_size_4,
/*!< Default cache line size */
ocf_cache_line_size_min = ocf_cache_line_size_4,
/*!< Minimum cache line size */
ocf_cache_line_size_max = ocf_cache_line_size_64,
/*!< Maximal cache line size */
ocf_cache_line_size_inf = ~0ULL,
/*!< Force enum to be 64-bit */
} ocf_cache_line_size_t;
/**
* Metadata layout
*/
typedef enum {
ocf_metadata_layout_striping = 0,
ocf_metadata_layout_seq = 1,
ocf_metadata_layout_max,
ocf_metadata_layout_default = ocf_metadata_layout_striping
} ocf_metadata_layout_t;
/**
* @name OCF IO class definitions
*/
/**
* Maximum numbers of IO classes per cache instance
*/
#define OCF_IO_CLASS_MAX OCF_CONFIG_MAX_IO_CLASSES
/**
* Minimum value of a valid IO class ID
*/
#define OCF_IO_CLASS_ID_MIN 0
/**
* Maximum value of a valid IO class ID
*/
#define OCF_IO_CLASS_ID_MAX (OCF_IO_CLASS_MAX - 1)
/**
* Invalid value of IO class id
*/
#define OCF_IO_CLASS_INVALID OCF_IO_CLASS_MAX
/** Maximum size of the IO class name */
#define OCF_IO_CLASS_NAME_MAX 33
/** IO class priority which indicates pinning */
#define OCF_IO_CLASS_PRIO_PINNED -1
/** The highest IO class priority */
#define OCF_IO_CLASS_PRIO_HIGHEST 0
/** The lowest IO class priority */
#define OCF_IO_CLASS_PRIO_LOWEST 255
/** Default IO class priority */
#define OCF_IO_CLASS_PRIO_DEFAULT OCF_IO_CLASS_PRIO_LOWEST
/**
* @}
*/
/**
* @name I/O operations
* @{
*/
#define OCF_READ 0
#define OCF_WRITE 1
/**
* @}
*/
#define MAX_TRIM_RQ_SIZE (1 * MiB)
#endif /* __OCF_DEF_H__ */

97
inc/ocf_err.h Normal file
View File

@ -0,0 +1,97 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_ERR_H__
#define __OCF_ERR_H__
/**
* @file
* @brief OCF error codes definitions
*/
/**
* @brief OCF error enumerator
*/
typedef enum {
/** Invalid input parameter value */
OCF_ERR_INVAL = 1000000,
/** Invalid data object type */
OCF_ERR_INVAL_DATA_OBJ_TYPE,
/** Operation interrupted */
OCF_ERR_INTR,
/** Unknown error occurred */
OCF_ERR_UNKNOWN,
/** Too many caches */
OCF_ERR_TOO_MANY_CACHES,
/** Out of memory */
OCF_ERR_NO_MEM,
/** Not enough RAM to start cache */
OCF_ERR_NO_FREE_RAM,
/** Start cache failure */
OCF_ERR_START_CACHE_FAIL,
/** Cache is busy */
OCF_ERR_CACHE_IN_USE,
/** Cache ID does not exist */
OCF_ERR_CACHE_NOT_EXIST,
/** Cache ID already exists */
OCF_ERR_CACHE_EXIST,
/** Too many core devices in cache */
OCF_ERR_TOO_MANY_CORES,
/** Core device not available */
OCF_ERR_CORE_NOT_AVAIL,
/** Cannot open device exclusively */
OCF_ERR_NOT_OPEN_EXC,
/** Cache device not available */
OCF_ERR_CACHE_NOT_AVAIL,
/** IO Class does not exist */
OCF_ERR_IO_CLASS_NOT_EXIST,
/** Error while writing to cache device */
OCF_ERR_WRITE_CACHE,
/** Error while writing to core device */
OCF_ERR_WRITE_CORE,
/** Dirty shutdown */
OCF_ERR_DIRTY_SHUTDOWN,
/** Cache contains dirty data */
OCF_ERR_DIRTY_EXISTS,
/** Flushing of core interrupted */
OCF_ERR_FLUSHING_INTERRUPTED,
/** Adding core to core pool failed */
OCF_ERR_CANNOT_ADD_CORE_TO_POOL,
/** Cache is in incomplete state */
OCF_ERR_CACHE_IN_INCOMPLETE_STATE,
/** Core device is in inactive state */
OCF_ERR_CORE_IN_INACTIVE_STATE,
/** Invalid cache mode */
OCF_ERR_INVALID_CACHE_MODE,
/** Invalid cache line size */
OCF_ERR_INVALID_CACHE_LINE_SIZE,
} ocf_error_t;
#endif /* __OCF_ERR_H__ */

336
inc/ocf_io.h Normal file
View File

@ -0,0 +1,336 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_IO_H__
#define __OCF_IO_H__
#include "ocf_types.h"
/**
* @file
* @brief OCF IO definitions
*/
struct ocf_io;
/**
* @brief OCF IO legacy completion
*
* @note This type of completion is for legacy completion type
*
* @param[in] private_data Private data for completion function
* @param[in] error Completion status code
*/
typedef void (*ocf_end_t)(void *private_data, int error);
/**
* @brief OCF IO start
*
* @note OCF IO start notification callback
*
* @param[in] io OCF IO being started
*/
typedef void (*ocf_start_io_t)(struct ocf_io *io);
/**
* @brief OCF IO handle
*
* @note OCF IO handle callback
*
* @param[in] io OCF IO to handle
* @param[in] opaque Opaque context pointer passed to the handler
*/
typedef void (*ocf_handle_io_t)(struct ocf_io *io, void *opaque);
/**
* @brief OCF IO completion
*
* @note Completion function for OCF IO
*
* @param[in] io OCF IO being completed
* @param[in] error Completion status code
*/
typedef void (*ocf_end_io_t)(struct ocf_io *io, int error);
/**
* @brief OCF IO main structure
*/
struct ocf_io {
/**
* @brief OCF IO destination data object
*/
ocf_data_obj_t obj;
/**
* @brief Operations set for this OCF IO
*/
const struct ocf_io_ops *ops;
/**
* @brief OCF IO destination address
*/
uint64_t addr;
/**
* @brief OCF IO flags
*/
uint64_t flags;
/**
* @brief OCF IO size in bytes
*/
uint32_t bytes;
/**
* @brief OCF IO destination class
*/
uint32_t class;
/**
* @brief OCF IO direction
*/
uint32_t dir;
/**
* @brief Queue id
*/
uint32_t io_queue;
/**
* @brief OCF IO start function
*/
ocf_start_io_t start;
/**
* @brief OCF IO handle function
*/
ocf_handle_io_t handle;
/**
* @brief OCF IO completion function
*/
ocf_end_io_t end;
/**
* @brief OCF IO private 1
*/
void *priv1;
/**
* @brief OCF IO private 2
*/
void *priv2;
};
/**
* @brief OCF IO operations set structure
*/
struct ocf_io_ops {
/**
* @brief Set up data vector in OCF IO
*
* @param[in] io OCF IO to set up
* @param[in] data Source context data
* @param[in] offset Data offset in source context data
*
* @retval 0 Data set up successfully
* @retval Non-zero Data set up failure
*/
int (*set_data)(struct ocf_io *io, ctx_data_t *data,
uint32_t offset);
/**
* @brief Get context data from OCF IO
*
* @param[in] io OCF IO to get data
*
* @return Data vector from IO
*/
ctx_data_t *(*get_data)(struct ocf_io *io);
/**
* @brief Increase reference counter in OCF IO
*
* @param[in] io OCF IO
*/
void (*get)(struct ocf_io *io);
/**
* @brief Decrease reference counter in OCF IO
*
* @note If the IO has no remaining references, it is deallocated
*
* @param[in] io OCF IO
*/
void (*put)(struct ocf_io *io);
};
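/*
 * Illustrative sketch, not part of the original header: a minimal ops set as
 * an adaptation layer might provide it. The my_io wrapper, its fields and the
 * plain int reference counter are hypothetical; a real adapter would use its
 * own allocator and the atomic primitives of its environment.
 */
struct my_io {
	struct ocf_io base;	/* OCF IO embedded as the first member so casts work */
	int refcnt;		/* illustrative only - should be an atomic counter */
	ctx_data_t *data;	/* currently attached data vector */
	uint32_t offset;	/* offset within the data vector */
};

static int my_io_set_data(struct ocf_io *io, ctx_data_t *data, uint32_t offset)
{
	struct my_io *mio = (struct my_io *)io;

	mio->data = data;
	mio->offset = offset;

	return 0;
}

static ctx_data_t *my_io_get_data(struct ocf_io *io)
{
	return ((struct my_io *)io)->data;
}

static void my_io_get(struct ocf_io *io)
{
	((struct my_io *)io)->refcnt++;
}

static void my_io_put(struct ocf_io *io)
{
	struct my_io *mio = (struct my_io *)io;

	if (--mio->refcnt == 0) {
		/* last reference dropped - release mio via the adapter's
		 * allocator here */
	}
}

static const struct ocf_io_ops my_io_ops = {
	.set_data = my_io_set_data,
	.get_data = my_io_get_data,
	.get = my_io_get,
	.put = my_io_put,
};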
/**
* @brief Configure OCF IO
*
* @param[in] io OCF IO
* @param[in] addr OCF IO destination address
* @param[in] bytes OCF IO size in bytes
* @param[in] dir OCF IO direction
* @param[in] class OCF IO destination class
* @param[in] flags OCF IO flags
*/
static inline void ocf_io_configure(struct ocf_io *io, uint64_t addr,
uint32_t bytes, uint32_t dir, uint32_t class, uint64_t flags)
{
io->addr = addr;
io->bytes = bytes;
io->class = class;
io->flags = flags;
io->dir = dir;
}
/**
* @brief Increase reference counter in OCF IO
*
* @note Wrapper for get IO operation
*
* @param[in] io OCF IO
*/
static inline void ocf_io_get(struct ocf_io *io)
{
io->ops->get(io);
}
/**
* @brief Decrease reference counter in OCF IO
*
* @note If the IO has no remaining references, it is deallocated
*
* @param[in] io OCF IO
*/
static inline void ocf_io_put(struct ocf_io *io)
{
io->ops->put(io);
}
/**
* @brief Set OCF IO completion function
*
* @param[in] io OCF IO
* @param[in] context Context for completion function
* @param[in] context2 Second context for completion function
* @param[in] fn Completion function
*/
static inline void ocf_io_set_cmpl(struct ocf_io *io, void *context,
void *context2, ocf_end_io_t fn)
{
io->priv1 = context;
io->priv2 = context2;
io->end = fn;
}
/**
* @brief Set OCF IO start function
*
* @param[in] io OCF IO
* @param[in] fn Start callback function
*/
static inline void ocf_io_set_start(struct ocf_io *io, ocf_start_io_t fn)
{
io->start = fn;
}
/**
* @brief Set OCF IO handle function
*
* @param[in] io OCF IO
* @param[in] fn Handle callback function
*/
static inline void ocf_io_set_handle(struct ocf_io *io, ocf_handle_io_t fn)
{
io->handle = fn;
}
/**
* @brief Call default completion function
*
* @note It is helper function for legacy completion functions
*
* @param[in] io OCF IO
* @param[in] error Completion status code
*/
static inline void ocf_io_end_default(struct ocf_io *io, int error)
{
ocf_end_t end = io->priv2;
end(io->priv1, error);
ocf_io_put(io);
}
/**
* @brief Set OCF IO default completion function
*
* @note This type of completion is for legacy completion type
*
* @param[in] io OCF IO
* @param[in] context Context for completion function
* @param[in] fn Completion function
*/
static inline void ocf_io_set_default_cmpl(struct ocf_io *io, void *context,
ocf_end_t fn)
{
io->priv1 = context;
io->priv2 = fn;
io->end = ocf_io_end_default;
}
/**
* @brief Set up data vector in OCF IO
*
* @note Wrapper for set up data vector function
*
* @param[in] io OCF IO to set up
* @param[in] data Source data vector
* @param[in] offset Data offset in source data vector
*
* @retval 0 Data set up successfully
* @retval Non-zero Data set up failure
*/
static inline int ocf_io_set_data(struct ocf_io *io, ctx_data_t *data,
uint32_t offset)
{
return io->ops->set_data(io, data, offset);
}
/**
* @brief Get data vector from OCF IO
*
* @note Wrapper for get data vector function
*
* @param[in] io OCF IO to get data
*
* @return Data vector from IO
*/
static inline ctx_data_t *ocf_io_get_data(struct ocf_io *io)
{
return io->ops->get_data(io);
}
/**
* @brief Set queue id to which IO should be submitted
*
* @param[in] io OCF IO to set up
* @param[in] queue IO queue id
*/
static inline void ocf_io_set_queue(struct ocf_io *io, uint32_t queue)
{
io->io_queue = queue;
}
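/*
 * Illustrative usage, not part of the original header: preparing a write IO
 * with the helpers above. How the struct ocf_io is allocated and submitted is
 * adapter-specific and not shown; my_write_complete() is a hypothetical
 * completion and OCF_WRITE comes from ocf_def.h.
 */
static void my_write_complete(struct ocf_io *io, int error)
{
	/* release the IO once it is completed (see ocf_io_put() above) */
	ocf_io_put(io);
}

static int my_prepare_write(struct ocf_io *io, ctx_data_t *data,
		uint64_t addr, uint32_t bytes)
{
	int result;

	/* write request directed to IO class 0 with no flags set */
	ocf_io_configure(io, addr, bytes, OCF_WRITE, 0, 0);

	result = ocf_io_set_data(io, data, 0);
	if (result)
		return result;

	ocf_io_set_queue(io, 0);	/* submit through queue 0 */
	ocf_io_set_cmpl(io, NULL, NULL, my_write_complete);

	return 0;
}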
/**
* @brief Handle IO in cache engine
*
* @param[in] io OCF IO to be handled
* @param[in] opaque OCF opaque
*/
void ocf_io_handle(struct ocf_io *io, void *opaque);
#endif /* __OCF_IO_H__ */

109
inc/ocf_io_class.h Normal file
View File

@ -0,0 +1,109 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
/**
* @file
* @brief IO class API
*
* File contains structures and methods for handling IO Class
* differentiation features
*/
#ifndef __OCF_IO_CLASS_H__
#define __OCF_IO_CLASS_H__
/**
* @brief OCF IO class information
*/
struct ocf_io_class_info {
char name[OCF_IO_CLASS_NAME_MAX];
/*!< The name of the IO class */
ocf_cache_mode_t cache_mode;
/*!< Cache mode of the IO class */
int16_t priority;
/*!< IO class priority */
uint32_t curr_size;
/*!< Current size of the IO class - number of cache lines which
* were assigned into this IO class
*/
uint32_t min_size;
/*!< Minimum number of cache lines guaranteed for the specified
* IO class. Once the current size reaches the minimum size,
* no further eviction from this IO class takes place
*/
uint32_t max_size;
/*!< Maximum number of cache lines that may be assigned to
* this IO class. Once the current size reaches the maximum size,
* no more allocation for this IO class takes place
*/
uint8_t eviction_policy_type;
/*!< The type of eviction policy for given IO class */
ocf_cleaning_t cleaning_policy_type;
/*!< The type of cleaning policy for given IO class */
};
/**
* @brief retrieve io class info
*
* function meant to retrieve information pertaining to particular IO class,
* specifically to fill ocf_io_class_info structure based on input parameters.
*
* @param[in] cache cache id, to which specified request pertains.
* @param[in] io_class id of an io class which shall be retrieved.
* @param[out] info io class info structure to be filled as a
* result of this function call.
*
* @return function returns 0 upon successful completion; appropriate error
* code is returned otherwise
*/
int ocf_io_class_get_info(ocf_cache_t cache, uint32_t io_class,
struct ocf_io_class_info *info);
/**
* @brief helper function for ocf_io_class_visit
*
* This function is called back from ocf_io_class_visit for each valid
* configured io class; henceforth all parameters are input parameters,
* no exceptions. It is usable to enumerate all the io classes.
*
* @param[in] cache cache id of cache for which data is being retrieved
* @param[in] io_class_id id of an io class for which callback herein
* is invoked.
* @param[in] cntx a context pointer passed herein from within
* ocf_io_class_visit down to this callback.
*
* @return 0 upon success; Nonzero upon failure (when nonzero is returned,
* this callback won't be invoked for any more io classes)
*/
typedef int (*ocf_io_class_visitor_t)(ocf_cache_t cache,
uint32_t io_class_id, void *cntx);
/**
* @brief enumerate all of the available IO classes.
*
* This function allows enumeration and retrieval of all io class id's that
* are valid for given cache id via visiting all those with callback function
* that is supplied by caller.
*
* @param[in] cache cache id to which given call pertains
* @param[in] visitor a callback function that will be issued for each and every
* IO class that is configured and valid within given cache instance
* @param[in] cntx a context variable - structure that shall be passed to a
* callback function for every call
*
* @return 0 upon successful completion of the function; otherwise nonzero result
* shall be returned
*/
int ocf_io_class_visit(ocf_cache_t cache, ocf_io_class_visitor_t visitor,
void *cntx);
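/*
 * Illustrative sketch, not part of the original header: a visitor that prints
 * every configured IO class using the functions declared above. Assumes
 * <stdio.h>; error handling is kept minimal.
 */
static int my_io_class_printer(ocf_cache_t cache, uint32_t io_class_id,
		void *cntx)
{
	struct ocf_io_class_info info;
	int result;

	result = ocf_io_class_get_info(cache, io_class_id, &info);
	if (result)
		return result;	/* non-zero stops further iteration */

	printf("IO class %u: %s (priority %d)\n", io_class_id, info.name,
			info.priority);

	return 0;
}

/* Typical invocation: ocf_io_class_visit(cache, my_io_class_printer, NULL); */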
#endif /* __OCF_IO_CLASS_H__ */

41
inc/ocf_logger.h Normal file
View File

@ -0,0 +1,41 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_LOGGER_H__
#define __OCF_LOGGER_H__
/**
* @file
* @brief Logger API
*/
#include <stdarg.h>
/**
* @brief Verbosity levels of context log
*/
typedef enum {
log_emerg,
log_alert,
log_crit,
log_err,
log_warn,
log_notice,
log_info,
log_debug,
} ocf_logger_lvl_t;
struct ocf_logger {
int (*open)(const struct ocf_logger *logger);
void (*close)(const struct ocf_logger *logger);
int (*printf)(const struct ocf_logger *logger, ocf_logger_lvl_t lvl,
const char *fmt, va_list args);
int (*printf_rl)(const char *func_name);
int (*dump_stack)(const struct ocf_logger *logger);
void *priv;
};
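/*
 * Illustrative sketch, not part of the original header: a logger backed by
 * stderr. Assumes <stdio.h>; the optional open/close/printf_rl/dump_stack
 * callbacks are left NULL. How the logger is registered with the OCF context
 * is defined by the context API and not shown here.
 */
static int my_logger_printf(const struct ocf_logger *logger,
		ocf_logger_lvl_t lvl, const char *fmt, va_list args)
{
	if (lvl > log_info)
		return 0;	/* skip debug messages */

	return vfprintf(stderr, fmt, args);
}

static struct ocf_logger my_logger = {
	.printf = my_logger_printf,
};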
#endif /* __OCF_LOGGER_H__ */

99
inc/ocf_metadata.h Normal file
View File

@ -0,0 +1,99 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_METADATA_H__
#define __OCF_METADATA_H__
/**
* @file
* @brief OCF metadata helper function
*
* Those functions can be used by data object implementation.
*/
/**
* @brief Atomic metadata for extended sector
*
* @warning The size of this structure has to be equal to 8 bytes
*/
struct ocf_atomic_metadata {
/** Core line of core (in cache line size unit) which is cached */
uint64_t core_line : 46;
/** Core sequence number to which this line belongs */
uint32_t core_seq_no : 16;
/** Set bit indicates that given sector is valid (is cached) */
uint32_t valid : 1;
/** Set bit indicates that sector is dirty */
uint32_t dirty : 1;
} __attribute__((packed));
#define OCF_ATOMIC_METADATA_SIZE sizeof(struct ocf_atomic_metadata)
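/*
 * Illustrative compile-time check, not part of the original header: the
 * warning above requires the packed structure to occupy exactly 8 bytes
 * (46 + 16 + 1 + 1 = 64 bits).
 */
typedef char my_atomic_metadata_size_check
		[(OCF_ATOMIC_METADATA_SIZE == 8) ? 1 : -1];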
/**
* @brief Get metadata entry (cache mapping) for specified sector of cache
* device
*
* Metadata has sector granularity. It might be used by data object which
* supports atomic writes - (write of data and metadata in one buffer)
*
* @param[in] cache OCF cache instance
* @param[in] addr Sector address in bytes
* @param[out] entry Metadata entry
*
* @retval 0 Metadata retrieved successfully
* @retval Non-zero Error
*/
int ocf_metadata_get_atomic_entry(ocf_cache_t cache, uint64_t addr,
struct ocf_atomic_metadata *entry);
/**
* @brief Probe cache device
*
* @param[in] ctx handle to object designating ocf context
* @param[in] cache_obj Cache data object
* @param[out] clean_shutdown Cache was gracefully stopped
* @param[out] cache_dirty Cache is dirty
*
* @retval 0 Probe successfully performed
* @retval -ENODATA Cache has not been detected
* @retval Non-zero ERROR
*/
int ocf_metadata_probe(ocf_ctx_t ctx, ocf_data_obj_t cache_obj,
bool *clean_shutdown, bool *cache_dirty);
/**
* @brief Check if sectors in cache line before given address are invalid
*
* It might be used by data object which supports
* atomic writes - (write of data and metadata in one buffer)
*
* @param[in] cache OCF cache instance
* @param[in] addr Sector address in bytes
*
* @retval 0 Not all sectors before given address are invalid
* @retval Non-zero Number of invalid sectors before given address
*/
int ocf_metadata_check_invalid_before(ocf_cache_t cache, uint64_t addr);
/**
* @brief Check if sectors in cache line after given end address are invalid
*
* It might be used by data object which supports
* atomic writes - (write of data and metadata in one buffer)
*
* @param[in] cache OCF cache instance
* @param[in] addr Sector address in bytes
* @param[in] bytes IO size in bytes
*
* @retval 0 Not all sectors after given end address are invalid
* @retval Non-zero Number of invalid sectors after given end address
*/
int ocf_metadata_check_invalid_after(ocf_cache_t cache, uint64_t addr,
uint32_t bytes);
#endif /* __OCF_METADATA_H__ */

50
inc/ocf_metadata_updater.h Normal file
View File

@ -0,0 +1,50 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_METADATA_UPDATER_H__
#define __OCF_METADATA_UPDATER_H__
/**
* @file
* @brief OCF metadata updater API
*
*/
/**
* @brief Run metadata updater
*
* @param[in] mu Metadata updater instance to run
*
* @retval Hint if there is need to rerun without waiting.
*/
uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu);
/**
* @brief Set metadata updater private data
*
* @param[in] mu Metadata updater handle
* @param[in] priv Private data
*/
void ocf_metadata_updater_set_priv(ocf_metadata_updater_t mu, void *priv);
/**
* @brief Get metadata updater private data
*
* @param[in] c Metadata updater handle
*
* @retval Metadata updater private data
*/
void *ocf_metadata_updater_get_priv(ocf_metadata_updater_t mu);
/**
* @brief Get cache instance to which metadata updater belongs
*
* @param[in] c Metadata updater handle
*
* @retval Cache instance
*/
ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu);
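/*
 * Illustrative sketch, not part of the original header: the body of an
 * adaptation-layer thread driving the metadata updater. my_wait_for_kick() is
 * a hypothetical blocking primitive signalled from the adapter's kick
 * callback; thread termination handling is omitted.
 */
static void my_wait_for_kick(ocf_metadata_updater_t mu);	/* hypothetical */

static void my_metadata_updater_thread(ocf_metadata_updater_t mu)
{
	uint32_t rerun;

	for (;;) {
		rerun = ocf_metadata_updater_run(mu);
		if (!rerun)
			my_wait_for_kick(mu);	/* nothing left - wait for next kick */
	}
}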
#endif /* __OCF_METADATA_UPDATER_H__ */

813
inc/ocf_mngt.h Normal file
View File

@ -0,0 +1,813 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_MNGT_H__
#define __OCF_MNGT_H__
#include "ocf_types.h"
#include "ocf_cache.h"
#include "ocf_core.h"
/**
* @file
* @brief OCF management operations definitions
*/
/**
* @brief Core start configuration
*/
struct ocf_mngt_core_config {
/**
* @brief OCF core data object UUID
*/
struct ocf_data_obj_uuid uuid;
/**
* @brief OCF core data object type
*/
uint8_t data_obj_type;
/**
* @brief OCF core ID number
*/
ocf_core_id_t core_id;
/**
* @brief OCF core name. If NULL, the core id is stringified and used
* as the core name
*/
const char *name;
/**
* @brief OCF core name size
*/
size_t name_size;
/**
* @brief OCF cache ID number
*/
ocf_cache_id_t cache_id;
/**
* @brief Add core to pool if cache isn't present or add core to
* earlier loaded cache
*/
bool try_add;
uint32_t seq_cutoff_threshold;
/*!< Sequential cutoff threshold (in bytes) */
struct {
void *data;
size_t size;
} user_metadata;
};
/**
* @brief Get number of OCF caches
*
* @param[in] ctx OCF context
*
* @retval Number of caches in given OCF instance
*/
uint32_t ocf_mngt_cache_get_count(ocf_ctx_t ctx);
/* Cache instances getters */
/**
* @brief Get OCF cache
*
* @note On success this function also increases the reference counter of
* the given cache
*
* @param[in] ctx OCF context
* @param[in] id OCF cache ID
* @param[out] cache OCF cache handle
*
* @retval 0 Get cache successfully
* @retval -OCF_ERR_INV_CACHE_ID Cache ID out of range
* @retval -OCF_ERR_CACHE_NOT_EXIST Cache with given ID does not exist
*/
int ocf_mngt_cache_get(ocf_ctx_t ctx, ocf_cache_id_t id, ocf_cache_t *cache);
/**
* @brief Decrease reference counter in cache
*
* @note If the cache has no remaining references, it is deallocated
*
* @param[in] cache Handle to cache
*/
void ocf_mngt_cache_put(ocf_cache_t cache);
/**
* @brief Lock cache for management operations (write lock, exclusive)
*
* @param[in] cache Handle to cache
*
* @retval 0 Cache successfully locked
* @retval -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already
* stopping
* @retval -OCF_ERR_CACHE_IN_USE Can not lock cache - cache is in use
* @retval -OCF_ERR_INTR Wait operation interrupted
*/
int ocf_mngt_cache_lock(ocf_cache_t cache);
/**
* @brief Lock cache for read - assures cache config does not change while
* lock is being held, while allowing other users to acquire
* read lock in parallel.
*
* @param[in] cache Handle to cache
*
* @retval 0 Cache successfully locked
* @retval -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already
* stopping
* @retval -OCF_ERR_CACHE_IN_USE Can not lock cache - cache is in use
* @retval -OCF_ERR_INTR Wait operation interrupted
*/
int ocf_mngt_cache_read_lock(ocf_cache_t cache);
/**
* @brief Write-unlock cache
*
* @param[in] cache Handle to cache
*/
void ocf_mngt_cache_unlock(ocf_cache_t cache);
/**
* @brief Read-unlock cache
*
* @param[in] cache Handle to cache
*/
void ocf_mngt_cache_read_unlock(ocf_cache_t cache);
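/*
 * Illustrative usage, not part of the original header: taking a reference and
 * the write lock around a management operation. The cache ID value comes from
 * the caller and is a placeholder here.
 */
static int my_locked_operation(ocf_ctx_t ctx, ocf_cache_id_t id)
{
	ocf_cache_t cache;
	int result;

	result = ocf_mngt_cache_get(ctx, id, &cache);	/* takes a reference */
	if (result)
		return result;

	result = ocf_mngt_cache_lock(cache);
	if (result) {
		ocf_mngt_cache_put(cache);
		return result;
	}

	/* ... management operations requiring the write lock go here ... */

	ocf_mngt_cache_unlock(cache);
	ocf_mngt_cache_put(cache);

	return 0;
}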
/**
* @brief Cache visitor function
*
* @param[in] cache Handle to cache
* @param[in] cntx Visitor function context
*
* @retval 0 Success
* @retval Non-zero Error
*/
typedef int (*ocf_mngt_cache_visitor_t)(ocf_cache_t cache, void *cntx);
/**
* @brief Loop for each cache
*
* @note Visitor function is called for each cache
*
* @param[in] ctx OCF context
* @param[in] visitor OCF cache visitor function
* @param[in] cntx Context for cache visitor function
*
* @retval 0 Success
* @retval Non-zero Error
*/
int ocf_mngt_cache_visit(ocf_ctx_t ctx, ocf_mngt_cache_visitor_t visitor,
void *cntx);
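/*
 * Illustrative sketch, not part of the original header: counting caches with
 * a visitor. Functionally equivalent to ocf_mngt_cache_get_count(); shown
 * only to demonstrate the callback signature.
 */
static int my_cache_counter(ocf_cache_t cache, void *cntx)
{
	(*(uint32_t *)cntx)++;

	return 0;
}

/* Typical invocation:
 *	uint32_t count = 0;
 *	ocf_mngt_cache_visit(ctx, my_cache_counter, &count);
 */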
/**
* @brief Loop for each cache reverse
*
* @note Visitor function is called for each cache
*
* @param[in] ctx OCF context
* @param[in] visitor OCF cache visitor function
* @param[in] cntx Context for cache visitor function
*
* @retval 0 Success
* @retval Non-zero Error
*/
int ocf_mngt_cache_visit_reverse(ocf_ctx_t ctx, ocf_mngt_cache_visitor_t visitor,
void *cntx);
/**
* @brief Cache probe status
*/
struct ocf_mngt_cache_probe_status {
/**
* @brief Gracefully shutdown for cache detected
*/
bool clean_shutdown;
/**
* @brief Cache is dirty and requires flushing
*/
bool cache_dirty;
};
/**
* @brief Cache start configuration
*/
struct ocf_mngt_cache_config {
/**
* @brief Cache ID. If this field is set to an invalid cache ID,
* the first available cache ID will be used
*/
ocf_cache_id_t id;
/**
* @brief Cache name. If NULL, the cache id is stringified and used as
* the cache name
*/
const char *name;
/**
* @brief Size of cache name
*/
size_t name_size;
/**
* @brief Cache mode
*/
ocf_cache_mode_t cache_mode;
/**
* @brief Eviction policy type
*/
ocf_eviction_t eviction_policy;
/**
* @brief Cache line size
*/
ocf_cache_line_size_t cache_line_size;
/**
* @brief Metadata layout (striping/sequential)
*/
ocf_metadata_layout_t metadata_layout;
bool metadata_volatile;
/**
* @brief Backfill configuration
*/
struct {
uint32_t max_queue_size;
uint32_t queue_unblock_size;
} backfill;
/**
* @brief Number of I/O queues to be created
*/
uint32_t io_queues;
/**
* @brief Start cache and keep it locked
*
* @note In this case caller is able to perform additional activities
* and then shall unlock cache
*/
bool locked;
/**
* @brief Use pass-through mode for I/O requests unaligned to 4KiB
*/
bool pt_unaligned_io;
/**
* @brief If set, try to submit all I/O in fast path.
*/
bool use_submit_io_fast;
};
/**
* @brief Cache attach configuration
*/
struct ocf_mngt_cache_device_config {
/**
* @brief Cache data object UUID
*/
struct ocf_data_obj_uuid uuid;
/**
* @brief Cache data object type
*/
uint8_t data_obj_type;
/**
* @brief Cache line size
*/
ocf_cache_line_size_t cache_line_size;
/**
* @brief Ignore warnings and start cache
*
* @note It will force starting the cache despite:
* - a dirty shutdown of the previous cache (it will be overwritten)
* - existing cache metadata with dirty shutdown (it will be ignored and
*   the cache reinitialized)
*/
bool force;
/**
* @brief Minimum free RAM required to start cache. Set during
* cache start procedure
*/
uint64_t min_free_ram;
/**
* @brief If set, cache features (like discard) are tested
* before starting cache
*/
bool perform_test;
/**
* @brief If set, cache device will be discarded on cache start
*/
bool discard_on_start;
};
/**
* @brief Start cache instance
*
* @param[in] ctx OCF context
* @param[out] cache Cache handle
* @param[in] cfg Starting cache configuration
*
* @retval 0 Cache started successfully
* @retval Non-zero Error occurred and starting cache failed
*/
int ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
struct ocf_mngt_cache_config *cfg);
/**
* @brief Stop cache instance
*
* @param[in] cache Cache handle
*
* @retval 0 Cache successfully stopped
* @retval Non-zero Error occurred during stopping cache
*/
int ocf_mngt_cache_stop(ocf_cache_t cache);
/**
* @brief Stop cache instance without acquiring cache lock - caller is
* required to hold cache write lock when calling this
*
* @param[in] cache Cache handle
*
* @retval 0 Cache successfully stopped
* @retval Non-zero Error occurred during stopping cache
*/
int ocf_mngt_cache_stop_nolock(ocf_cache_t cache);
/**
* @brief Attach caching device to cache instance
*
* @param[in] cache Cache handle
* @param[in] device_cfg Caching device configuration
*
* @retval 0 Cache successfully attached
* @retval Non-zero Error occurred during attaching cache
*/
int ocf_mngt_cache_attach(ocf_cache_t cache,
struct ocf_mngt_cache_device_config *device_cfg);
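/*
 * Illustrative sketch, not part of the original header: starting a cache and
 * attaching a caching device. The device path, data object type value and
 * cache ID are placeholders, the uuid field names (data/size) are assumed
 * from the data object UUID declaration in ocf_data_obj.h, and the remaining
 * configuration fields are left zeroed for brevity.
 */
static int my_cache_setup(ocf_ctx_t ctx, ocf_cache_t *cache)
{
	struct ocf_mngt_cache_config cfg = {
		.id = 1,				/* placeholder cache ID */
		.cache_mode = ocf_cache_mode_default,
		.eviction_policy = ocf_eviction_default,
		.cache_line_size = ocf_cache_line_size_default,
		.metadata_layout = ocf_metadata_layout_default,
		.io_queues = 1,
	};
	struct ocf_mngt_cache_device_config device_cfg = {
		.uuid = {
			.data = "/dev/nvme0n1",		/* placeholder path */
			.size = sizeof("/dev/nvme0n1"),
		},
		.data_obj_type = 0,			/* placeholder type id */
		.cache_line_size = ocf_cache_line_size_default,
	};
	int result;

	result = ocf_mngt_cache_start(ctx, cache, &cfg);
	if (result)
		return result;

	result = ocf_mngt_cache_attach(*cache, &device_cfg);
	if (result)
		ocf_mngt_cache_stop(*cache);

	return result;
}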
/**
* @brief Attach caching device to cache instance without acquiring cache lock
* - caller is required to hold cache write lock when calling this
*
* @param[in] cache Cache handle
* @param[in] device_cfg Caching device configuration
*
* @retval 0 Cache successfully attached
* @retval Non-zero Error occurred during attaching cache
*/
int ocf_mngt_cache_attach_nolock(ocf_cache_t cache,
struct ocf_mngt_cache_device_config *device_cfg);
/**
* @brief Detach caching cache
*
* @param[in] cache Cache handle
*
* @retval 0 Cache successfully detached
* @retval Non-zero Error occurred during stopping cache
*/
int ocf_mngt_cache_detach(ocf_cache_t cache);
/**
* @brief Load cache instance
*
* @param[in] cache Cache handle
* @param[in] cfg Cache configuration
* @param[in] device_cfg Caching device configuration
*
* @retval 0 Cache successfully loaded
* @retval Non-zero Error occurred during loading cache
*/
int ocf_mngt_cache_load(ocf_ctx_t ctx, ocf_cache_t *cache,
struct ocf_mngt_cache_config *cfg,
struct ocf_mngt_cache_device_config *device_cfg);
/* Adding and removing cores */
/**
* @brief Add core to cache instance
*
* @param[in] cache Cache handle
* @param[in] core Core object handle
* @param[in] cfg Core configuration
*
* @retval 0 Core successfully added to cache
* @retval Non-zero Error occurred and adding core failed
*/
int ocf_mngt_cache_add_core(ocf_cache_t cache, ocf_core_t *core,
struct ocf_mngt_core_config *cfg);
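/*
 * Illustrative sketch, not part of the original header: adding a core device
 * to a running cache. The core path, data object type value and core ID are
 * placeholders; uuid field names are assumed as in the cache setup sketch.
 */
static int my_add_core(ocf_cache_t cache, ocf_core_t *core)
{
	struct ocf_mngt_core_config core_cfg = {
		.uuid = {
			.data = "/dev/sdb",		/* placeholder path */
			.size = sizeof("/dev/sdb"),
		},
		.data_obj_type = 0,			/* placeholder type id */
		.core_id = 0,				/* placeholder core ID */
	};

	return ocf_mngt_cache_add_core(cache, core, &core_cfg);
}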
/**
* @brief Add core to cache instance without acquiring cache lock - caller is
required to hold cache write lock when calling this
*
* @param[in] cache Cache handle
* @param[in] core Core object handle
* @param[in] cfg Core configuration
*
* @retval 0 Core successfully added to cache
* @retval Non-zero Error occurred and adding core failed
*/
int ocf_mngt_cache_add_core_nolock(ocf_cache_t cache, ocf_core_t *core,
struct ocf_mngt_core_config *cfg);
/**
* @brief Remove core from cache instance
*
* @param[in] cache Cache handle
* @param[in] core_id Core ID
* @param[in] detach only detach core without removing it from cache metadata
*
* @retval 0 Core successfully removed from cache
* @retval Non-zero Error occurred and removing core failed
*/
int ocf_mngt_cache_remove_core(ocf_cache_t cache, ocf_core_id_t core_id,
bool detach);
/**
* @brief Remove core from cache instance without acquiring cache lock - caller
* is required to hold cache write lock when calling this
*
* @param[in] cache Cache handle
* @param[in] core_id Core ID
* @param[in] detach only detach core without removing it from cache metadata
*
* @retval 0 Core successfully removed from cache
* @retval Non-zero Error occurred and removing core failed
*/
int ocf_mngt_cache_remove_core_nolock(ocf_cache_t cache, ocf_core_id_t core_id,
bool detach);
/* Flush operations */
/**
* @brief Flush data from given cache
*
* @param[in] cache Cache handle
* @param[in] interruption Allow for interruption
*
* @retval 0 Successfully flushed given cache
* @retval Non-zero Error occurred and flushing cache failed
*/
int ocf_mngt_cache_flush(ocf_cache_t cache, bool interruption);
/**
* @brief Flush data from given cache without acquiring cache lock - caller is
* required to hold cache write OR read lock when calling this
*
* @param[in] cache Cache handle
* @param[in] interruption Allow for interruption
*
* @retval 0 Successfully flushed given cache
* @retval Non-zero Error occurred and flushing cache failed
*/
int ocf_mngt_cache_flush_nolock(ocf_cache_t cache, bool interruption);
/**
* @brief Flush data to given core
*
* @param[in] cache Cache handle
* @param[in] id Core ID
* @param[in] interruption Allow for interruption
*
* @retval 0 Successfully flushed data to given core
* @retval Non-zero Error occurred and flushing data to core failed
*/
int ocf_mngt_core_flush(ocf_cache_t cache, ocf_core_id_t id, bool interruption);
/**
* @brief Flush data to given core without acquiring cache lock - caller is
* required to hold cache write OR read lock when calling this
*
* @param[in] cache Cache handle
* @param[in] id Core ID
* @param[in] interruption Allow for interruption
*
* @retval 0 Successfully flushed data to given core
* @retval Non-zero Error occurred and flushing data to core failed
*/
int ocf_mngt_core_flush_nolock(ocf_cache_t cache, ocf_core_id_t id,
bool interruption);
/**
* @brief Interrupt existing flushing of cache or core
*
* @param[in] cache Cache instance
*
* @retval 0 Operation success
* @retval Non-zero Operation failure
*/
int ocf_mngt_cache_flush_interrupt(ocf_cache_t cache);
/**
* @brief Purge data of given core
*
* @param[in] cache Cache handle
* @param[in] id Core ID
* @param[in] interruption Allow for interruption
*
* @retval 0 Successfully purged data of given core
* @retval Non-zero Error occurred and purging data of core failed
*/
int ocf_mngt_core_purge(ocf_cache_t cache, ocf_core_id_t id, bool interruption);
/**
* @brief Purge data from given cache
*
* @param[in] cache Cache handle
* @param[in] interruption Allow for interruption
*
* @retval 0 Successfully purged given cache
* @retval Non-zero Error occurred and purging cache failed
*/
int ocf_mngt_cache_purge(ocf_cache_t cache, bool interruption);
/**
* @brief Set cleaning policy in given cache
*
* @param[in] cache Cache handle
* @param[in] type Cleaning policy type
*
* @retval 0 Policy has been set successfully
* @retval Non-zero Error occurred and policy has not been set
*/
int ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache, ocf_cleaning_t type);
/**
* @brief Get current cleaning policy from given cache
*
* @param[in] cache Cache handle
* @param[out] type Variable to store current cleaning policy type
*
* @retval 0 Policy has been retrieved successfully
* @retval Non-zero Error occurred and policy has not been retrieved
*/
int ocf_mngt_cache_cleaning_get_policy(ocf_cache_t cache, ocf_cleaning_t *type);
/**
* @brief Set cleaning parameter in given cache
*
* @param[in] cache Cache handle
* @param[in] type Cleaning policy type
* @param[in] param_id Cleaning policy parameter id
* @param[in] param_value Cleaning policy parameter value
*
* @retval 0 Parameter has been set successfully
* @retval Non-zero Error occurred and parameter has not been set
*/
int ocf_mngt_cache_cleaning_set_param(ocf_cache_t cache, ocf_cleaning_t type,
uint32_t param_id, uint32_t param_value);
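/*
 * Illustrative usage, not part of the original header: switching to the ACP
 * cleaner and tuning its wake-up time. The parameter id ocf_acp_wake_up_time
 * comes from the ACP cleaning header; the value 10 is a placeholder within
 * the allowed range.
 */
static int my_enable_acp(ocf_cache_t cache)
{
	int result;

	result = ocf_mngt_cache_cleaning_set_policy(cache, ocf_cleaning_acp);
	if (result)
		return result;

	return ocf_mngt_cache_cleaning_set_param(cache, ocf_cleaning_acp,
			ocf_acp_wake_up_time, 10);
}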
/**
* @brief Get cleaning parameter from given cache
*
* @param[in] cache Cache handle
* @param[in] type Cleaning policy type
* @param[in] param_id Cleaning policy parameter id
* @param[out] param_value Variable to store parameter value
*
* @retval 0 Parameter has been retrieved successfully
* @retval Non-zero Error occurred and parameter has not been retrieved
*/
int ocf_mngt_cache_cleaning_get_param(ocf_cache_t cache, ocf_cleaning_t type,
uint32_t param_id, uint32_t *param_value);
/**
* @brief IO class configuration
*/
struct ocf_mngt_io_class_config {
/**
* @brief IO class ID
*/
uint32_t class_id;
/**
* @brief IO class name
*/
const char *name;
/**
* @brief IO class eviction priority
*/
int16_t prio;
/**
* @brief IO class cache mode
*/
ocf_cache_mode_t cache_mode;
/**
* @brief IO class minimum size
*/
uint32_t min_size;
/**
* @brief IO class maximum size
*/
uint32_t max_size;
};
/**
* @brief Configure IO class in given cache
*
* @param[in] cache Cache handle
* @param[in] cfg IO class configuration
*
* @retval 0 Configuration has been set successfully
* @retval Non-zero Error occurred and configuration has not been set
*/
int ocf_mngt_io_class_configure(ocf_cache_t cache,
const struct ocf_mngt_io_class_config *cfg);
/**
* @brief Set core sequential cutoff threshold
*
* @param[in] cache Cache handle
* @param[in] core_id Core ID
* @param[in] thresh threshold in bytes for sequential cutoff
*
* @retval 0 Sequential cutoff threshold has been set successfully
* @retval Non-zero Error occurred and threshold hasn't been updated
*/
int ocf_mngt_set_seq_cutoff_threshold(ocf_cache_t cache, ocf_core_id_t core_id,
uint32_t thresh);
/**
* @brief Set core sequential cutoff policy
*
* @param[in] cache Cache handle
* @param[in] core_id Core ID
* @param[in] policy sequential cutoff policy
*
* @retval 0 Sequential cutoff policy has been set successfully
* @retval Non-zero Error occurred and policy hasn't been updated
*/
int ocf_mngt_set_seq_cutoff_policy(ocf_cache_t cache, ocf_core_id_t core_id,
ocf_seq_cutoff_policy policy);
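/*
 * Illustrative usage, not part of the original header: configuring sequential
 * cutoff for a single core with the two setters above. The 1 MiB threshold is
 * an arbitrary placeholder value.
 */
static int my_tune_seq_cutoff(ocf_cache_t cache, ocf_core_id_t core_id)
{
	int result;

	/* cut off sequential streams longer than 1 MiB ... */
	result = ocf_mngt_set_seq_cutoff_threshold(cache, core_id, 1024 * 1024);
	if (result)
		return result;

	/* ... regardless of cache occupancy */
	return ocf_mngt_set_seq_cutoff_policy(cache, core_id,
			ocf_seq_cutoff_policy_always);
}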
/**
* @brief Get core sequential cutoff threshold
*
* @param[in] cache Cache handle
* @param[in] core_id Core ID
* @param[out] thresh Variable to store sequential cutoff threshold in bytes
*
* @retval 0 Sequential cutoff threshold has been retrieved successfully
* @retval Non-zero Error occurred
*/
int ocf_mngt_get_seq_cutoff_threshold(ocf_cache_t cache, ocf_core_id_t core_id,
uint32_t *thresh);
/**
* @brief Get core sequential cutoff policy
*
* @param[in] cache Cache handle
* @param[in] core_id Core ID
* @param[out] policy Variable to store sequential cutoff policy
*
* @retval 0 Sequential cutoff policy has been retrieved successfully
* @retval Non-zero Error occurred
*/
int ocf_mngt_get_seq_cutoff_policy(ocf_cache_t cache, ocf_core_id_t core_id,
ocf_seq_cutoff_policy *policy);
/**
* @brief Set cache mode in given cache
*
* @param[in] cache Cache handle
* @param[in] mode Cache mode to set
* @param[in] flush Perform flushing before switch cache mode
*
* @retval 0 Cache mode has been set successfully
* @retval Non-zero Error occurred and cache mode has not been set
*/
int ocf_mngt_cache_set_mode(ocf_cache_t cache, ocf_cache_mode_t mode,
uint8_t flush);
/**
* @brief Set cache fallback Pass Through error threshold
*
* @param[in] cache Cache handle
* @param[in] threshold Value to be set as threshold
*
* @retval 0 Fallback-PT threshold has been set successfully
* @retval Non-zero Error occurred
*/
int ocf_mngt_cache_set_fallback_pt_error_threshold(ocf_cache_t cache,
uint32_t threshold);
/**
* @brief Get cache fallback Pass Through error threshold
*
* @param[in] cache Cache handle
* @param[out] threshold Fallback-PT threshold
*
* @retval 0 Fallback-PT threshold has been retrieved successfully
* @retval Non-zero Error occurred
*/
int ocf_mngt_cache_get_fallback_pt_error_threshold(ocf_cache_t cache,
uint32_t *threshold);
/**
* @brief Reset cache fallback Pass Through error counter
*
* @param[in] cache Cache handle
*
* @retval 0 Counter has been reset successfully
*/
int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache);
/**
* @brief Initialize core pool
*
* @param[in] ctx OCF context
*/
void ocf_mngt_core_pool_init(ocf_ctx_t ctx);
/**
* @brief Get core pool count
*
* @param[in] ctx OCF context
*
* @retval Number of cores in core pool
*/
int ocf_mngt_core_pool_get_count(ocf_ctx_t ctx);
/**
* @brief Add core to pool
*
* @param[in] ctx OCF context
* @param[in] uuid Core data object UUID
* @param[in] type OCF core data object type
*
* @retval 0 Core added to pool successfully
* @retval Non-zero Error occurred and adding core to pool failed
*/
int ocf_mngt_core_pool_add(ocf_ctx_t ctx, ocf_uuid_t uuid, uint8_t type);
/**
* @brief Look up core data object in pool by UUID
*
* @param[in] ctx OCF context
* @param[in] uuid Core data object UUID
* @param[in] type OCF core data object type
*
* @retval Handle to object with matching UUID
* @retval NULL Object with given UUID not found
*/
ocf_data_obj_t ocf_mngt_core_pool_lookup(ocf_ctx_t ctx, ocf_uuid_t uuid,
ocf_data_obj_type_t type);
/**
* @brief Iterate over all objects in pool and call visitor callback
*
* @param[in] ctx OCF context
* @param[in] visitor Visitor callback
* @param[in] visitor_ctx Context for visitor callback
*
* @retval 0 Success
* @retval Non-zero Error
*/
int ocf_mngt_core_pool_visit(ocf_ctx_t ctx,
int (*visitor)(ocf_uuid_t, void *), void *visitor_ctx);
/**
* @brief Remove core from pool
*
* @param[in] ctx OCF context
* @param[in] obj Core data object
*/
void ocf_mngt_core_pool_remove(ocf_ctx_t ctx, ocf_data_obj_t obj);
/**
* @brief Close and remove core from pool
*
* @param[in] ctx OCF context
* @param[in] obj Core data object
*/
void ocf_mngt_core_pool_close_and_remove(ocf_ctx_t ctx, ocf_data_obj_t obj);
/**
* @brief Deinit core pool
*
* @param[in] ctx OCF context
*/
void ocf_mngt_core_pool_deinit(ocf_ctx_t ctx);
#endif /* __OCF_MNGT_H__ */

65
inc/ocf_queue.h Normal file
View File

@ -0,0 +1,65 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef OCF_QUEUE_H_
#define OCF_QUEUE_H_
/**
* @file
* @brief OCF queues API
*/
/**
* @brief Run queue processing
*
* @param[in] q Queue to run
*/
void ocf_queue_run(ocf_queue_t q);
/**
* @brief Set queue private data
*
* @param[in] q I/O queue
* @param[in] priv Private data
*/
void ocf_queue_set_priv(ocf_queue_t q, void *priv);
/**
* @brief Get queue private data
*
* @param[in] q I/O queue
*
* @retval I/O queue private data
*/
void *ocf_queue_get_priv(ocf_queue_t q);
/**
* @brief Get number of pending requests in I/O queue
*
* @param[in] q I/O queue
*
* @retval Number of pending requests in I/O queue
*/
uint32_t ocf_queue_pending_io(ocf_queue_t q);
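/*
 * Illustrative sketch, not part of the original header: a simplified polling
 * loop an adaptation layer might run per queue. Real adapters wake the queue
 * thread from their queue-kick callback instead of sleeping;
 * my_queue_stopping() and my_sleep_ms() are hypothetical helpers.
 */
static int my_queue_stopping(ocf_queue_t q);	/* hypothetical */
static void my_sleep_ms(uint32_t ms);		/* hypothetical */

static void my_queue_thread(ocf_queue_t q)
{
	while (!my_queue_stopping(q)) {
		if (ocf_queue_pending_io(q))
			ocf_queue_run(q);	/* process pending requests */
		else
			my_sleep_ms(1);		/* nothing queued - back off */
	}
}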
/**
* @brief Get cache instance to which I/O queue belongs
*
* @param[in] q I/O queue
*
* @retval Cache instance
*/
ocf_cache_t ocf_queue_get_cache(ocf_queue_t q);
/**
* @brief Get I/O queue id
*
* @param[in] q I/O queue
*
* @retval I/O queue id
*/
uint32_t ocf_queue_get_id(ocf_queue_t q);
#endif

207
inc/ocf_stats.h Normal file
View File

@ -0,0 +1,207 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
/**
* @file
* @brief OCF API for getting and resetting statistics
*
* This file contains routines pertaining to retrieval and
* manipulation of OCF IO statistics.
*/
#ifndef __OCF_STATS_H__
#define __OCF_STATS_H__
struct ocf_io;
/**
* @brief OCF requests statistics like hit, miss, etc...
*
* @note To calculate the number of hit requests:
* total - (partial_miss + full_miss)
*/
struct ocf_stats_req {
/** Number of partial misses */
uint64_t partial_miss;
/** Number of full misses */
uint64_t full_miss;
/** Total of requests */
uint64_t total;
/** Pass-through requests */
uint64_t pass_through;
};
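/*
 * Illustrative helper, not part of the original header, implementing the
 * hit-count formula from the note above.
 */
static inline uint64_t my_stats_req_hits(const struct ocf_stats_req *req)
{
	return req->total - (req->partial_miss + req->full_miss);
}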
/**
* @brief OCF error statistics
*/
struct ocf_stats_error {
/** Read errors */
uint32_t read;
/** Write errors */
uint32_t write;
};
/**
* @brief OCF block statistics in bytes
*/
struct ocf_stats_block {
/** Number of blocks read */
uint64_t read;
/** Number of blocks written */
uint64_t write;
};
/**
* Statistics appropriate for given IO class
*/
struct ocf_stats_io_class {
/** Read requests statistics */
struct ocf_stats_req read_reqs;
/** Write requests statistics */
struct ocf_stats_req write_reqs;
/** Block requests statistics */
struct ocf_stats_block blocks;
/** Number of cache lines available for given partition */
uint64_t free_clines;
/** Number of cache lines within lru list */
uint64_t occupancy_clines;
/** Number of dirty cache lines assigned to specific partition */
uint64_t dirty_clines;
};
#define IO_PACKET_NO 12
#define IO_ALIGN_NO 4
/**
* @brief Core debug statistics
*/
struct ocf_stats_core_debug {
/** I/O sizes being read (grouped by packets) */
uint64_t read_size[IO_PACKET_NO];
/** I/O sizes being written (grouped by packets) */
uint64_t write_size[IO_PACKET_NO];
/** I/O alignment for reads */
uint64_t read_align[IO_ALIGN_NO];
/** I/O alignment for writes */
uint64_t write_align[IO_ALIGN_NO];
};
/**
* @brief OCF core statistics
*/
struct ocf_stats_core {
/** Core size in cache line size unit */
uint64_t core_size;
/** Core size in bytes unit */
uint64_t core_size_bytes;
/** Number of cache lines allocated in the cache for this core */
uint32_t cache_occupancy;
/** Number of dirty cache lines allocated in the cache for this core */
uint32_t dirty;
/** Number of blocks flushed in ongoing flush operation */
uint32_t flushed;
/** How long the core has been dirty, in seconds */
uint32_t dirty_for;
/** Read requests statistics */
struct ocf_stats_req read_reqs;
/** Write requests statistics */
struct ocf_stats_req write_reqs;
/** Block requests for cache data object statistics */
struct ocf_stats_block cache_obj;
/** Block requests for core data object statistics */
struct ocf_stats_block core_obj;
/** Block requests submitted by user to this core */
struct ocf_stats_block core;
/** Cache data object error statistics */
struct ocf_stats_error cache_errors;
/** Core data object error statistics */
struct ocf_stats_error core_errors;
/** Debug statistics */
struct ocf_stats_core_debug debug_stat;
/** Sequential cutoff threshold (in bytes) */
uint32_t seq_cutoff_threshold;
/** Sequential cutoff policy */
ocf_seq_cutoff_policy seq_cutoff_policy;
};
/**
* @brief Initialize or reset statistics.
*
* Initialize or reset counters used for statistics.
*
* @param[in] cache OCF cache device handle
* @param[in] core_id Id of core for which statistics should be initialized.
*/
int ocf_stats_initialize(ocf_cache_t cache, ocf_core_id_t core_id);
/**
* @brief ocf_io_class_get_stats retrieve IO class statistics
*
* Retrieve IO class statistics for given core.
*
* @param[in] core core handle to which request pertains
* @param[in] io_class IO class, stats of which are requested
* @param[out] stats statistics structure that shall be filled as
* a result of this function invocation.
*
* @result zero upon successful completion; error code otherwise
*/
int ocf_io_class_get_stats(ocf_core_t core, uint32_t io_class,
struct ocf_stats_io_class *stats);
/**
* @brief retrieve core stats
*
* Retrieve ocf per core stats (for all IO classes together)
*
* @param[in] core core handle to which request pertains
* @param[out] stats statistics structure that shall be filled as
* a result of this function invocation.
*
* @result zero upon successful completion; error code otherwise
*/
int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats);
/**
* @brief update stats given IO request
*
* Function meant to update stats for IO request.
*
* @note This function shall be invoked for each IO request processed
*
* @param[in] core to which request pertains
* @param[in] io request for which stats are being updated
*/
void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io);
#endif /* __OCF_STATS_H__ */

190
inc/ocf_stats_builder.h Normal file
View File

@ -0,0 +1,190 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
/**
* @file
* @brief OCF API for collecting statistics
*
* This file contains routines pertaining to retrieval and
* manipulation of OCF IO statistics.
*/
#ifndef __OCF_STATS_BUILDER_H__
#define __OCF_STATS_BUILDER_H__
/**
* Entire row of statistics
*/
struct ocf_stat {
/** Value */
uint64_t value;
/** percent x10 */
uint64_t percent;
};
/**
* @brief Usage statistics in 4 KiB unit
*
* An example of presenting statistics:
* <pre>
*
* Usage statistics Count % Units
*
* Occupancy 20 50.0 4KiB blocks
* Free 20 50.0 4KiB blocks
* Clean 15 75.0 4KiB blocks
* Dirty 5 25.0 4KiB blocks
*
* </pre>
*/
struct ocf_stats_usage {
struct ocf_stat occupancy;
struct ocf_stat free;
struct ocf_stat clean;
struct ocf_stat dirty;
};
/**
* @brief Request statistics
*
* An example of presenting statistics:
* <pre>
*
* Request statistics Count % Units
*
* Read hits 10 4.5 Requests
* Read partial misses 1 0.5 Requests
* Read full misses 211 95.0 Requests
* Read total 222 100.0 Requests
*
* Write hits 0 0.0 Requests
* Write partial misses 0 0.0 Requests
* Write full misses 0 0.0 Requests
* Write total 0 0.0 Requests
*
* Pass-Through reads 0 0.0 Requests
* Pass-Through writes 0 0.0 Requests
* Serviced requests 222 100.0 Requests
*
* Total requests 222 100.0 Requests
*
* </pre>
*/
struct ocf_stats_requests {
struct ocf_stat rd_hits;
struct ocf_stat rd_partial_misses;
struct ocf_stat rd_full_misses;
struct ocf_stat rd_total;
struct ocf_stat wr_hits;
struct ocf_stat wr_partial_misses;
struct ocf_stat wr_full_misses;
struct ocf_stat wr_total;
struct ocf_stat rd_pt;
struct ocf_stat wr_pt;
struct ocf_stat serviced;
struct ocf_stat total;
};
/**
* @brief Block statistics
*
* An example of presenting statistics:
* <pre>
*
* Block statistics Count % Units
*
* Reads from core data object(s) 426 100.0 4KiB blocks
* Writes to core data object(s) 0 0.0 4KiB blocks
* Total to/from core data object (s) 426 100.0 4KiB blocks
*
* Reads from cache data object 13 3.0 4KiB blocks
* Writes to cache data object 426 97.0 4KiB blocks
* Total to/from cache data object 439 100.0 4KiB blocks
*
* Reads from core(s) 439 100.0 4KiB blocks
* Writes to core(s) 0 0.0 4KiB blocks
* Total to/from core(s) 439 100.0 4KiB blocks
*
* </pre>
*/
struct ocf_stats_blocks {
struct ocf_stat core_obj_rd;
struct ocf_stat core_obj_wr;
struct ocf_stat core_obj_total;
struct ocf_stat cache_obj_rd;
struct ocf_stat cache_obj_wr;
struct ocf_stat cache_obj_total;
struct ocf_stat volume_rd;
struct ocf_stat volume_wr;
struct ocf_stat volume_total;
};
/**
* @brief Error statistics
*
* An example of presenting statistics:
* <pre>
*
* Error statistics Count % Units
*
* Cache read errors 0 0.0 Requests
* Cache write errors 0 0.0 Requests
* Cache total errors 0 0.0 Requests
*
* Core read errors 0 0.0 Requests
* Core write errors 0 0.0 Requests
* Core total errors 0 0.0 Requests
*
* Total errors 0 0.0 Requests
*
* </pre>
*/
struct ocf_stats_errors {
struct ocf_stat core_obj_rd;
struct ocf_stat core_obj_wr;
struct ocf_stat core_obj_total;
struct ocf_stat cache_obj_rd;
struct ocf_stat cache_obj_wr;
struct ocf_stat cache_obj_total;
struct ocf_stat total;
};
/**
* @brief Collect statistics for given cache
*
* @param cache Cache instance for which statistics will be collected
* @param usage Usage statistics
* @param rq Request statistics
* @param blocks Blocks statistics
* @param errors Errors statistics
*
* @retval 0 Success
* @retval Non-zero Error
*/
int ocf_stats_collect_cache(ocf_cache_t cache,
struct ocf_stats_usage *usage,
struct ocf_stats_requests *rq,
struct ocf_stats_blocks *blocks,
struct ocf_stats_errors *errors);
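/*
 * Illustrative usage, not part of the original header: collecting cache
 * statistics and printing occupancy. The percent field is scaled by 10 (see
 * struct ocf_stat above), hence the value/10 and value%10 formatting.
 * Assumes <stdio.h>.
 */
static int my_print_occupancy(ocf_cache_t cache)
{
	struct ocf_stats_usage usage;
	struct ocf_stats_requests reqs;
	struct ocf_stats_blocks blocks;
	struct ocf_stats_errors errors;
	int result;

	result = ocf_stats_collect_cache(cache, &usage, &reqs, &blocks, &errors);
	if (result)
		return result;

	printf("Occupancy: %llu 4KiB blocks (%llu.%llu%%)\n",
			(unsigned long long)usage.occupancy.value,
			(unsigned long long)(usage.occupancy.percent / 10),
			(unsigned long long)(usage.occupancy.percent % 10));

	return 0;
}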
/**
* @brief Collect statistics for given core
*
* @param core Core for which statistics will be collected
* @param usage Usage statistics
* @param rq Request statistics
* @param blocks Blocks statistics
* @param errors Errors statistics
*
* @retval 0 Success
* @retval Non-zero Error
*/
int ocf_stats_collect_core(ocf_core_t core,
struct ocf_stats_usage *usage,
struct ocf_stats_requests *rq,
struct ocf_stats_blocks *blocks,
struct ocf_stats_errors *errors);
#endif /* __OCF_STATS_BUILDER_H__ */

95
inc/ocf_types.h Normal file
View File

@ -0,0 +1,95 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
/**
* @file
* @brief OCF types
*/
#ifndef __OCF_TYPES_H_
#define __OCF_TYPES_H_
#include "ocf_env_headers.h"
/**
* @brief cache id type (by default designated as 16 bit unsigned integer)
*/
typedef uint16_t ocf_cache_id_t;
/**
* @brief cache line type (by default designated as 32 bit unsigned integer)
*/
typedef uint32_t ocf_cache_line_t;
/**
* @brief core id type (by default designated as 16 bit unsigned integer)
*/
typedef uint16_t ocf_core_id_t;
/**
* @brief core sequence number type (by default designated as 16 bit unsigned integer)
*/
typedef uint16_t ocf_seq_no_t;
/**
* @brief partition id type (by default designated as 16 bit unsigned integer)
*/
typedef uint16_t ocf_part_id_t;
/**
* @brief handle to object designating ocf context
*/
typedef struct ocf_ctx *ocf_ctx_t;
struct ocf_cache;
/**
* @brief handle to object designating ocf cache device
*/
typedef struct ocf_cache *ocf_cache_t;
struct ocf_core;
/**
* @brief handle to object designating ocf core object
*/
typedef struct ocf_core *ocf_core_t;
struct ocf_data_obj;
/**
* @brief handle to object designating ocf data object
*/
typedef struct ocf_data_obj *ocf_data_obj_t;
struct ocf_data_obj_type;
/**
* @brief handle to data object type
*/
typedef const struct ocf_data_obj_type *ocf_data_obj_type_t;
/**
* @brief handle to data object uuid
*/
typedef struct ocf_data_obj_uuid *ocf_uuid_t;
/**
* @brief handle to object designating ocf context object
*/
typedef void ctx_data_t;
/**
* @brief handle to I/O queue
*/
typedef struct ocf_queue *ocf_queue_t;
/**
* @brief handle to cleaner
*/
typedef struct ocf_cleaner *ocf_cleaner_t;
/**
* @brief handle to metadata_updater
*/
typedef struct ocf_metadata_updater *ocf_metadata_updater_t;
#endif

74
inc/ocf_utilities.h Normal file
View File

@ -0,0 +1,74 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_UTILITIES_H__
#define __OCF_UTILITIES_H__
/**
* @file
* @brief OCF memory pool reference
*/
struct ocf_mpool;
/**
* @brief Create OCF memory pool
*
* @param cache OCF cache instance
* @param size Size of particular item
* @param hdr_size Header size before array of items
* @param flags Allocation flags
* @param mpool_max Maximal allocator size (power of two)
* @param name_prefix Name prefix of the allocator
*
* @return OCF memory pool reference
*/
struct ocf_mpool *ocf_mpool_create(struct ocf_cache *cache,
uint32_t hdr_size, uint32_t size, int flags, int mpool_max,
const char *name_prefix);
/**
* @brief Destroy existing memory pool
*
* @param mpool memory pool
*/
void ocf_mpool_destroy(struct ocf_mpool *mpool);
/**
* @brief Allocate new items of memory pool
*
* @note Allocation is based on an ATOMIC memory pool, so this function can
* be called with IRQs disabled
*
* @param mpool OCF memory pool reference
* @param count Count of elements to be allocated
*
* @return Pointer to the new items
*/
void *ocf_mpool_new(struct ocf_mpool *mpool, uint32_t count);
/**
* @brief Allocate new items of memory pool with specified allocation flag
*
* @param mpool OCF memory pool reference
* @param count Count of elements to be allocated
* @param flags Kernel allocation flags
*
* @return Pointer to the new items
*/
void *ocf_mpool_new_f(struct ocf_mpool *mpool, uint32_t count, int flags);
/**
* @brief Free existing items of memory pool
*
* @param mpool OCF memory pool reference
* @param items Items to be freed
* @param count Count of elements to be freed
*/
void ocf_mpool_del(struct ocf_mpool *mpool, void *items, uint32_t count);
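/*
 * Illustrative usage, not part of the original header: a pool of small
 * request items allocated in batches. The my_item type is hypothetical, and
 * the flags value (0) and mpool_max value (4) are placeholders; flags are
 * passed through to the environment allocator.
 */
struct my_item {
	uint64_t addr;
	uint32_t bytes;
};

static void my_mpool_demo(struct ocf_cache *cache)
{
	struct ocf_mpool *pool;
	struct my_item *items;

	pool = ocf_mpool_create(cache, 0, sizeof(struct my_item), 0, 4,
			"my_items");
	if (!pool)
		return;

	items = ocf_mpool_new(pool, 8);		/* batch of 8 items */
	if (items)
		ocf_mpool_del(pool, items, 8);	/* return the batch to the pool */

	ocf_mpool_destroy(pool);
}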
#endif /* __OCF_UTILITIES_H__ */

735
src/cleaning/acp.c Normal file
View File

@ -0,0 +1,735 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cleaning.h"
#include "../metadata/metadata.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_rq.h"
#include "../cleaning/acp.h"
#include "../engine/engine_common.h"
#include "../concurrency/ocf_cache_concurrency.h"
#include "cleaning_priv.h"
#define OCF_ACP_DEBUG 0
#if 1 == OCF_ACP_DEBUG
#define OCF_DEBUG_PREFIX "[Clean] %s():%d "
#define OCF_DEBUG_LOG(cache, format, ...) \
ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
format"\n", __func__, __LINE__, ##__VA_ARGS__)
#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")
#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)
#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
##__VA_ARGS__)
#define ACP_DEBUG_INIT(acp) acp->checksum = 0
#define ACP_DEBUG_BEGIN(acp, cache_line) acp->checksum ^= cache_line
#define ACP_DEBUG_END(acp, cache_line) acp->checksum ^= cache_line
#define ACP_DEBUG_CHECK(acp) ENV_BUG_ON(acp->checksum)
#else
#define OCF_DEBUG_PREFIX
#define OCF_DEBUG_LOG(cache, format, ...)
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#define ACP_DEBUG_INIT(acp)
#define ACP_DEBUG_BEGIN(acp, cache_line)
#define ACP_DEBUG_END(acp, cache_line)
#define ACP_DEBUG_CHECK(acp)
#endif
#define ACP_CHUNK_SIZE (100 * MiB)
/* minimum time before re-attempting chunk cleaning after an error (seconds) */
#define ACP_CHUNK_CLEANING_BACKOFF_TIME 5
/* time to sleep when nothing to clean in ms */
#define ACP_BACKOFF_TIME_MS 1000
#define ACP_MAX_BUCKETS 11
/* Upper thresholds for buckets in percent dirty pages. First bucket should have
* threshold=0 - it isn't cleaned and we don't want dirty chunks staying dirty
* forever. Last bucket also should stay at 100 for obvious reasons */
static const uint16_t ACP_BUCKET_DEFAULTS[ACP_MAX_BUCKETS] = { 0, 10, 20, 30, 40,
50, 60, 70, 80, 90, 100 };
struct acp_flush_context {
/* number of cache lines in flush */
uint64_t size;
/* chunk for error handling */
struct acp_chunk_info *chunk;
/* cache lines to flush */
struct flush_data data[OCF_ACP_MAX_FLUSH_MAX_BUFFERS];
/* flush error code */
int error;
};
struct acp_state {
/* currently cleaned chunk */
struct acp_chunk_info *chunk;
/* cache line iterator within current chunk */
unsigned iter;
/* true if there are cache lines to process in
* current chunk */
bool in_progress;
};
struct acp_chunk_info {
struct list_head list;
uint64_t chunk_id;
uint64_t next_cleaning_timestamp;
ocf_core_id_t core_id;
uint16_t num_dirty;
uint8_t bucket_id;
};
struct acp_bucket {
struct list_head chunk_list;
uint16_t threshold; /* threshold in clines */
};
struct acp_context {
env_rwsem chunks_lock;
/* number of chunks per core */
uint64_t num_chunks[OCF_CORE_MAX];
/* per core array of all chunks */
struct acp_chunk_info *chunk_info[OCF_CORE_MAX];
struct acp_bucket bucket_info[ACP_MAX_BUCKETS];
/* total number of chunks in cache */
uint64_t chunks_total;
/* structure to keep track of I/O in progress */
struct acp_flush_context flush;
/* cleaning state persistent over subsequent calls to
perform_cleaning */
struct acp_state state;
#if 1 == OCF_ACP_DEBUG
/* debug only */
uint64_t checksum;
#endif
};
struct acp_core_line_info
{
ocf_cache_line_t cache_line;
ocf_core_id_t core_id;
uint64_t core_line;
};
#define ACP_LOCK_CHUNKS_RD() env_rwsem_down_read(&acp->chunks_lock)
#define ACP_UNLOCK_CHUNKS_RD() env_rwsem_up_read(&acp->chunks_lock)
#define ACP_LOCK_CHUNKS_WR() env_rwsem_down_write(&acp->chunks_lock)
#define ACP_UNLOCK_CHUNKS_WR() env_rwsem_up_write(&acp->chunks_lock)
static struct acp_context *_acp_get_ctx_from_cache(struct ocf_cache *cache)
{
return cache->cleaning_policy_context;
}
static struct acp_cleaning_policy_meta* _acp_meta_get(
struct ocf_cache *cache, uint32_t cache_line,
struct cleaning_policy_meta *policy_meta)
{
ocf_metadata_get_cleaning_policy(cache, cache_line, policy_meta);
return &policy_meta->meta.acp;
}
static void _acp_meta_set(struct ocf_cache *cache, uint32_t cache_line,
struct cleaning_policy_meta *policy_meta)
{
ocf_metadata_set_cleaning_policy(cache, cache_line, policy_meta);
}
static struct acp_core_line_info _acp_core_line_info(struct ocf_cache *cache,
ocf_cache_line_t cache_line)
{
struct acp_core_line_info acp_core_line_info = {.cache_line = cache_line, };
ocf_metadata_get_core_info(cache, cache_line, &acp_core_line_info.core_id,
&acp_core_line_info.core_line);
return acp_core_line_info;
}
static struct acp_chunk_info *_acp_get_chunk(struct ocf_cache *cache,
uint32_t cache_line)
{
struct acp_context *acp = _acp_get_ctx_from_cache(cache);
struct acp_core_line_info core_line =
_acp_core_line_info(cache, cache_line);
uint64_t chunk_id;
chunk_id = core_line.core_line * ocf_line_size(cache) / ACP_CHUNK_SIZE;
return &acp->chunk_info[core_line.core_id][chunk_id];
}
#define for_each_core(cache, iter) \
for (iter = 0; iter < OCF_CORE_MAX; iter++) \
if (cache->core_conf_meta[iter].added)
static void _acp_remove_cores(struct ocf_cache *cache)
{
int i;
for_each_core(cache, i)
cleaning_policy_acp_remove_core(cache, i);
}
static int _acp_load_cores(struct ocf_cache *cache)
{
int i;
int err = 0;
for_each_core(cache, i) {
OCF_DEBUG_PARAM(cache, "loading core %i\n", i);
err = cleaning_policy_acp_add_core(cache, i);
if (err)
break;
}
if (err)
_acp_remove_cores(cache);
return err;
}
void cleaning_policy_acp_init_cache_block(struct ocf_cache *cache,
uint32_t cache_line)
{
struct cleaning_policy_meta policy_meta;
struct acp_cleaning_policy_meta *acp_meta;
/* TODO: acp meta is going to be removed soon */
acp_meta = _acp_meta_get(cache, cache_line, &policy_meta);
acp_meta->dirty = 0;
_acp_meta_set(cache, cache_line, &policy_meta);
}
void cleaning_policy_acp_deinitialize(struct ocf_cache *cache)
{
_acp_remove_cores(cache);
env_vfree(cache->cleaning_policy_context);
cache->cleaning_policy_context = NULL;
}
static void _acp_rebuild(struct ocf_cache *cache)
{
ocf_cache_line_t cline;
ocf_core_id_t cline_core_id;
uint32_t step = 0;
for (cline = 0; cline < cache->device->collision_table_entries; cline++) {
ocf_metadata_get_core_and_part_id(cache, cline, &cline_core_id,
NULL);
OCF_COND_RESCHED_DEFAULT(step);
if (cline_core_id == OCF_CORE_MAX)
continue;
cleaning_policy_acp_init_cache_block(cache, cline);
if (!metadata_test_dirty(cache, cline))
continue;
cleaning_policy_acp_set_hot_cache_line(cache, cline);
}
ocf_cache_log(cache, log_info, "Finished rebuilding ACP metadata\n");
}
void cleaning_policy_acp_setup(struct ocf_cache *cache)
{
struct acp_cleaning_policy_config *config;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
config->thread_wakeup_time = OCF_ACP_DEFAULT_WAKE_UP;
config->flush_max_buffers = OCF_ACP_DEFAULT_FLUSH_MAX_BUFFERS;
}
int cleaning_policy_acp_initialize(struct ocf_cache *cache,
int init_metadata)
{
struct acp_context *acp;
int err, i;
/* bug if max chunk number would overflow dirty_no array type */
#if defined (BUILD_BUG_ON)
BUILD_BUG_ON(ACP_CHUNK_SIZE / ocf_cache_line_size_min >=
1U << (sizeof(acp->chunk_info[0][0].num_dirty) * 8));
#else
ENV_BUG_ON(ACP_CHUNK_SIZE / ocf_cache_line_size_min >=
1U << (sizeof(acp->chunk_info[0][0].num_dirty) * 8));
#endif
ENV_BUG_ON(cache->cleaning_policy_context);
cache->cleaning_policy_context = env_vzalloc(sizeof(struct acp_context));
if (!cache->cleaning_policy_context) {
ocf_cache_log(cache, log_err, "acp context allocation error\n");
return -OCF_ERR_NO_MEM;
}
acp = cache->cleaning_policy_context;
env_rwsem_init(&acp->chunks_lock);
for (i = 0; i < ACP_MAX_BUCKETS; i++) {
INIT_LIST_HEAD(&acp->bucket_info[i].chunk_list);
acp->bucket_info[i].threshold =
((ACP_CHUNK_SIZE/ocf_line_size(cache)) *
ACP_BUCKET_DEFAULTS[i]) / 100;
}
if (cache->conf_meta->core_obj_count > 0) {
err = _acp_load_cores(cache);
if (err) {
cleaning_policy_acp_deinitialize(cache);
return err;
}
}
_acp_rebuild(cache);
return 0;
}
int cleaning_policy_acp_set_cleaning_param(ocf_cache_t cache,
uint32_t param_id, uint32_t param_value)
{
struct acp_cleaning_policy_config *config;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
switch (param_id) {
case ocf_acp_wake_up_time:
OCF_CLEANING_CHECK_PARAM(cache, param_value,
OCF_ACP_MIN_WAKE_UP,
OCF_ACP_MAX_WAKE_UP,
"thread_wakeup_time");
config->thread_wakeup_time = param_value;
ocf_cache_log(cache, log_info, "Write-back flush thread "
"wake-up time: %d\n", config->thread_wakeup_time);
break;
case ocf_acp_flush_max_buffers:
OCF_CLEANING_CHECK_PARAM(cache, param_value,
OCF_ACP_MIN_FLUSH_MAX_BUFFERS,
OCF_ACP_MAX_FLUSH_MAX_BUFFERS,
"flush_max_buffers");
config->flush_max_buffers = param_value;
ocf_cache_log(cache, log_info, "Write-back flush thread max "
"buffers flushed per iteration: %d\n",
config->flush_max_buffers);
break;
default:
return -OCF_ERR_INVAL;
}
return 0;
}
int cleaning_policy_acp_get_cleaning_param(ocf_cache_t cache,
uint32_t param_id, uint32_t *param_value)
{
struct acp_cleaning_policy_config *config;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
switch (param_id) {
case ocf_acp_flush_max_buffers:
*param_value = config->flush_max_buffers;
break;
case ocf_acp_wake_up_time:
*param_value = config->thread_wakeup_time;
break;
default:
return -OCF_ERR_INVAL;
}
return 0;
}
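As a minimal sketch of how a management-side caller could use the two functions above (direct calls are shown purely for illustration; in practice they are reached through the management layer's cleaning-parameter API):
static int example_tune_acp(ocf_cache_t cache)
{
	uint32_t value;
	int err;

	/* raise the flush thread wake-up interval to 50 ms; the value must fall
	 * within the OCF_ACP_MIN_WAKE_UP..OCF_ACP_MAX_WAKE_UP range */
	err = cleaning_policy_acp_set_cleaning_param(cache,
			ocf_acp_wake_up_time, 50);
	if (err)
		return err;

	/* read it back - value is 50 on success */
	err = cleaning_policy_acp_get_cleaning_param(cache,
			ocf_acp_wake_up_time, &value);
	return err;
}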
/* attempt to lock cache line if it's dirty */
static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache,
uint32_t core_id, uint64_t core_line)
{
struct ocf_map_info info;
bool locked = false;
OCF_METADATA_LOCK_RD();
ocf_engine_lookup_map_entry(cache, &info, core_id,
core_line);
if (info.status == LOOKUP_HIT &&
metadata_test_dirty(cache, info.coll_idx) &&
ocf_cache_line_try_lock_rd(cache, info.coll_idx)) {
locked = true;
}
OCF_METADATA_UNLOCK_RD();
return locked ? info.coll_idx : cache->device->collision_table_entries;
}
static void _acp_handle_flush_error(struct ocf_cache *cache,
struct acp_context *acp)
{
struct acp_flush_context *flush = &acp->flush;
flush->chunk->next_cleaning_timestamp = env_get_tick_count() +
env_secs_to_ticks(ACP_CHUNK_CLEANING_BACKOFF_TIME);
if (ocf_cache_log_rl(cache)) {
ocf_core_log(&cache->core_obj[flush->chunk->core_id],
log_err, "Cleaning error (%d) in range"
" <%llu; %llu) backing off for %u seconds\n",
flush->error,
flush->chunk->chunk_id * ACP_CHUNK_SIZE,
(flush->chunk->chunk_id * ACP_CHUNK_SIZE) +
ACP_CHUNK_SIZE,
ACP_CHUNK_CLEANING_BACKOFF_TIME);
}
}
/* called after flush request completed */
static void _acp_flush_end(
struct ocf_cache *cache,
struct acp_context *acp)
{
struct acp_flush_context *flush = &acp->flush;
int i;
for (i = 0; i < flush->size; i++) {
ocf_cache_line_unlock_rd(cache, flush->data[i].cache_line);
ACP_DEBUG_END(acp, flush->data[i].cache_line);
}
if (flush->error)
_acp_handle_flush_error(cache, acp);
}
/* flush data */
static void _acp_flush(struct ocf_cache *cache, struct acp_context *acp,
uint32_t io_queue, struct acp_flush_context *flush)
{
struct ocf_cleaner_attribs attribs = {
.cache_line_lock = false,
.metadata_locked = false,
.do_sort = false,
.io_queue = io_queue,
};
flush->error = ocf_cleaner_do_flush_data(cache, flush->data,
flush->size, &attribs);
_acp_flush_end(cache, acp);
}
static inline bool _acp_can_clean_chunk(struct ocf_cache *cache,
struct acp_chunk_info *chunk)
{
/* Check if the core device is opened and if the backoff timeout set after a
* cleaning error has expired (or was never set in the first place) */
return (cache->core_obj[chunk->core_id].opened &&
(chunk->next_cleaning_timestamp <= env_get_tick_count() ||
!chunk->next_cleaning_timestamp));
}
static struct acp_chunk_info *_acp_get_cleaning_candidate(
struct ocf_cache *cache)
{
int i;
struct acp_chunk_info *cur;
struct acp_context *acp = cache->cleaning_policy_context;
ACP_LOCK_CHUNKS_RD();
/* go through all buckets in descending order, excluding bucket 0 which
* is supposed to contain all clean chunks */
for (i = ACP_MAX_BUCKETS - 1; i > 0; i--) {
list_for_each_entry(cur, &acp->bucket_info[i].chunk_list, list) {
if (_acp_can_clean_chunk(cache, cur)) {
ACP_UNLOCK_CHUNKS_RD();
return cur;
}
}
}
ACP_UNLOCK_CHUNKS_RD();
return NULL;
}
#define CHUNK_FINISHED -1
/* clean at most 'flush_max_buffers' cache lines from given chunk, starting
* at given cache line */
static int _acp_clean(struct ocf_cache *cache, uint32_t io_queue,
struct acp_chunk_info *chunk, unsigned start,
uint32_t flush_max_buffers)
{
struct acp_context *acp = _acp_get_ctx_from_cache(cache);
size_t lines_per_chunk = ACP_CHUNK_SIZE /
ocf_line_size(cache);
uint64_t first_core_line = chunk->chunk_id * lines_per_chunk;
unsigned i;
OCF_DEBUG_PARAM(cache, "lines per chunk %llu chunk %llu "
"first_core_line %llu\n",
(uint64_t)lines_per_chunk,
chunk->chunk_id,
first_core_line);
ACP_DEBUG_INIT(acp);
acp->flush.size = 0;
acp->flush.chunk = chunk;
for (i = start; i < lines_per_chunk && acp->flush.size < flush_max_buffers ; i++) {
uint64_t core_line = first_core_line + i;
ocf_cache_line_t cache_line;
cache_line = _acp_trylock_dirty(cache, chunk->core_id, core_line);
if (cache_line == cache->device->collision_table_entries)
continue;
acp->flush.data[acp->flush.size].core_id = chunk->core_id;
acp->flush.data[acp->flush.size].core_line = core_line;
acp->flush.data[acp->flush.size].cache_line = cache_line;
acp->flush.size++;
ACP_DEBUG_BEGIN(acp, cache_line);
}
if (acp->flush.size > 0) {
_acp_flush(cache, acp, io_queue, &acp->flush);
}
ACP_DEBUG_CHECK(acp);
return (i == lines_per_chunk) ? CHUNK_FINISHED : i;
}
#define NOTHING_TO_CLEAN 0
#define MORE_TO_CLEAN 1
/* Clean at most 'flush_max_buffers' cache lines from current or newly
* selected chunk */
static int _acp_clean_iteration(struct ocf_cache *cache, uint32_t io_queue,
uint32_t flush_max_buffers)
{
struct acp_context *acp = _acp_get_ctx_from_cache(cache);
struct acp_state *state = &acp->state;
if (!state->in_progress) {
/* get next chunk to clean */
state->chunk = _acp_get_cleaning_candidate(cache);
if (!state->chunk) {
/* nothing to clean */
return NOTHING_TO_CLEAN;
}
/* new cleaning cycle - reset state */
state->iter = 0;
state->in_progress = true;
}
state->iter = _acp_clean(cache, io_queue, state->chunk, state->iter,
flush_max_buffers);
if (state->iter == CHUNK_FINISHED) {
/* reached end of chunk - reset state */
state->in_progress = false;
}
return MORE_TO_CLEAN;
}
int cleaning_policy_acp_perform_cleaning(struct ocf_cache *cache,
uint32_t io_queue)
{
struct acp_cleaning_policy_config *config;
int ret;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
if (NOTHING_TO_CLEAN == _acp_clean_iteration(cache, io_queue,
config->flush_max_buffers)) {
ret = ACP_BACKOFF_TIME_MS;
} else {
ret = config->thread_wakeup_time;
}
return ret;
}
static void _acp_update_bucket(struct acp_context *acp,
struct acp_chunk_info *chunk)
{
struct acp_bucket *bucket = &acp->bucket_info[chunk->bucket_id];
if (chunk->num_dirty > bucket->threshold) {
ENV_BUG_ON(chunk->bucket_id == ACP_MAX_BUCKETS - 1);
chunk->bucket_id++;
/* buckets are stored in an array, so move up by one bucket.
* No overflow here - the ENV_BUG_ON above guarantees we never
* increment past the last bucket */
bucket++;
list_move_tail(&chunk->list, &bucket->chunk_list);
} else if (chunk->bucket_id &&
chunk->num_dirty <= (bucket - 1)->threshold) {
chunk->bucket_id--;
/* move down one bucket, we made sure we won't underflow */
bucket--;
list_move(&chunk->list, &bucket->chunk_list);
}
}
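To get a feel for the thresholds that _acp_update_bucket() compares against, here is a standalone sketch reproducing the formula from cleaning_policy_acp_initialize() above. The chunk size, line size and bucket percentages below are illustrative assumptions only; the real values are ACP_CHUNK_SIZE, ocf_line_size() and ACP_BUCKET_DEFAULTS:
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t chunk_size = 100ULL << 20;	/* assumed 100 MiB chunk */
	const uint64_t line_size = 4096;		/* assumed 4 KiB lines */
	const unsigned defaults[] = { 0, 10, 25, 50, 75, 100 };	/* made-up percentages */
	unsigned i;

	/* threshold[i] = (lines per chunk) * percentage[i] / 100; a chunk moves
	 * up a bucket once num_dirty exceeds its current bucket's threshold and
	 * down once it drops to or below the previous bucket's threshold */
	for (i = 0; i < sizeof(defaults) / sizeof(defaults[0]); i++)
		printf("bucket %u: %llu dirty lines\n", i,
				(unsigned long long)(chunk_size / line_size *
					defaults[i] / 100));
	return 0;
}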
void cleaning_policy_acp_set_hot_cache_line(struct ocf_cache *cache,
uint32_t cache_line)
{
struct acp_context *acp = _acp_get_ctx_from_cache(cache);
struct cleaning_policy_meta policy_meta;
struct acp_cleaning_policy_meta *acp_meta;
struct acp_chunk_info *chunk;
ACP_LOCK_CHUNKS_WR();
acp_meta = _acp_meta_get(cache, cache_line, &policy_meta);
chunk = _acp_get_chunk(cache, cache_line);
if (!acp_meta->dirty) {
acp_meta->dirty = 1;
_acp_meta_set(cache, cache_line, &policy_meta);
chunk->num_dirty++;
}
_acp_update_bucket(acp, chunk);
ACP_UNLOCK_CHUNKS_WR();
}
void cleaning_policy_acp_purge_block(struct ocf_cache *cache,
uint32_t cache_line)
{
struct acp_context *acp = _acp_get_ctx_from_cache(cache);
struct cleaning_policy_meta policy_meta;
struct acp_cleaning_policy_meta *acp_meta;
struct acp_chunk_info *chunk;
acp_meta = _acp_meta_get(cache, cache_line, &policy_meta);
chunk = _acp_get_chunk(cache, cache_line);
if (acp_meta->dirty) {
acp_meta->dirty = 0;
_acp_meta_set(cache, cache_line, &policy_meta);
chunk->num_dirty--;
}
_acp_update_bucket(acp, chunk);
}
int cleaning_policy_acp_purge_range(struct ocf_cache *cache,
int core_id, uint64_t start_byte, uint64_t end_byte)
{
return ocf_metadata_actor(cache, PARTITION_INVALID,
core_id, start_byte, end_byte,
cleaning_policy_acp_purge_block);
}
void cleaning_policy_acp_remove_core(ocf_cache_t cache,
ocf_core_id_t core_id)
{
struct acp_context *acp = _acp_get_ctx_from_cache(cache);
uint64_t i;
ENV_BUG_ON(acp->chunks_total < acp->num_chunks[core_id]);
if (acp->state.in_progress && acp->state.chunk->core_id == core_id) {
acp->state.in_progress = false;
acp->state.iter = 0;
acp->state.chunk = NULL;
}
ACP_LOCK_CHUNKS_WR();
for (i = 0; i < acp->num_chunks[core_id]; i++)
list_del(&acp->chunk_info[core_id][i].list);
acp->chunks_total -= acp->num_chunks[core_id];
acp->num_chunks[core_id] = 0;
env_vfree(acp->chunk_info[core_id]);
acp->chunk_info[core_id] = NULL;
ACP_UNLOCK_CHUNKS_WR();
}
int cleaning_policy_acp_add_core(ocf_cache_t cache,
ocf_core_id_t core_id)
{
uint64_t core_size = cache->core_conf_meta[core_id].length;
uint64_t num_chunks = DIV_ROUND_UP(core_size, ACP_CHUNK_SIZE);
struct acp_context *acp = _acp_get_ctx_from_cache(cache);
int i;
OCF_DEBUG_PARAM(cache, "%s core_id %llu num_chunks %llu\n",
__func__, (uint64_t)core_id, (uint64_t) num_chunks);
ACP_LOCK_CHUNKS_WR();
ENV_BUG_ON(acp->chunk_info[core_id]);
acp->chunk_info[core_id] =
env_vzalloc(num_chunks * sizeof(acp->chunk_info[0][0]));
if (!acp->chunk_info[core_id]) {
ACP_UNLOCK_CHUNKS_WR();
OCF_DEBUG_PARAM(cache, "failed to allocate acp tables\n");
return -ENOMEM;
}
OCF_DEBUG_PARAM(cache, "successfully allocated acp tables\n");
/* increment counters */
acp->num_chunks[core_id] = num_chunks;
acp->chunks_total += num_chunks;
for (i = 0; i < acp->num_chunks[core_id]; i++) {
/* fill in chunk metadata and add to the clean bucket */
acp->chunk_info[core_id][i].core_id = core_id;
acp->chunk_info[core_id][i].chunk_id = i;
list_add(&acp->chunk_info[core_id][i].list,
&acp->bucket_info[0].chunk_list);
}
ACP_UNLOCK_CHUNKS_WR();
return 0;
}

45
src/cleaning/acp.h Normal file
View File

@ -0,0 +1,45 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_CLEANING_POLICY_AGGRESSIVE_H__
#define __LAYER_CLEANING_POLICY_AGGRESSIVE_H__
#include "cleaning.h"
void cleaning_policy_acp_setup(struct ocf_cache *cache);
int cleaning_policy_acp_initialize(struct ocf_cache *cache,
int init_metadata);
void cleaning_policy_acp_deinitialize(struct ocf_cache *cache);
int cleaning_policy_acp_perform_cleaning(struct ocf_cache *cache,
uint32_t io_queue);
void cleaning_policy_acp_init_cache_block(struct ocf_cache *cache,
uint32_t cache_line);
void cleaning_policy_acp_set_hot_cache_line(struct ocf_cache *cache,
uint32_t cache_line);
void cleaning_policy_acp_purge_block(struct ocf_cache *cache,
uint32_t cache_line);
int cleaning_policy_acp_purge_range(struct ocf_cache *cache,
int core_id, uint64_t start_byte, uint64_t end_byte);
int cleaning_policy_acp_set_cleaning_param(struct ocf_cache *cache,
uint32_t param_id, uint32_t param_value);
int cleaning_policy_acp_get_cleaning_param(struct ocf_cache *cache,
uint32_t param_id, uint32_t *param_value);
int cleaning_policy_acp_add_core(ocf_cache_t cache, ocf_core_id_t core_id);
void cleaning_policy_acp_remove_core(ocf_cache_t cache,
ocf_core_id_t core_id);
#endif

23
src/cleaning/acp_structs.h Normal file
View File

@ -0,0 +1,23 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __CLEANING_AGGRESSIVE_STRUCTS_H__
#define __CLEANING_AGGRESSIVE_STRUCTS_H__
#include "../utils/utils_cleaner.h"
/* TODO: remove acp metadata */
struct acp_cleaning_policy_meta {
uint8_t dirty : 1;
};
/* cleaning policy per partition metadata */
struct acp_cleaning_policy_config {
uint32_t thread_wakeup_time; /* in milliseconds*/
uint32_t flush_max_buffers; /* in lines */
};
#endif

802
src/cleaning/alru.c Normal file
View File

@ -0,0 +1,802 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cleaning.h"
#include "alru.h"
#include "../metadata/metadata.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_part.h"
#include "../utils/utils_allocator.h"
#include "../concurrency/ocf_cache_concurrency.h"
#include "../ocf_def_priv.h"
#include "cleaning_priv.h"
#define is_alru_head(x) (x == collision_table_entries)
#define is_alru_tail(x) (x == collision_table_entries)
#define OCF_CLEANING_DEBUG 0
#if 1 == OCF_CLEANING_DEBUG
#define OCF_DEBUG_PREFIX "[Clean] %s():%d "
#define OCF_DEBUG_LOG(cache, format, ...) \
ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
format"\n", __func__, __LINE__, ##__VA_ARGS__)
#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")
#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)
#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
##__VA_ARGS__)
#else
#define OCF_DEBUG_PREFIX
#define OCF_DEBUG_LOG(cache, format, ...)
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
struct flush_merge_struct {
ocf_cache_line_t cache_line;
ocf_core_id_t core_id;
uint64_t core_sector;
};
/* -- Start of ALRU functions -- */
/* Sets the given collision_index as the new _head_ of the ALRU list. */
static inline void update_alru_head(struct ocf_cache *cache,
int partition_id, unsigned int collision_index)
{
struct ocf_user_part *part = &cache->user_parts[partition_id];
part->runtime->cleaning.policy.alru.lru_head = collision_index;
}
/* Sets the given collision_index as the new _tail_ of the ALRU list. */
static inline void update_alru_tail(struct ocf_cache *cache,
int partition_id, unsigned int collision_index)
{
struct ocf_user_part *part = &cache->user_parts[partition_id];
part->runtime->cleaning.policy.alru.lru_tail = collision_index;
}
/* Sets the given collision_index as the new _head_ and _tail_
* of the ALRU list.
*/
static inline void update_alru_head_tail(struct ocf_cache *cache,
int partition_id, unsigned int collision_index)
{
update_alru_head(cache, partition_id, collision_index);
update_alru_tail(cache, partition_id, collision_index);
}
/* Adds the given collision_index to the _head_ of the ALRU list */
static void add_alru_head(struct ocf_cache *cache, int partition_id,
unsigned int collision_index)
{
unsigned int curr_head_index;
unsigned int collision_table_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
struct cleaning_policy_meta policy;
ENV_BUG_ON(!(collision_index < collision_table_entries));
ENV_BUG_ON(env_atomic_read(
&part->runtime->cleaning.policy.alru.size) < 0);
ENV_WARN_ON(!metadata_test_dirty(cache, collision_index));
ENV_WARN_ON(!metadata_test_valid_any(cache, collision_index));
/* First node to be added. */
if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
update_alru_head_tail(cache, partition_id, collision_index);
ocf_metadata_get_cleaning_policy(cache, collision_index,
&policy);
policy.meta.alru.lru_next = collision_table_entries;
policy.meta.alru.lru_prev = collision_table_entries;
policy.meta.alru.timestamp = env_ticks_to_secs(
env_get_tick_count());
ocf_metadata_set_cleaning_policy(cache, collision_index,
&policy);
} else {
/* Not the first node to be added. */
curr_head_index = part->runtime->cleaning.policy.alru.lru_head;
ENV_BUG_ON(!(curr_head_index < collision_table_entries));
ocf_metadata_get_cleaning_policy(cache, collision_index,
&policy);
policy.meta.alru.lru_next = curr_head_index;
policy.meta.alru.lru_prev = collision_table_entries;
policy.meta.alru.timestamp = env_ticks_to_secs(
env_get_tick_count());
ocf_metadata_set_cleaning_policy(cache, collision_index,
&policy);
ocf_metadata_get_cleaning_policy(cache, curr_head_index,
&policy);
policy.meta.alru.lru_prev = collision_index;
ocf_metadata_set_cleaning_policy(cache, curr_head_index,
&policy);
update_alru_head(cache, partition_id, collision_index);
}
env_atomic_inc(&part->runtime->cleaning.policy.alru.size);
}
/* Deletes the node with the given collision_index from the ALRU list */
static void remove_alru_list(struct ocf_cache *cache, int partition_id,
unsigned int collision_index)
{
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
struct alru_cleaning_policy *cleaning_policy =
&part->runtime->cleaning.policy.alru;
struct cleaning_policy_meta policy;
ENV_BUG_ON(!(collision_index < collision_table_entries));
if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) {
ocf_cache_log(cache, log_err, "ERROR: Attempt to remove item "
"from empty ALRU Cleaning Policy queue!\n");
ENV_BUG();
}
ocf_metadata_get_cleaning_policy(cache, collision_index, &policy);
/* Set prev and next (even if non existent) */
next_lru_node = policy.meta.alru.lru_next;
prev_lru_node = policy.meta.alru.lru_prev;
/* Check if entry is not part of the ALRU list */
if ((next_lru_node == collision_table_entries) &&
(prev_lru_node == collision_table_entries) &&
(cleaning_policy->lru_head != collision_index) &&
(cleaning_policy->lru_tail != collision_index)) {
return;
}
/* Case 0: If we are head AND tail, there is only one node. So unlink
* node and set that there is no node left in the list.
*/
if (cleaning_policy->lru_head == collision_index &&
cleaning_policy->lru_tail == collision_index) {
policy.meta.alru.lru_next = collision_table_entries;
policy.meta.alru.lru_prev = collision_table_entries;
ocf_metadata_set_cleaning_policy(cache, collision_index,
&policy);
update_alru_head_tail(cache, partition_id,
collision_table_entries);
}
/* Case 1: else if this collision_index is ALRU head, but not tail,
* update head and return
*/
else if ((cleaning_policy->lru_tail != collision_index) &&
(cleaning_policy->lru_head == collision_index)) {
struct cleaning_policy_meta next_policy;
ENV_BUG_ON(!(next_lru_node < collision_table_entries));
ocf_metadata_get_cleaning_policy(cache, next_lru_node,
&next_policy);
update_alru_head(cache, partition_id, next_lru_node);
policy.meta.alru.lru_next = collision_table_entries;
next_policy.meta.alru.lru_prev = collision_table_entries;
ocf_metadata_set_cleaning_policy(cache, collision_index,
&policy);
ocf_metadata_set_cleaning_policy(cache, next_lru_node,
&next_policy);
}
/* Case 2: else if this collision_index is ALRU tail, but not head,
* update tail and return
*/
else if ((cleaning_policy->lru_head != collision_index) &&
(cleaning_policy->lru_tail == collision_index)) {
struct cleaning_policy_meta prev_policy;
ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
ocf_metadata_get_cleaning_policy(cache, prev_lru_node,
&prev_policy);
update_alru_tail(cache, partition_id, prev_lru_node);
policy.meta.alru.lru_prev = collision_table_entries;
prev_policy.meta.alru.lru_next = collision_table_entries;
ocf_metadata_set_cleaning_policy(cache, collision_index,
&policy);
ocf_metadata_set_cleaning_policy(cache, prev_lru_node,
&prev_policy);
}
/* Case 3: else this collision_index is a middle node. There is no
* change to the head and the tail pointers.
*/
else {
struct cleaning_policy_meta next_policy;
struct cleaning_policy_meta prev_policy;
ENV_BUG_ON(!(next_lru_node < collision_table_entries));
ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
ocf_metadata_get_cleaning_policy(cache, prev_lru_node,
&prev_policy);
ocf_metadata_get_cleaning_policy(cache, next_lru_node,
&next_policy);
/* Update prev and next nodes */
prev_policy.meta.alru.lru_next = policy.meta.alru.lru_next;
next_policy.meta.alru.lru_prev = policy.meta.alru.lru_prev;
/* Update the given node */
policy.meta.alru.lru_next = collision_table_entries;
policy.meta.alru.lru_prev = collision_table_entries;
ocf_metadata_set_cleaning_policy(cache, collision_index,
&policy);
ocf_metadata_set_cleaning_policy(cache, prev_lru_node,
&prev_policy);
ocf_metadata_set_cleaning_policy(cache, next_lru_node,
&next_policy);
}
env_atomic_dec(&part->runtime->cleaning.policy.alru.size);
}
static bool is_on_alru_list(struct ocf_cache *cache, int partition_id,
unsigned int collision_index)
{
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
struct alru_cleaning_policy *cleaning_policy =
&part->runtime->cleaning.policy.alru;
struct cleaning_policy_meta policy;
ENV_BUG_ON(!(collision_index < collision_table_entries));
ocf_metadata_get_cleaning_policy(cache, collision_index, &policy);
next_lru_node = policy.meta.alru.lru_next;
prev_lru_node = policy.meta.alru.lru_prev;
return cleaning_policy->lru_tail == collision_index ||
cleaning_policy->lru_head == collision_index ||
next_lru_node != collision_table_entries ||
prev_lru_node != collision_table_entries;
}
/* -- End of ALRU functions -- */
void cleaning_policy_alru_init_cache_block(struct ocf_cache *cache,
uint32_t cache_line)
{
struct cleaning_policy_meta policy;
ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
policy.meta.alru.timestamp = 0;
policy.meta.alru.lru_prev = cache->device->collision_table_entries;
policy.meta.alru.lru_next = cache->device->collision_table_entries;
ocf_metadata_set_cleaning_policy(cache, cache_line, &policy);
}
void cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache,
uint32_t cache_line)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
cache_line);
remove_alru_list(cache, part_id, cache_line);
}
static void __cleaning_policy_alru_purge_cache_block_any(
struct ocf_cache *cache, uint32_t cache_line)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
cache_line);
if (is_on_alru_list(cache, part_id, cache_line))
remove_alru_list(cache, part_id, cache_line);
}
int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte) {
struct ocf_user_part *part;
ocf_part_id_t part_id;
int ret = 0;
for_each_part(cache, part, part_id) {
if (env_atomic_read(&part->runtime->cleaning.
policy.alru.size) == 0)
continue;
ret |= ocf_metadata_actor(cache, part_id,
core_id, start_byte, end_byte,
__cleaning_policy_alru_purge_cache_block_any);
}
return ret;
}
void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
uint32_t cache_line)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache,
cache_line);
struct ocf_user_part *part = &cache->user_parts[part_id];
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
struct cleaning_policy_meta policy;
ENV_WARN_ON(!metadata_test_dirty(cache, cache_line));
ENV_WARN_ON(!metadata_test_valid_any(cache, cache_line));
ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
next_lru_node = policy.meta.alru.lru_next;
prev_lru_node = policy.meta.alru.lru_prev;
if ((next_lru_node != collision_table_entries) ||
(prev_lru_node != collision_table_entries) ||
((part->runtime->cleaning.policy.
alru.lru_head == cache_line) &&
(part->runtime->cleaning.policy.
alru.lru_tail == cache_line)))
remove_alru_list(cache, part_id, cache_line);
add_alru_head(cache, part_id, cache_line);
}
static void _alru_rebuild(struct ocf_cache *cache)
{
struct ocf_user_part *part;
ocf_part_id_t part_id;
ocf_core_id_t core_id;
ocf_cache_line_t cline;
uint32_t step = 0;
for_each_part(cache, part, part_id) {
/* ALRU initialization */
env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
part->runtime->cleaning.policy.alru.lru_head =
cache->device->collision_table_entries;
part->runtime->cleaning.policy.alru.lru_tail =
cache->device->collision_table_entries;
cache->device->runtime_meta->cleaning_thread_access = 0;
}
for (cline = 0; cline < cache->device->collision_table_entries; cline++) {
ocf_metadata_get_core_and_part_id(cache, cline, &core_id,
NULL);
OCF_COND_RESCHED_DEFAULT(step);
if (core_id == OCF_CORE_MAX)
continue;
cleaning_policy_alru_init_cache_block(cache, cline);
if (!metadata_test_dirty(cache, cline))
continue;
cleaning_policy_alru_set_hot_cache_line(cache, cline);
}
}
static int cleaning_policy_alru_initialize_part(struct ocf_cache *cache,
struct ocf_user_part *part, int init_metadata)
{
if (init_metadata) {
/* ALRU initialization */
env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0);
part->runtime->cleaning.policy.alru.lru_head =
cache->device->collision_table_entries;
part->runtime->cleaning.policy.alru.lru_tail =
cache->device->collision_table_entries;
}
cache->device->runtime_meta->cleaning_thread_access = 0;
return 0;
}
void cleaning_policy_alru_setup(struct ocf_cache *cache)
{
struct alru_cleaning_policy_config *config;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
config->thread_wakeup_time = OCF_ALRU_DEFAULT_WAKE_UP;
config->stale_buffer_time = OCF_ALRU_DEFAULT_STALENESS_TIME;
config->flush_max_buffers = OCF_ALRU_DEFAULT_FLUSH_MAX_BUFFERS;
config->activity_threshold = OCF_ALRU_DEFAULT_ACTIVITY_THRESHOLD;
}
int cleaning_policy_alru_initialize(struct ocf_cache *cache, int init_metadata)
{
struct ocf_user_part *part;
ocf_part_id_t part_id;
for_each_part(cache, part, part_id) {
cleaning_policy_alru_initialize_part(cache,
part, init_metadata);
}
if (init_metadata)
_alru_rebuild(cache);
return 0;
}
int cleaning_policy_alru_set_cleaning_param(ocf_cache_t cache,
uint32_t param_id, uint32_t param_value)
{
struct alru_cleaning_policy_config *config;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
switch (param_id) {
case ocf_alru_wake_up_time:
OCF_CLEANING_CHECK_PARAM(cache, param_value,
OCF_ALRU_MIN_WAKE_UP,
OCF_ALRU_MAX_WAKE_UP,
"thread_wakeup_time");
config->thread_wakeup_time = param_value;
ocf_cache_log(cache, log_info, "Write-back flush thread "
"wake-up time: %d\n", config->thread_wakeup_time);
break;
case ocf_alru_stale_buffer_time:
OCF_CLEANING_CHECK_PARAM(cache, param_value,
OCF_ALRU_MIN_STALENESS_TIME,
OCF_ALRU_MAX_STALENESS_TIME,
"stale_buffer_time");
config->stale_buffer_time = param_value;
ocf_cache_log(cache, log_info, "Write-back flush thread "
"staleness time: %d\n", config->stale_buffer_time);
break;
case ocf_alru_flush_max_buffers:
OCF_CLEANING_CHECK_PARAM(cache, param_value,
OCF_ALRU_MIN_FLUSH_MAX_BUFFERS,
OCF_ALRU_MAX_FLUSH_MAX_BUFFERS,
"flush_max_buffers");
config->flush_max_buffers = param_value;
ocf_cache_log(cache, log_info, "Write-back flush thread max "
"buffers flushed per iteration: %d\n",
config->flush_max_buffers);
break;
case ocf_alru_activity_threshold:
OCF_CLEANING_CHECK_PARAM(cache, param_value,
OCF_ALRU_MIN_ACTIVITY_THRESHOLD,
OCF_ALRU_MAX_ACTIVITY_THRESHOLD,
"activity_threshold");
config->activity_threshold = param_value;
ocf_cache_log(cache, log_info, "Write-back flush thread "
"activity time threshold: %d\n",
config->activity_threshold);
break;
default:
return -OCF_ERR_INVAL;
}
return 0;
}
int cleaning_policy_alru_get_cleaning_param(ocf_cache_t cache,
uint32_t param_id, uint32_t *param_value)
{
struct alru_cleaning_policy_config *config;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
switch (param_id) {
case ocf_alru_wake_up_time:
*param_value = config->thread_wakeup_time;
break;
case ocf_alru_stale_buffer_time:
*param_value = config->stale_buffer_time;
break;
case ocf_alru_flush_max_buffers:
*param_value = config->flush_max_buffers;
break;
case ocf_alru_activity_threshold:
*param_value = config->activity_threshold;
break;
default:
return -OCF_ERR_INVAL;
}
return 0;
}
static inline uint32_t compute_timestamp(
const struct alru_cleaning_policy_config *config)
{
unsigned long time;
time = env_get_tick_count();
time -= env_secs_to_ticks(config->stale_buffer_time);
time = env_ticks_to_secs(time);
return (uint32_t) time;
}
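compute_timestamp() converts "now minus the staleness window" into the same seconds unit that is stored per cache line, so the tail walk in get_data_to_flush() below can compare timestamps directly. A small sketch of the resulting rule (illustration only, times in seconds):
/* A dirty line stamped at insert_time becomes a flush candidate once
 * now exceeds insert_time + stale_buffer_time, because the tail walk stops
 * at the first entry whose timestamp is not older than compute_timestamp(). */
static int example_is_stale(uint32_t insert_time_s, uint32_t now_s,
		uint32_t stale_buffer_time_s)
{
	return (uint64_t)insert_time_s + stale_buffer_time_s < now_s;
}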
static int check_for_io_activity(struct ocf_cache *cache,
struct alru_cleaning_policy_config *config)
{
unsigned int now, last;
now = env_ticks_to_msecs(env_get_tick_count());
last = env_atomic_read(&cache->last_access_ms);
if ((now - last) < config->activity_threshold)
return 1;
return 0;
}
static int cmp_ocf_user_parts(const void *p1, const void *p2) {
const struct ocf_user_part *t1 = *(const struct ocf_user_part**)p1;
const struct ocf_user_part *t2 = *(const struct ocf_user_part**)p2;
if (t1->config->priority > t2->config->priority)
return 1;
else if (t1->config->priority < t2->config->priority)
return -1;
return 0;
}
static void swp_ocf_user_part(void *part1, void *part2, int size) {
void *tmp = *(void **)part1;
*(void **)part1 = *(void **) part2;
*(void **)part2 = tmp;
}
static void get_parts_sorted(struct ocf_user_part **parts,
struct ocf_cache *cache) {
int i;
for (i = 0; i < OCF_IO_CLASS_MAX; i++)
parts[i] = &cache->user_parts[i];
env_sort(parts, OCF_IO_CLASS_MAX, sizeof(struct ocf_user_part*),
cmp_ocf_user_parts, swp_ocf_user_part);
}
static int clean_later(ocf_cache_t cache, uint32_t *delta)
{
struct alru_cleaning_policy_config *config;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
*delta = env_ticks_to_secs(env_get_tick_count()) -
cache->device->runtime_meta->cleaning_thread_access;
if (*delta <= config->thread_wakeup_time)
return true;
return false;
}
static void get_block_to_flush(struct flush_data* dst,
ocf_cache_line_t cache_line, struct ocf_cache* cache)
{
ocf_core_id_t core_id;
uint64_t core_line;
ocf_metadata_get_core_info(cache, cache_line,
&core_id, &core_line);
dst->cache_line = cache_line;
dst->core_id = core_id;
dst->core_line = core_line;
}
static int more_blocks_to_flush(struct ocf_cache *cache,
ocf_cache_line_t cache_line, uint32_t last_access)
{
struct cleaning_policy_meta policy;
if (cache_line >= cache->device->collision_table_entries)
return false;
ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
if (policy.meta.alru.timestamp >= last_access)
return false;
return true;
}
static int block_is_busy(struct ocf_cache *cache,
ocf_cache_line_t cache_line)
{
ocf_core_id_t core_id;
uint64_t core_line;
ocf_metadata_get_core_info(cache, cache_line,
&core_id, &core_line);
if (!cache->core_obj[core_id].opened)
return true;
if (ocf_cache_line_is_used(cache, cache_line))
return true;
return false;
}
static int get_data_to_flush(struct flush_data *dst, uint32_t clines_no,
struct ocf_cache *cache, struct ocf_user_part *part)
{
struct alru_cleaning_policy_config *config;
struct cleaning_policy_meta policy;
ocf_cache_line_t cache_line;
int to_flush = 0;
uint32_t last_access;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
cache_line = part->runtime->cleaning.policy.alru.lru_tail;
last_access = compute_timestamp(config);
OCF_DEBUG_PARAM(cache, "Last access=%u, timestamp=%u rel=%d",
last_access, policy.meta.alru.timestamp,
policy.meta.alru.timestamp < last_access);
while (to_flush < clines_no &&
more_blocks_to_flush(cache, cache_line, last_access)) {
if (!block_is_busy(cache, cache_line)) {
get_block_to_flush(&dst[to_flush], cache_line, cache);
to_flush++;
}
ocf_metadata_get_cleaning_policy(cache, cache_line, &policy);
cache_line = policy.meta.alru.lru_prev;
}
OCF_DEBUG_PARAM(cache, "Collected items_to_clean=%u", to_flush);
return to_flush;
}
static int perform_flushing(int clines_no, struct ocf_cache *cache, uint32_t io_queue,
struct flush_data *flush_data, struct ocf_user_part *part)
{
int to_clean = get_data_to_flush(flush_data, clines_no, cache, part);
if (to_clean > 0) {
struct ocf_cleaner_attribs attribs = {
.cache_line_lock = true,
.metadata_locked = true,
.do_sort = true,
.io_queue = io_queue
};
ocf_cleaner_do_flush_data(cache, flush_data,
to_clean, &attribs);
} else {
/* Update timestamp only if there are no items to be cleaned */
cache->device->runtime_meta->cleaning_thread_access =
env_ticks_to_secs(env_get_tick_count());
}
return to_clean;
}
static int is_cleanup_possible(ocf_cache_t cache)
{
struct alru_cleaning_policy_config *config;
uint32_t delta;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
if (check_for_io_activity(cache, config)) {
OCF_DEBUG_PARAM(cache, "IO activity detected");
return false;
}
if (clean_later(cache, &delta)) {
OCF_DEBUG_PARAM(cache,
"Cleaning policy configured to clean later "
"delta=%u wake_up=%u", delta,
config->thread_wakeup_time);
return false;
}
/* Cleaning policy configured to not clean anything */
if (config->flush_max_buffers == 0)
return false;
return true;
}
static int cleanup(struct ocf_cache *cache, uint32_t clines_no,
struct ocf_user_part *part, uint32_t io_queue)
{
struct flush_data *flush_data;
size_t flush_data_limit;
int flushed_blocks = 0;
if (!is_cleanup_possible(cache))
return flushed_blocks;
if (OCF_METADATA_LOCK_WR_TRY())
return flushed_blocks;
OCF_REALLOC_INIT(&flush_data, &flush_data_limit);
OCF_REALLOC(&flush_data, sizeof(flush_data[0]), clines_no,
&flush_data_limit);
if (!flush_data) {
OCF_METADATA_UNLOCK_WR();
ocf_cache_log(cache, log_warn, "No memory to allocate flush "
"data for ALRU cleaning policy");
return flushed_blocks;
}
flushed_blocks = perform_flushing(clines_no, cache, io_queue,
flush_data, part);
OCF_METADATA_UNLOCK_WR();
OCF_REALLOC_DEINIT(&flush_data, &flush_data_limit);
return flushed_blocks;
}
int cleaning_alru_perform_cleaning(ocf_cache_t cache, uint32_t io_queue)
{
struct ocf_user_part *parts[OCF_IO_CLASS_MAX];
int part_id = OCF_IO_CLASS_MAX - 1;
struct alru_cleaning_policy_config *config;
uint32_t clines_no;
config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data;
get_parts_sorted(parts, cache);
clines_no = config->flush_max_buffers;
while (part_id >= 0) {
clines_no -= cleanup(cache, clines_no,
parts[part_id], io_queue);
if (clines_no > 0)
part_id--;
else
break;
}
if (clines_no > 0)
return config->thread_wakeup_time * 1000;
return 0;
}

30
src/cleaning/alru.h Normal file
View File

@ -0,0 +1,30 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_CLEANING_POLICY_ALRU_H__
#define __LAYER_CLEANING_POLICY_ALRU_H__
#include "cleaning.h"
#include "alru_structs.h"
void cleaning_policy_alru_setup(struct ocf_cache *cache);
int cleaning_policy_alru_initialize(struct ocf_cache *cache,
int init_metadata);
void cleaning_policy_alru_init_cache_block(struct ocf_cache *cache,
uint32_t cache_line);
void cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache,
uint32_t cache_line);
int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte);
void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache,
uint32_t cache_line);
int cleaning_policy_alru_set_cleaning_param(struct ocf_cache *cache,
uint32_t param_id, uint32_t param_value);
int cleaning_policy_alru_get_cleaning_param(struct ocf_cache *cache,
uint32_t param_id, uint32_t *param_value);
int cleaning_alru_perform_cleaning(struct ocf_cache *cache, uint32_t io_queue);
#endif

32
src/cleaning/alru_structs.h Normal file
View File

@ -0,0 +1,32 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __CLEANING_ALRU_STRUCTS_H__
#define __CLEANING_ALRU_STRUCTS_H__
#include "ocf/ocf.h"
#include "ocf_env.h"
struct alru_cleaning_policy_meta {
/* Lru pointers 2*4=8 bytes */
uint32_t timestamp;
uint32_t lru_prev;
uint32_t lru_next;
} __attribute__((packed));
struct alru_cleaning_policy_config {
uint32_t thread_wakeup_time; /* in seconds */
uint32_t stale_buffer_time; /* in seconds */
uint32_t flush_max_buffers; /* in lines */
uint32_t activity_threshold; /* in milliseconds */
};
struct alru_cleaning_policy {
env_atomic size;
uint32_t lru_head;
uint32_t lru_tail;
};
#endif

137
src/cleaning/cleaning.c Normal file
View File

@ -0,0 +1,137 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "cleaning.h"
#include "alru.h"
#include "acp.h"
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
#include "../mngt/ocf_mngt_common.h"
#include "../metadata/metadata.h"
#define SLEEP_TIME_MS (1000)
struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max] = {
[ocf_cleaning_nop] = {
.name = "nop",
},
[ocf_cleaning_alru] = {
.setup = cleaning_policy_alru_setup,
.init_cache_block = cleaning_policy_alru_init_cache_block,
.purge_cache_block = cleaning_policy_alru_purge_cache_block,
.purge_range = cleaning_policy_alru_purge_range,
.set_hot_cache_line = cleaning_policy_alru_set_hot_cache_line,
.initialize = cleaning_policy_alru_initialize,
.set_cleaning_param = cleaning_policy_alru_set_cleaning_param,
.get_cleaning_param = cleaning_policy_alru_get_cleaning_param,
.perform_cleaning = cleaning_alru_perform_cleaning,
.name = "alru",
},
[ocf_cleaning_acp] = {
.setup = cleaning_policy_acp_setup,
.init_cache_block = cleaning_policy_acp_init_cache_block,
.purge_cache_block = cleaning_policy_acp_purge_block,
.purge_range = cleaning_policy_acp_purge_range,
.set_hot_cache_line = cleaning_policy_acp_set_hot_cache_line,
.initialize = cleaning_policy_acp_initialize,
.deinitialize = cleaning_policy_acp_deinitialize,
.set_cleaning_param = cleaning_policy_acp_set_cleaning_param,
.get_cleaning_param = cleaning_policy_acp_get_cleaning_param,
.add_core = cleaning_policy_acp_add_core,
.remove_core = cleaning_policy_acp_remove_core,
.perform_cleaning = cleaning_policy_acp_perform_cleaning,
.name = "acp",
},
};
int ocf_start_cleaner(struct ocf_cache *cache)
{
return ctx_cleaner_init(cache->owner, &cache->cleaner);
}
void ocf_stop_cleaner(struct ocf_cache *cache)
{
ctx_cleaner_stop(cache->owner, &cache->cleaner);
}
void ocf_cleaner_set_priv(ocf_cleaner_t c, void *priv)
{
OCF_CHECK_NULL(c);
c->priv = priv;
}
void *ocf_cleaner_get_priv(ocf_cleaner_t c)
{
OCF_CHECK_NULL(c);
return c->priv;
}
ocf_cache_t ocf_cleaner_get_cache(ocf_cleaner_t c)
{
OCF_CHECK_NULL(c);
return container_of(c, struct ocf_cache, cleaner);
}
static int _ocf_cleaner_run_check_dirty_inactive(struct ocf_cache *cache)
{
int i;
if (!env_bit_test(ocf_cache_state_incomplete, &cache->cache_state))
return 0;
for (i = 0; i < OCF_CORE_MAX; ++i) {
if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap))
continue;
if (cache->core_obj[i].opened && env_atomic_read(&(cache->
core_runtime_meta[i].dirty_clines))) {
return 0;
}
}
return 1;
}
uint32_t ocf_cleaner_run(ocf_cleaner_t c, uint32_t io_queue)
{
struct ocf_cache *cache;
ocf_cleaning_t clean_type;
int sleep = SLEEP_TIME_MS;
cache = ocf_cleaner_get_cache(c);
/* Do not involve cleaning when cache is not running
* (error, etc.).
*/
if (!env_bit_test(ocf_cache_state_running, &cache->cache_state) ||
ocf_mngt_is_cache_locked(cache)) {
return SLEEP_TIME_MS;
}
/* Sleep in case there is management operation in progress. */
if (env_rwsem_down_write_trylock(&cache->lock) == 0)
return SLEEP_TIME_MS;
if (_ocf_cleaner_run_check_dirty_inactive(cache)) {
env_rwsem_up_write(&cache->lock);
return SLEEP_TIME_MS;
}
clean_type = cache->conf_meta->cleaning_policy_type;
ENV_BUG_ON(clean_type >= ocf_cleaning_max);
/* Call cleaning. */
if (cleaning_policy_ops[clean_type].perform_cleaning) {
sleep = cleaning_policy_ops[clean_type].
perform_cleaning(cache, io_queue);
}
env_rwsem_up_write(&cache->lock);
return sleep;
}
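ocf_cleaner_run() is meant to be driven periodically by the adaptation layer; its return value is the number of milliseconds after which the active policy wants to run again. A minimal sketch of such a driver follows, assuming a POSIX-like thread (the real hookup is environment specific and happens through the context's ctx_cleaner_init()/ctx_cleaner_stop() callbacks):
#include <unistd.h>

static void example_cleaner_loop(ocf_cleaner_t cleaner, uint32_t io_queue,
		volatile int *stop)
{
	while (!*stop) {
		/* run one cleaning iteration and honour the requested delay */
		uint32_t sleep_ms = ocf_cleaner_run(cleaner, io_queue);

		usleep(sleep_ms * 1000);
	}
}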

75
src/cleaning/cleaning.h Normal file
View File

@ -0,0 +1,75 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_CLEANING_POLICY_H__
#define __LAYER_CLEANING_POLICY_H__
#include "alru_structs.h"
#include "nop_structs.h"
#include "acp_structs.h"
#define CLEANING_POLICY_CONFIG_BYTES 256
#define CLEANING_POLICY_TYPE_MAX 4
struct ocf_request;
struct cleaning_policy_config {
uint8_t data[CLEANING_POLICY_CONFIG_BYTES];
struct acp_cleaning_policy_config acp;
};
struct cleaning_policy {
union {
struct nop_cleaning_policy nop;
struct alru_cleaning_policy alru;
} policy;
};
/* Cleaning policy metadata per cache line */
struct cleaning_policy_meta {
union {
struct nop_cleaning_policy_meta nop;
struct alru_cleaning_policy_meta alru;
struct acp_cleaning_policy_meta acp;
} meta;
};
struct cleaning_policy_ops {
void (*setup)(struct ocf_cache *cache);
int (*initialize)(struct ocf_cache *cache, int init_metadata);
void (*deinitialize)(struct ocf_cache *cache);
int (*add_core)(struct ocf_cache *cache, ocf_core_id_t core_id);
void (*remove_core)(struct ocf_cache *cache, ocf_core_id_t core_id);
void (*init_cache_block)(struct ocf_cache *cache, uint32_t cache_line);
void (*purge_cache_block)(struct ocf_cache *cache,
uint32_t cache_line);
int (*purge_range)(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte);
void (*set_hot_cache_line)(struct ocf_cache *cache,
uint32_t cache_line);
int (*set_cleaning_param)(struct ocf_cache *cache,
uint32_t param_id, uint32_t param_value);
int (*get_cleaning_param)(struct ocf_cache *cache,
uint32_t param_id, uint32_t *param_value);
/**
* @brief Performs cleaning.
* @return requested time (in ms) of next call
*/
int (*perform_cleaning)(struct ocf_cache *cache,
uint32_t io_queue);
const char *name;
};
extern struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max];
struct ocf_cleaner {
void *priv;
};
int ocf_start_cleaner(struct ocf_cache *cache);
void ocf_stop_cleaner(struct ocf_cache *cache);
#endif
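The ops table declared above is what the cleaner dispatches through, so adding a policy amounts to providing one more cleaning_policy_ops entry. A purely hypothetical sketch: the "lazy" policy, its enum value and its function do not exist in this tree, and a real policy would also have to extend ocf_cleaning_t and the metadata unions above.
/* hypothetical: return the time (in ms) after which we want to be called again */
static int cleaning_policy_lazy_perform_cleaning(struct ocf_cache *cache,
		uint32_t io_queue)
{
	return 1000;
}

/* hypothetical entry that would be added to cleaning_policy_ops[]:
 *
 *	[ocf_cleaning_lazy] = {
 *		.perform_cleaning = cleaning_policy_lazy_perform_cleaning,
 *		.name = "lazy",
 *	},
 */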

19
src/cleaning/cleaning_priv.h Normal file
View File

@ -0,0 +1,19 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
static inline void cleaning_policy_param_error(ocf_cache_t cache,
const char *param_name, uint32_t min, uint32_t max)
{
ocf_cache_log(cache, log_err, "Refusing setting flush "
"parameters because parameter %s is not within range "
"of <%d-%d>\n", param_name, min, max);
}
#define OCF_CLEANING_CHECK_PARAM(CACHE, VAL, MIN, MAX, NAME) ({ \
if (VAL < MIN || VAL > MAX) { \
cleaning_policy_param_error(CACHE, NAME, MIN, MAX); \
return -OCF_ERR_INVAL; \
} \
})
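Note that the macro executes return in the function that invokes it, which is why the set_cleaning_param() implementations above can validate and assign in two statements. A minimal usage sketch (the parameter name and bounds below are made up):
static int example_set_param(ocf_cache_t cache, uint32_t value,
		uint32_t *field)
{
	OCF_CLEANING_CHECK_PARAM(cache, value, 1, 10000, "example_param");

	/* reached only when value is within <1-10000> */
	*field = value;
	return 0;
}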

15
src/cleaning/nop_structs.h Normal file
View File

@ -0,0 +1,15 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_CLEANING_POLICY_NOP_STRUCTS_H__
#define __LAYER_CLEANING_POLICY_NOP_STRUCTS_H__
struct nop_cleaning_policy_meta {
} __attribute__((packed));
struct nop_cleaning_policy {
};
#endif

File diff suppressed because it is too large

176
src/concurrency/ocf_cache_concurrency.h Normal file
View File

@ -0,0 +1,176 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef OCF_CACHE_CONCURRENCY_H_
#define OCF_CACHE_CONCURRENCY_H_
/**
* @file ocf_cache_concurrency.h
* @brief OCF cache concurrency module
*/
/**
* @brief OCF cache concurrency module handle
*/
struct ocf_cache_concurrency;
/**
* @brief Initialize OCF cache concurrency module
*
* @param cache - OCF cache instance
* @return 0 - Initialization successful, otherwise ERROR
*/
int ocf_cache_concurrency_init(struct ocf_cache *cache);
/**
* @brief De-initialize OCF cache concurrency module
*
* @param cache - OCF cache instance
*/
void ocf_cache_concurrency_deinit(struct ocf_cache *cache);
/**
* @brief Get the number of OCF requests waiting (suspended) due to cache
* line overlap
*
* @param cache - OCF cache instance
*
* @return Number of suspended OCF requests
*/
uint32_t ocf_cache_concurrency_suspended_no(struct ocf_cache *cache);
/**
* @brief Return the memory footprint consumed by the cache concurrency module
*
* @param cache - OCF cache instance
*
* @return Memory footprint of cache concurrency module
*/
size_t ocf_cache_concurrency_size_of(struct ocf_cache *cache);
/**
* @brief Lock OCF request for WRITE access (Lock all cache lines in map info)
*
* @note rq->resume callback has to be set
*
* @param rq - OCF request
*
* @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
*
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, the request
* was added to the waiting list. When the lock is acquired, rq->resume will
* be called
*/
int ocf_rq_trylock_wr(struct ocf_request *rq);
/**
* @brief Try to complete the WRITE lock of an OCF request (lock the cache
* lines marked as invalid)
*
* @param rq - OCF request
*
* @note If the request is not locked, it will be added to the waiting list
*
* @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
*
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, the request
* was added to the waiting list. When the lock is acquired, rq->resume will
* be called
*/
int ocf_rq_retrylock_wr(struct ocf_request *rq);
/**
* @brief Lock OCF request for READ access (Lock all cache lines in map info)
*
* @note rq->resume callback has to be set
*
* @param rq - OCF request
*
* @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed
*
* @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, the request
* was added to the waiting list. When the lock is acquired, rq->resume will
* be called
*/
int ocf_rq_trylock_rd(struct ocf_request *rq);
/**
* @brief Unlock OCF request from WRITE access
*
* @param rq - OCF request
*/
void ocf_rq_unlock_wr(struct ocf_request *rq);
/**
* @brief Unlock OCF request from READ access
*
* @param rq - OCF request
*/
void ocf_rq_unlock_rd(struct ocf_request *rq);
/**
* @brief Unlock OCF request from READ or WRITE access
*
* @param rq - OCF request
*/
void ocf_rq_unlock(struct ocf_request *rq);
/**
* @brief Check if cache line is used.
*
* A cache line is used when:
* 1. It is locked for write or read access,
* or
* 2. The locked bit is set in its metadata
*
* @param cache - OCF cache instance
* @param line - Cache line to be checked
*
* @retval true - cache line is used
* @retval false - cache line is not used
*/
bool ocf_cache_line_is_used(struct ocf_cache *cache,
ocf_cache_line_t line);
/**
* @brief Check whether there are any waiters on the waiting list
* for the specified cache line
*
* @param cache - OCF cache instance
* @param line - Cache line to be checked for waiters
*
* @retval true - there are waiters
* @retval false - No waiters
*/
bool ocf_cache_line_are_waiters(struct ocf_cache *cache,
ocf_cache_line_t line);
/**
* @brief Unlock a request map info entry from WRITE or READ access.
*
* @param cache - OCF cache instance
* @param rq - OCF request
* @param entry - request map entry number
*/
void ocf_rq_unlock_entry(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t entry);
/**
* @brief Release cache line read lock
*
* @param cache - OCF cache instance
* @param line - Cache line to be unlocked
*/
void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line);
/**
* @brief Attempt to lock cache line for read
*
* @param cache - OCF cache instance
* @param line - Cache line to be locked for read
*
* @retval true - read lock successfully acquired
* @retval false - failed to acquire read lock
*/
bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line);
#endif /* OCF_CACHE_CONCURRENCY_H_ */

24
src/concurrency/ocf_concurrency.c Normal file
View File

@ -0,0 +1,24 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf_concurrency.h"
int ocf_concurrency_init(struct ocf_cache *cache)
{
int result = 0;
result = ocf_cache_concurrency_init(cache);
if (result)
ocf_concurrency_deinit(cache);
return result;
}
void ocf_concurrency_deinit(struct ocf_cache *cache)
{
ocf_cache_concurrency_deinit(cache);
}

43
src/concurrency/ocf_concurrency.h Normal file
View File

@ -0,0 +1,43 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef OCF_CONCURRENCY_H_
#define OCF_CONCURRENCY_H_
#include "../ocf_cache_priv.h"
/**
* @file ocf_concurrency.h
* @brief OCF concurrency
*/
/**
* @brief Lock result - Lock acquired successfully
*/
#define OCF_LOCK_ACQUIRED 0
/**
* @brief Lock result - Lock not acquired; the lock request was added to the waiting list
*/
#define OCF_LOCK_NOT_ACQUIRED 1
/**
* @brief Initialize OCF concurrency module
*
* @param cache - OCF cache instance
* @return 0 - Initialization successful, otherwise ERROR
*/
int ocf_concurrency_init(struct ocf_cache *cache);
/**
* @brief De-initialize OCF concurrency module
*
* @param cache - OCF cache instance
*/
void ocf_concurrency_deinit(struct ocf_cache *cache);
#include "ocf_cache_concurrency.h"
#endif /* OCF_CONCURRENCY_H_ */
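Putting the two headers above together, the engines use a lock-or-suspend pattern: try to take the cache line locks, continue immediately on OCF_LOCK_ACQUIRED, otherwise rely on the resume callback. A minimal sketch follows; it assumes rq->resume has already been pointed at the engine's resume handler, as the trylock functions require:
static int example_lock_for_write(struct ocf_request *rq)
{
	int lock = ocf_rq_trylock_wr(rq);

	if (lock == OCF_LOCK_ACQUIRED) {
		/* every cache line in the map is locked - process now,
		 * then release with ocf_rq_unlock_wr() */
		return 0;
	}

	/* OCF_LOCK_NOT_ACQUIRED: the request sits on the waiting list and
	 * rq->resume will be called once the lock can be granted */
	return 0;
}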

314
src/engine/cache_engine.c Normal file
View File

@ -0,0 +1,314 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_rd.h"
#include "engine_wt.h"
#include "engine_pt.h"
#include "engine_wi.h"
#include "engine_wa.h"
#include "engine_wb.h"
#include "engine_fast.h"
#include "engine_discard.h"
#include "engine_d2c.h"
#include "engine_ops.h"
#include "../utils/utils_part.h"
#include "../utils/utils_rq.h"
#include "../metadata/metadata.h"
#include "../layer_space_management.h"
enum ocf_io_if_type {
/* Public OCF IO interfaces to be set by user */
OCF_IO_WT_IF,
OCF_IO_WB_IF,
OCF_IO_WA_IF,
OCF_IO_WI_IF,
OCF_IO_PT_IF,
OCF_IO_MAX_IF,
/* Private OCF interfaces */
OCF_IO_FAST_IF,
OCF_IO_DISCARD_IF,
OCF_IO_D2C_IF,
OCF_IO_OPS_IF,
OCF_IO_PRIV_MAX_IF,
};
static const struct ocf_io_if IO_IFS[OCF_IO_PRIV_MAX_IF] = {
[OCF_IO_WT_IF] = {
.read = ocf_read_generic,
.write = ocf_write_wt,
.name = "Write Through"
},
[OCF_IO_WB_IF] = {
.read = ocf_read_generic,
.write = ocf_write_wb,
.name = "Write Back"
},
[OCF_IO_WA_IF] = {
.read = ocf_read_generic,
.write = ocf_write_wa,
.name = "Write Around"
},
[OCF_IO_WI_IF] = {
.read = ocf_read_generic,
.write = ocf_write_wi,
.name = "Write Invalidate"
},
[OCF_IO_PT_IF] = {
.read = ocf_read_pt,
.write = ocf_write_wi,
.name = "Pass Through",
},
[OCF_IO_FAST_IF] = {
.read = ocf_read_fast,
.write = ocf_write_fast,
.name = "Fast",
},
[OCF_IO_DISCARD_IF] = {
.read = ocf_discard,
.write = ocf_discard,
.name = "Discard",
},
[OCF_IO_D2C_IF] = {
.read = ocf_io_d2c,
.write = ocf_io_d2c,
.name = "Direct to core",
},
[OCF_IO_OPS_IF] = {
.read = ocf_engine_ops,
.write = ocf_engine_ops,
.name = "Ops engine",
},
};
static const struct ocf_io_if *cache_mode_io_if_map[ocf_req_cache_mode_max] = {
[ocf_req_cache_mode_wt] = &IO_IFS[OCF_IO_WT_IF],
[ocf_req_cache_mode_wb] = &IO_IFS[OCF_IO_WB_IF],
[ocf_req_cache_mode_wa] = &IO_IFS[OCF_IO_WA_IF],
[ocf_req_cache_mode_wi] = &IO_IFS[OCF_IO_WI_IF],
[ocf_req_cache_mode_pt] = &IO_IFS[OCF_IO_PT_IF],
[ocf_req_cache_mode_fast] = &IO_IFS[OCF_IO_FAST_IF],
[ocf_req_cache_mode_d2c] = &IO_IFS[OCF_IO_D2C_IF],
};
const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t req_cache_mode)
{
if (req_cache_mode == ocf_req_cache_mode_max)
return NULL;
return cache_mode_io_if_map[req_cache_mode];
}
struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
struct ocf_queue *q)
{
unsigned long lock_flags;
struct ocf_request *rq;
OCF_CHECK_NULL(q);
/* LOCK */
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
if (list_empty(&q->io_list)) {
/* No items on the list */
env_spinlock_unlock_irqrestore(&q->io_list_lock,
lock_flags);
return NULL;
}
/* Get the first request and remove it from the list */
rq = list_first_entry(&q->io_list, struct ocf_request, list);
env_atomic_dec(&q->io_no);
list_del(&rq->list);
/* UNLOCK */
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
OCF_CHECK_NULL(rq);
if (ocf_rq_alloc_map(rq)) {
rq->complete(rq, rq->error);
return NULL;
}
return rq;
}
bool ocf_fallback_pt_is_on(ocf_cache_t cache)
{
ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);
return (cache->fallback_pt_error_threshold !=
OCF_CACHE_FALLBACK_PT_INACTIVE &&
env_atomic_read(&cache->fallback_pt_error_counter) >=
cache->fallback_pt_error_threshold);
}
#define SEQ_CUTOFF_FULL_MARGIN \
(OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT)
static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache)
{
if (!env_atomic_read(&cache->attached))
return false;
return (cache->device->freelist_part->curr_size <= SEQ_CUTOFF_FULL_MARGIN);
}
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
uint64_t bytes)
{
ocf_cache_t cache = ocf_core_get_cache(core);
ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core);
switch (policy) {
case ocf_seq_cutoff_policy_always:
break;
case ocf_seq_cutoff_policy_full:
if (ocf_seq_cutoff_is_on(cache))
break;
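/* intentional fall-through: when the cache is not yet close to full,
 * the "full" policy behaves like "never" */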
case ocf_seq_cutoff_policy_never:
return false;
default:
ENV_WARN(true, "Invalid sequential cutoff policy!");
return false;
}
if (dir == core->seq_cutoff.rw &&
core->seq_cutoff.last == addr &&
core->seq_cutoff.bytes + bytes >=
ocf_core_get_seq_cutoff_threshold(core)) {
return true;
}
return false;
}
void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req)
{
/*
* If the IO is not consecutive with the previous one or goes in a
* different direction, reset the sequential cutoff state.
*/
if (req->byte_position != core->seq_cutoff.last ||
req->rw != core->seq_cutoff.rw) {
core->seq_cutoff.rw = req->rw;
core->seq_cutoff.bytes = 0;
}
/* Update last accessed position and bytes counter */
core->seq_cutoff.last = req->byte_position + req->byte_length;
core->seq_cutoff.bytes += req->byte_length;
}
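To make the interplay of the two functions above concrete, here is a standalone simulation of the per-core counters for a stream of contiguous same-direction writes. The 1 MiB threshold is an assumption chosen for illustration; the real value comes from ocf_core_get_seq_cutoff_threshold(). With 128 KiB requests, the cutoff condition becomes true from the eighth request onward:
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t threshold = 1024 * 1024;	/* assumed cutoff threshold */
	const uint64_t io_size = 128 * 1024;	/* contiguous 128 KiB writes */
	uint64_t last = 0, bytes = 0, addr = 0;
	int i;

	for (i = 1; i <= 10; i++) {
		/* mirrors ocf_seq_cutoff_check() for a same-direction stream */
		int cutoff = (addr == last) && (bytes + io_size >= threshold);

		printf("request %2d at %8llu: %s\n", i,
				(unsigned long long)addr,
				cutoff ? "pass-through" : "cached");

		/* mirrors ocf_seq_cutoff_update() */
		bytes += io_size;
		last = addr + io_size;
		addr = last;
	}
	return 0;
}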
ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
ocf_core_t core, struct ocf_io *io)
{
ocf_cache_mode_t mode;
if (cache->pt_unaligned_io && !ocf_rq_is_4k(io->addr, io->bytes))
return ocf_cache_mode_pt;
mode = ocf_part_get_cache_mode(cache,
ocf_part_class2id(cache, io->class));
if (!ocf_cache_mode_is_valid(mode))
mode = cache->conf_meta->cache_mode;
if (ocf_seq_cutoff_check(core, io->dir, io->addr, io->bytes))
mode = ocf_cache_mode_pt;
if (ocf_fallback_pt_is_on(cache))
mode = ocf_cache_mode_pt;
if (mode == ocf_cache_mode_wb &&
env_atomic_read(&cache->flush_started))
mode = ocf_cache_mode_wt;
return mode;
}
int ocf_engine_hndl_rq(struct ocf_request *rq,
ocf_req_cache_mode_t req_cache_mode)
{
ocf_cache_t cache = rq->cache;
OCF_CHECK_NULL(cache);
rq->io_if = ocf_get_io_if(req_cache_mode);
if (!rq->io_if)
return -EINVAL;
/* Until the OCF engine is fully synchronous, the OCF request needs
* to be pushed to the OCF workers
*/
ocf_engine_push_rq_back(rq, true);
return 0;
}
int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
ocf_req_cache_mode_t req_cache_mode)
{
const struct ocf_io_if *io_if;
io_if = ocf_get_io_if(req_cache_mode);
if (!io_if)
return -EINVAL;
switch (rq->rw) {
case OCF_READ:
return io_if->read(rq);
case OCF_WRITE:
return io_if->write(rq);
default:
return OCF_FAST_PATH_NO;
}
}
static void ocf_engine_hndl_2dc_rq(struct ocf_request *rq)
{
if (OCF_READ == rq->rw)
IO_IFS[OCF_IO_D2C_IF].read(rq);
else if (OCF_WRITE == rq->rw)
IO_IFS[OCF_IO_D2C_IF].write(rq);
else
ENV_BUG();
}
void ocf_engine_hndl_discard_rq(struct ocf_request *rq)
{
if (rq->d2c) {
ocf_engine_hndl_2dc_rq(rq);
return;
}
if (OCF_READ == rq->rw)
IO_IFS[OCF_IO_DISCARD_IF].read(rq);
else if (OCF_WRITE == rq->rw)
IO_IFS[OCF_IO_DISCARD_IF].write(rq);
else
ENV_BUG();
}
void ocf_engine_hndl_ops_rq(struct ocf_request *rq)
{
if (rq->d2c)
rq->io_if = &IO_IFS[OCF_IO_D2C_IF];
else
rq->io_if = &IO_IFS[OCF_IO_OPS_IF];
ocf_engine_push_rq_back(rq, true);
}

82
src/engine/cache_engine.h Normal file
View File

@ -0,0 +1,82 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __CACHE_ENGINE_H_
#define __CACHE_ENGINE_H_
struct ocf_thread_priv;
struct ocf_request;
#define LOOKUP_HIT 5
#define LOOKUP_MISS 6
#define LOOKUP_MAPPED 8
typedef enum {
/* modes inherited from user API */
ocf_req_cache_mode_wt = ocf_cache_mode_wt,
ocf_req_cache_mode_wb = ocf_cache_mode_wb,
ocf_req_cache_mode_wa = ocf_cache_mode_wa,
ocf_req_cache_mode_pt = ocf_cache_mode_pt,
ocf_req_cache_mode_wi = ocf_cache_mode_wi,
/* internal modes */
ocf_req_cache_mode_fast,
/*!< Fast path */
ocf_req_cache_mode_d2c,
/*!< Direct to Core - pass through to core without
touching cacheline metadata */
ocf_req_cache_mode_max,
} ocf_req_cache_mode_t;
struct ocf_io_if {
int (*read)(struct ocf_request *req);
int (*write)(struct ocf_request *req);
const char *name;
};
ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache,
ocf_core_t core, struct ocf_io *io);
const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t cache_mode);
static inline const char *ocf_get_io_iface_name(ocf_cache_mode_t cache_mode)
{
const struct ocf_io_if *iface = ocf_get_io_if(cache_mode);
return iface ? iface->name : "Unknown";
}
static inline bool ocf_cache_mode_is_valid(ocf_cache_mode_t mode)
{
return mode >= ocf_cache_mode_wt && mode < ocf_cache_mode_max;
}
void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req);
bool ocf_fallback_pt_is_on(ocf_cache_t cache);
bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr,
uint64_t bytes);
struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache,
struct ocf_queue *q);
int ocf_engine_hndl_rq(struct ocf_request *rq,
ocf_req_cache_mode_t req_cache_mode);
#define OCF_FAST_PATH_YES 7
#define OCF_FAST_PATH_NO 13
int ocf_engine_hndl_fast_rq(struct ocf_request *rq,
ocf_req_cache_mode_t req_cache_mode);
void ocf_engine_hndl_discard_rq(struct ocf_request *rq);
void ocf_engine_hndl_ops_rq(struct ocf_request *rq);
#endif

105
src/engine/engine_bf.c Normal file
View File

@ -0,0 +1,105 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
#include "engine_bf.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "bf"
#include "engine_debug.h"
/* Decrements and checks if queue may be unblocked again */
static inline void backfill_queue_dec_unblock(struct ocf_cache *cache)
{
env_atomic_dec(&cache->pending_read_misses_list_count);
if (!env_atomic_read(&cache->pending_read_misses_list_blocked))
return;
if (env_atomic_read(&cache->pending_read_misses_list_count)
< cache->backfill.queue_unblock_size)
env_atomic_set(&cache->pending_read_misses_list_blocked, 0);
}
static inline void backfill_queue_inc_block(struct ocf_cache *cache)
{
if (env_atomic_inc_return(&cache->pending_read_misses_list_count)
>= cache->backfill.max_queue_size)
env_atomic_set(&cache->pending_read_misses_list_blocked, 1);
}
static void _ocf_backfill_do_io(void *private_data, int error)
{
struct ocf_request *rq = (struct ocf_request *)private_data;
struct ocf_cache *cache = rq->cache;
if (error)
rq->error = error;
if (rq->error)
inc_fallback_pt_error_counter(rq->cache);
/* Handle callback-caller race to let only one of the two complete the
* request. Also, complete original request only if this is the last
* sub-request to complete
*/
if (env_atomic_dec_return(&rq->req_remaining) == 0) {
/* We must free the pages we have allocated */
ctx_data_secure_erase(cache->owner, rq->data);
ctx_data_munlock(cache->owner, rq->data);
ctx_data_free(cache->owner, rq->data);
rq->data = NULL;
if (rq->error) {
env_atomic_inc(&cache->core_obj[rq->core_id].
counters->cache_errors.write);
ocf_engine_invalidate(rq);
} else {
ocf_rq_unlock(rq);
/* always free the request at the last point
* of the completion path
*/
ocf_rq_put(rq);
}
}
}
static int _ocf_backfill_do(struct ocf_request *rq)
{
unsigned int reqs_to_issue;
backfill_queue_dec_unblock(rq->cache);
reqs_to_issue = ocf_engine_io_count(rq);
/* There will be #reqs_to_issue completions */
env_atomic_set(&rq->req_remaining, reqs_to_issue);
rq->data = rq->cp_data;
ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_WRITE, reqs_to_issue,
_ocf_backfill_do_io, rq);
return 0;
}
static const struct ocf_io_if _io_if_backfill = {
.read = _ocf_backfill_do,
.write = _ocf_backfill_do,
};
void ocf_engine_backfill(struct ocf_request *rq)
{
backfill_queue_inc_block(rq->cache);
ocf_engine_push_rq_front_if(rq, &_io_if_backfill, true);
}
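
/* Editorial note: backfill throttling is a simple hysteresis on the count of pending
 * read-miss backfills: backfill_queue_inc_block() raises the blocked flag once the
 * count reaches backfill.max_queue_size, and backfill_queue_dec_unblock() clears it
 * only after the count falls below backfill.queue_unblock_size; ocf_read_generic()
 * then bypasses the cache (PT) while the flag is set. Below is a self-contained toy
 * model of that hysteresis, not OCF code.
 */
#include <stdbool.h>

struct backfill_model {
	int count;		/* pending backfill requests */
	bool blocked;		/* new read misses go pass-through */
	int max_queue_size;	/* block threshold */
	int unblock_size;	/* unblock threshold (lower than max) */
};

static void model_inc(struct backfill_model *m)
{
	if (++m->count >= m->max_queue_size)
		m->blocked = true;
}

static void model_dec(struct backfill_model *m)
{
	--m->count;
	if (m->blocked && m->count < m->unblock_size)
		m->blocked = false;
}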

11
src/engine/engine_bf.h Normal file
View File

@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_BF_H_
#define ENGINE_BF_H_
void ocf_engine_backfill(struct ocf_request *rq);
#endif /* ENGINE_BF_H_ */

621
src/engine/engine_common.c Normal file
View File

@ -0,0 +1,621 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "../ocf_queue_priv.h"
#include "engine_common.h"
#define OCF_ENGINE_DEBUG_IO_NAME "common"
#include "engine_debug.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cleaner.h"
#include "../metadata/metadata.h"
#include "../layer_space_management.h"
void ocf_engine_error(struct ocf_request *rq,
bool stop_cache, const char *msg)
{
struct ocf_cache *cache = rq->cache;
if (stop_cache)
env_bit_clear(ocf_cache_state_running, &cache->cache_state);
ocf_core_log(&cache->core_obj[rq->core_id], log_err,
"%s sector: %" ENV_PRIu64 ", bytes: %u\n", msg,
BYTES_TO_SECTORS(rq->byte_position), rq->byte_length);
}
void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
struct ocf_map_info *entry, ocf_core_id_t core_id,
uint64_t core_line)
{
ocf_cache_line_t line;
ocf_cache_line_t hash_key;
hash_key = ocf_metadata_hash_func(cache, core_line, core_id);
/* Initially assume that we have cache miss.
* Hash points to proper bucket.
*/
entry->hash_key = hash_key;
entry->status = LOOKUP_MISS;
entry->coll_idx = cache->device->collision_table_entries;
entry->core_line = core_line;
line = ocf_metadata_get_hash(cache, hash_key);
while (line != cache->device->collision_table_entries) {
ocf_core_id_t curr_core_id;
uint64_t curr_core_line;
ocf_metadata_get_core_info(cache, line, &curr_core_id,
&curr_core_line);
if (core_id == curr_core_id && curr_core_line == core_line) {
entry->coll_idx = line;
entry->status = LOOKUP_HIT;
break;
}
line = ocf_metadata_get_collision_next(cache, line);
}
}
static inline int _ocf_engine_check_map_entry(struct ocf_cache *cache,
struct ocf_map_info *entry, ocf_core_id_t core_id)
{
ocf_core_id_t _core_id;
uint64_t _core_line;
if (entry->status == LOOKUP_MISS)
return 0;
ENV_BUG_ON(entry->coll_idx >= cache->device->collision_table_entries);
ocf_metadata_get_core_info(cache, entry->coll_idx, &_core_id,
&_core_line);
if (core_id == _core_id && _core_line == entry->core_line)
return 0;
else
return -1;
}
void ocf_engine_update_rq_info(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t entry)
{
uint8_t start_sector = 0;
uint8_t end_sector = ocf_line_end_sector(cache);
struct ocf_map_info *_entry = &(rq->map[entry]);
if (entry == 0) {
start_sector = BYTES_TO_SECTORS(rq->byte_position)
% ocf_line_sectors(cache);
}
if (entry == rq->core_line_count - 1) {
end_sector = BYTES_TO_SECTORS(rq->byte_position +
rq->byte_length - 1)% ocf_line_sectors(cache);
}
/* Handle return value */
switch (_entry->status) {
case LOOKUP_HIT:
if (metadata_test_valid_sec(cache, _entry->coll_idx,
start_sector, end_sector)) {
rq->info.hit_no++;
} else {
rq->info.invalid_no++;
}
/* Check request is dirty */
if (metadata_test_dirty(cache, _entry->coll_idx)) {
rq->info.dirty_any++;
/* Check if cache line is fully dirty */
if (metadata_test_dirty_all(cache, _entry->coll_idx))
rq->info.dirty_all++;
}
if (rq->part_id != ocf_metadata_get_partition_id(cache,
_entry->coll_idx)) {
/*
* Need to move this cache line into other partition
*/
_entry->re_part = rq->info.re_part = true;
}
break;
case LOOKUP_MISS:
rq->info.seq_req = false;
break;
case LOOKUP_MAPPED:
break;
default:
ENV_BUG();
break;
}
/* Check if cache hit is sequential */
if (rq->info.seq_req && entry) {
if (ocf_metadata_map_lg2phy(cache,
(rq->map[entry - 1].coll_idx)) + 1 !=
ocf_metadata_map_lg2phy(cache,
_entry->coll_idx)) {
rq->info.seq_req = false;
}
}
}
void ocf_engine_traverse(struct ocf_request *rq)
{
uint32_t i;
uint64_t core_line;
struct ocf_cache *cache = rq->cache;
ocf_core_id_t core_id = rq->core_id;
OCF_DEBUG_TRACE(rq->cache);
ocf_rq_clear_info(rq);
rq->info.seq_req = true;
for (i = 0, core_line = rq->core_line_first;
core_line <= rq->core_line_last; core_line++, i++) {
struct ocf_map_info *entry = &(rq->map[i]);
ocf_engine_lookup_map_entry(cache, entry, core_id,
core_line);
if (entry->status != LOOKUP_HIT) {
rq->info.seq_req = false;
/* There is a miss, look up the next map entry */
OCF_DEBUG_PARAM(cache, "Miss, core line = %llu",
entry->core_line);
continue;
}
OCF_DEBUG_PARAM(cache, "Hit, cache line %u, core line = %llu",
entry->coll_idx, entry->core_line);
/* Update eviction (LRU) */
ocf_eviction_set_hot_cache_line(cache, entry->coll_idx);
ocf_engine_update_rq_info(cache, rq, i);
}
OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ?
"Yes" : "No");
}
int ocf_engine_check(struct ocf_request *rq)
{
int result = 0;
uint32_t i;
uint64_t core_line;
struct ocf_cache *cache = rq->cache;
OCF_DEBUG_TRACE(rq->cache);
ocf_rq_clear_info(rq);
rq->info.seq_req = true;
for (i = 0, core_line = rq->core_line_first;
core_line <= rq->core_line_last; core_line++, i++) {
struct ocf_map_info *entry = &(rq->map[i]);
if (entry->status == LOOKUP_MISS) {
rq->info.seq_req = false;
continue;
}
if (_ocf_engine_check_map_entry(cache, entry, rq->core_id)) {
/* Mapping is invalid */
entry->invalid = true;
rq->info.seq_req = false;
OCF_DEBUG_PARAM(cache, "Invalid, Cache line %u",
entry->coll_idx);
result = -1;
} else {
entry->invalid = false;
OCF_DEBUG_PARAM(cache, "Valid, Cache line %u",
entry->coll_idx);
ocf_engine_update_rq_info(cache, rq, i);
}
}
OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ?
"Yes" : "No");
return result;
}
static void ocf_engine_map_cache_line(struct ocf_request *rq,
uint64_t core_line, unsigned int hash_index,
ocf_cache_line_t *cache_line)
{
struct ocf_cache *cache = rq->cache;
ocf_part_id_t part_id = rq->part_id;
ocf_cleaning_t clean_policy_type;
if (cache->device->freelist_part->curr_size == 0) {
rq->info.eviction_error = 1;
return;
}
*cache_line = cache->device->freelist_part->head;
/* add_to_collision_list changes .next_col and other fields for the entry,
 * so last_cache_line_give must be updated before calling it.
 */
ocf_metadata_remove_from_free_list(cache, *cache_line);
ocf_metadata_add_to_partition(cache, part_id, *cache_line);
/* Add the block to the corresponding collision list */
ocf_metadata_add_to_collision(cache, rq->core_id, core_line, hash_index,
*cache_line);
ocf_eviction_init_cache_line(cache, *cache_line, part_id);
/* Update LRU:: Move this node to head of lru list. */
ocf_eviction_set_hot_cache_line(cache, *cache_line);
/* Update dirty cache-block list */
clean_policy_type = cache->conf_meta->cleaning_policy_type;
ENV_BUG_ON(clean_policy_type >= ocf_cleaning_max);
if (cleaning_policy_ops[clean_policy_type].init_cache_block != NULL)
cleaning_policy_ops[clean_policy_type].
init_cache_block(cache, *cache_line);
}
static void ocf_engine_map_hndl_error(struct ocf_cache *cache,
struct ocf_request *rq)
{
uint32_t i;
struct ocf_map_info *entry;
for (i = 0; i < rq->core_line_count; i++) {
entry = &(rq->map[i]);
switch (entry->status) {
case LOOKUP_HIT:
case LOOKUP_MISS:
break;
case LOOKUP_MAPPED:
OCF_DEBUG_RQ(rq, "Canceling cache line %u",
entry->coll_idx);
set_cache_line_invalid_no_flush(cache, 0,
ocf_line_end_sector(cache),
entry->coll_idx);
break;
default:
ENV_BUG();
break;
}
}
}
void ocf_engine_map(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
uint32_t i;
struct ocf_map_info *entry;
uint64_t core_line;
int status = LOOKUP_MAPPED;
ocf_core_id_t core_id = rq->core_id;
if (ocf_engine_unmapped_count(rq))
status = space_managment_evict_do(cache, rq,
ocf_engine_unmapped_count(rq));
if (rq->info.eviction_error)
return;
ocf_rq_clear_info(rq);
rq->info.seq_req = true;
OCF_DEBUG_TRACE(rq->cache);
for (i = 0, core_line = rq->core_line_first;
core_line <= rq->core_line_last; core_line++, i++) {
entry = &(rq->map[i]);
ocf_engine_lookup_map_entry(cache, entry, core_id, core_line);
if (entry->status != LOOKUP_HIT) {
ocf_engine_map_cache_line(rq, entry->core_line,
entry->hash_key, &entry->coll_idx);
if (rq->info.eviction_error) {
/*
* Eviction error (mapping error), need to
* clean, return and do pass through
*/
OCF_DEBUG_RQ(rq, "Eviction ERROR when mapping");
ocf_engine_map_hndl_error(cache, rq);
break;
}
entry->status = status;
}
OCF_DEBUG_PARAM(rq->cache,
"%s, cache line %u, core line = %llu",
entry->status == LOOKUP_HIT ? "Hit" : "Map",
entry->coll_idx, entry->core_line);
ocf_engine_update_rq_info(cache, rq, i);
}
OCF_DEBUG_PARAM(rq->cache, "Sequential - %s", rq->info.seq_req ?
"Yes" : "No");
}
static void _ocf_engine_clean_end(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error) {
OCF_DEBUG_RQ(rq, "Cleaning ERROR");
rq->error |= error;
/* End the request and do not continue processing */
ocf_rq_unlock(rq);
/* Complete request */
rq->complete(rq, error);
/* Release OCF request */
ocf_rq_put(rq);
} else {
rq->info.dirty_any = 0;
rq->info.dirty_all = 0;
ocf_engine_push_rq_front(rq, true);
}
}
static int _ocf_engine_clean_getter(struct ocf_cache *cache,
void *getter_context, uint32_t item, ocf_cache_line_t *line)
{
struct ocf_cleaner_attribs *attribs = getter_context;
struct ocf_request *rq = attribs->cmpl_context;
for (; attribs->getter_item < rq->core_line_count;
attribs->getter_item++) {
struct ocf_map_info *entry = &rq->map[attribs->getter_item];
if (entry->status != LOOKUP_HIT)
continue;
if (!metadata_test_dirty(cache, entry->coll_idx))
continue;
/* Line to be cleaned found, go to next item and return */
*line = entry->coll_idx;
attribs->getter_item++;
return 0;
}
return -1;
}
void ocf_engine_clean(struct ocf_request *rq)
{
/* Initialize attributes for cleaner */
struct ocf_cleaner_attribs attribs = {
.cache_line_lock = false,
.cmpl_context = rq,
.cmpl_fn = _ocf_engine_clean_end,
.getter = _ocf_engine_clean_getter,
.getter_context = &attribs,
.getter_item = 0,
.count = rq->info.dirty_any,
.io_queue = rq->io_queue
};
/* Start cleaning */
ocf_cleaner_fire(rq->cache, &attribs);
}
void ocf_engine_update_block_stats(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
ocf_core_id_t core_id = rq->core_id;
ocf_part_id_t part_id = rq->part_id;
struct ocf_counters_block *blocks;
blocks = &cache->core_obj[core_id].counters->
part_counters[part_id].blocks;
if (rq->rw == OCF_READ)
env_atomic64_add(rq->byte_length, &blocks->read_bytes);
else if (rq->rw == OCF_WRITE)
env_atomic64_add(rq->byte_length, &blocks->write_bytes);
else
ENV_BUG();
}
void ocf_engine_update_request_stats(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
ocf_core_id_t core_id = rq->core_id;
ocf_part_id_t part_id = rq->part_id;
struct ocf_counters_req *reqs;
switch (rq->rw) {
case OCF_READ:
reqs = &cache->core_obj[core_id].counters->
part_counters[part_id].read_reqs;
break;
case OCF_WRITE:
reqs = &cache->core_obj[core_id].counters->
part_counters[part_id].write_reqs;
break;
default:
ENV_BUG();
}
env_atomic64_inc(&reqs->total);
if (rq->info.hit_no == 0)
env_atomic64_inc(&reqs->full_miss);
else if (rq->info.hit_no < rq->core_line_count)
env_atomic64_inc(&reqs->partial_miss);
}
void ocf_engine_push_rq_back(struct ocf_request *rq, bool allow_sync)
{
struct ocf_cache *cache = rq->cache;
struct ocf_queue *q = NULL;
unsigned long lock_flags;
INIT_LIST_HEAD(&rq->list);
ENV_BUG_ON(rq->io_queue >= cache->io_queues_no);
q = &cache->io_queues[rq->io_queue];
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
list_add_tail(&rq->list, &q->io_list);
env_atomic_inc(&q->io_no);
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
if (!rq->info.internal)
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
ctx_queue_kick(cache->owner, q, allow_sync);
}
void ocf_engine_push_rq_front(struct ocf_request *rq, bool allow_sync)
{
struct ocf_cache *cache = rq->cache;
struct ocf_queue *q = NULL;
unsigned long lock_flags;
INIT_LIST_HEAD(&rq->list);
ENV_BUG_ON(rq->io_queue >= cache->io_queues_no);
q = &cache->io_queues[rq->io_queue];
env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags);
list_add(&rq->list, &q->io_list);
env_atomic_inc(&q->io_no);
env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags);
if (!rq->info.internal)
env_atomic_set(&cache->last_access_ms,
env_ticks_to_msecs(env_get_tick_count()));
ctx_queue_kick(cache->owner, q, allow_sync);
}
void ocf_engine_push_rq_front_if(struct ocf_request *rq,
const struct ocf_io_if *io_if,
bool allow_sync)
{
rq->error = 0; /* Please explain why!!! */
rq->io_if = io_if;
ocf_engine_push_rq_front(rq, allow_sync);
}
void inc_fallback_pt_error_counter(ocf_cache_t cache)
{
ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0);
if (cache->fallback_pt_error_threshold == OCF_CACHE_FALLBACK_PT_INACTIVE)
return;
if (env_atomic_inc_return(&cache->fallback_pt_error_counter) ==
cache->fallback_pt_error_threshold) {
ocf_cache_log(cache, log_info, "Error threshold reached. "
"Fallback Pass Through activated\n");
}
}
static int _ocf_engine_refresh(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
int result;
OCF_METADATA_LOCK_RD();
/* Check under metadata RD lock */
result = ocf_engine_check(rq);
OCF_METADATA_UNLOCK_RD();
if (result == 0) {
/* Refresh successful, can proceed with the original IO interface */
rq->io_if = rq->priv;
rq->resume = NULL;
rq->priv = NULL;
if (rq->rw == OCF_READ)
rq->io_if->read(rq);
else if (rq->rw == OCF_WRITE)
rq->io_if->write(rq);
else
ENV_BUG();
} else {
ENV_WARN(true, OCF_PREFIX_SHORT" Inconsistent request");
rq->error = -EINVAL;
/* Complete request */
rq->complete(rq, rq->error);
/* Release WRITE lock of request */
ocf_rq_unlock(rq);
/* Release OCF request */
ocf_rq_put(rq);
}
return 0;
}
static const struct ocf_io_if _io_if_refresh = {
.read = _ocf_engine_refresh,
.write = _ocf_engine_refresh,
};
void ocf_engine_on_resume(struct ocf_request *rq)
{
ENV_BUG_ON(rq->priv);
ENV_BUG_ON(ocf_engine_on_resume != rq->resume);
OCF_CHECK_NULL(rq->io_if);
/* Exchange IO interface */
rq->priv = (void *)rq->io_if;
OCF_DEBUG_RQ(rq, "On resume");
ocf_engine_push_rq_front_if(rq, &_io_if_refresh, false);
}
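
/* Editorial note: in ocf_engine_update_rq_info() only the first and the last cache
 * line of a request may be partially covered: the first line starts at the request
 * offset within the line, the last line ends at the request end offset, and every
 * middle line spans the whole line. A standalone sketch of that clipping arithmetic,
 * assuming 4 KiB cache lines and 512 B sectors (OCF derives these values via
 * ocf_line_sectors()/ocf_line_end_sector(); the constants here are illustrative).
 */
#include <stdint.h>

#define EX_SECTOR_SIZE	512u
#define EX_LINE_SECTORS	8u	/* 4 KiB line / 512 B sector */

static void clip_entry(uint64_t byte_position, uint32_t byte_length,
		uint32_t entry, uint32_t line_count,
		uint8_t *start_sector, uint8_t *end_sector)
{
	*start_sector = 0;
	*end_sector = EX_LINE_SECTORS - 1;

	if (entry == 0)
		*start_sector = (byte_position / EX_SECTOR_SIZE)
				% EX_LINE_SECTORS;

	if (entry == line_count - 1)
		*end_sector = ((byte_position + byte_length - 1)
				/ EX_SECTOR_SIZE) % EX_LINE_SECTORS;
}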

223
src/engine/engine_common.h Normal file
View File

@ -0,0 +1,223 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_COMMON_H_
#define ENGINE_COMMON_H_
#include "../ocf_request.h"
/**
* @file engine_common.h
* @brief OCF cache engine common module
*/
/**
* @brief Signal and handle OCF request error
*
* @param rq OCF request
* @param stop_cache Indicates if OCF cache engine need to be stopped
* @param msg Error message to be printed into log
*/
void ocf_engine_error(struct ocf_request *rq, bool stop_cache,
const char *msg);
/**
* @brief Check if OCF request is hit
*
* @param rq OCF request
*
* @retval true HIT
* @retval false MISS
*/
static inline bool ocf_engine_is_hit(struct ocf_request *rq)
{
return rq->info.hit_no == rq->core_line_count;
}
/**
* @brief Check if OCF request is miss
*
* @param rq OCF request
*
* @retval true MISS
* @retval false HIT
*/
#define ocf_engine_is_miss(rq) (!ocf_engine_is_hit(rq))
/**
* @brief Check if all cache lines are mapped fully
*
* @param rq OCF request
*
* @retval true request is mapped fully
* @retval false request is not mapped fully and eviction might be run in
* order to complete mapping
*/
static inline bool ocf_engine_is_mapped(struct ocf_request *rq)
{
return rq->info.hit_no + rq->info.invalid_no == rq->core_line_count;
}
/**
* @brief Check if all cache lines are dirty
*
* @param rq OCF request
*
* @retval true request is dirty fully
* @retval false request is not dirty fully
*/
static inline bool ocf_engine_is_dirty_all(struct ocf_request *rq)
{
return rq->info.dirty_all == rq->core_line_count;
}
/**
* @brief Get number of mapped cache lines
*
* @param rq OCF request
*
* @return Number of mapped cache lines
*/
static inline uint32_t ocf_engine_mapped_count(struct ocf_request *rq)
{
return rq->info.hit_no + rq->info.invalid_no;
}
/**
* @brief Get number of unmapped cache lines
*
* @param rq OCF request
*
* @return Number of unmapped cache lines
*/
static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *rq)
{
return rq->core_line_count - (rq->info.hit_no + rq->info.invalid_no);
}
/**
* @brief Get number of IOs to perform cache read or write
*
* @param rq OCF request
*
* @return Count of cache IOs
*/
static inline uint32_t ocf_engine_io_count(struct ocf_request *rq)
{
return rq->info.seq_req ? 1 : rq->core_line_count;
}
/**
* @brief Clean request (flush dirty data to the core device)
*
* @param rq OCF request
*
* @note After successful cleaning:
* - Dirty status bits in request info will be cleared
* - Request will be pushed front, <B>IO interface needs to be set</B>
*
* @note In case of failure:
* - unlock request
* - complete request to the application
* - free request
*/
void ocf_engine_clean(struct ocf_request *rq);
void ocf_engine_lookup_map_entry(struct ocf_cache *cache,
struct ocf_map_info *entry, ocf_core_id_t core_id,
uint64_t core_line);
/**
* @brief Traverse the request in order to look up cache lines. If there are
* misses, eviction needs to be called. This process is called 'mapping'.
*
* @note This function CALL EVICTION
*
* @param rq OCF request
*/
void ocf_engine_map(struct ocf_request *rq);
/**
* @brief Traverse OCF request (lookup cache)
*
* @note This function DOES NOT CALL EVICTION. Only a metadata lookup is
* performed. The main purpose of this function is to check if there is a HIT.
*
* @param rq OCF request
*/
void ocf_engine_traverse(struct ocf_request *rq);
/**
* @brief Check if OCF request mapping is still valid
*
* @note If a mapping entry is invalid, it will be marked
*
* @param rq OCF request
*
* @retval 0 - OCF request mapping is valid
* @return Non zero - OCF request mapping is invalid and re-mapping needs to be called
*/
int ocf_engine_check(struct ocf_request *rq);
/**
* @brief Update OCF request info
*
* @param rq OCF request
*/
void ocf_engine_update_rq_info(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t entry);
/**
* @brief Update OCF request block statistics for an exported object
*
* @param rq OCF request
*/
void ocf_engine_update_block_stats(struct ocf_request *rq);
/**
* @brief Update OCF request statistics for an exported object
* (not applicable to write wi and to read wt)
*
* @param rq OCF request
*/
void ocf_engine_update_request_stats(struct ocf_request *rq);
/**
* @brief Push back OCF request to the OCF thread worker queue
*
* @param rq OCF request
* @param allow_sync caller allows for request from queue to be run immediately
from push function in caller context
*/
void ocf_engine_push_rq_back(struct ocf_request *rq,
bool allow_sync);
/**
* @brief Push front OCF request to the OCF thread worker queue
*
* @param rq OCF request
* @param allow_sync caller allows for request from queue to be run immediately
from push function in caller context
*/
void ocf_engine_push_rq_front(struct ocf_request *rq,
bool allow_sync);
/**
* @brief Set the IO interface and push the OCF request to the front of the OCF thread worker queue
*
* @param rq OCF request
* @param io_if IO interface
* @param allow_sync caller allows for request from queue to be run immediately
from push function in caller context
*/
void ocf_engine_push_rq_front_if(struct ocf_request *rq,
const struct ocf_io_if *io_if,
bool allow_sync);
void inc_fallback_pt_error_counter(ocf_cache_t cache);
void ocf_engine_on_resume(struct ocf_request *rq);
#endif /* ENGINE_COMMON_H_ */
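
/* Editorial note: the helpers above all reduce to two counters collected during
 * traversal: hit_no (lines found with valid data) and invalid_no (lines mapped in
 * cache but without valid data for the requested sectors). A worked example with
 * hypothetical numbers, for a request covering 4 core lines:
 *
 *	hit_no = 2, invalid_no = 1, core_line_count = 4
 *	ocf_engine_is_hit()         : 2 == 4         -> false
 *	ocf_engine_is_mapped()      : 2 + 1 == 4     -> false
 *	ocf_engine_mapped_count()   : 2 + 1          -> 3
 *	ocf_engine_unmapped_count() : 4 - (2 + 1)    -> 1 (eviction must supply it)
 *	ocf_engine_io_count()       : seq_req ? 1 : 4
 */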

72
src/engine/engine_d2c.c Normal file
View File

@ -0,0 +1,72 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_d2c.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "d2c"
#include "engine_debug.h"
static void _ocf_d2c_completion(void *private_data, int error)
{
struct ocf_request *rq = private_data;
rq->error = error;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error) {
rq->info.core_error = 1;
if (rq->rw == OCF_READ) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
core_errors.read);
} else {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
core_errors.write);
}
}
/* Complete request */
rq->complete(rq, rq->error);
/* Release OCF request */
ocf_rq_put(rq);
}
int ocf_io_d2c(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
OCF_DEBUG_TRACE(rq->cache);
ocf_io_start(rq->io);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, rq->rw,
_ocf_d2c_completion, rq);
ocf_engine_update_block_stats(rq);
if (rq->rw == OCF_READ) {
env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
part_counters[rq->part_id].read_reqs.pass_through);
} else {
env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
part_counters[rq->part_id].write_reqs.pass_through);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}

11
src/engine/engine_d2c.h Normal file
View File

@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_2DC_H_
#define ENGINE_2DC_H_
int ocf_io_d2c(struct ocf_request *rq);
#endif /* ENGINE_2DC_H_ */

48
src/engine/engine_debug.h Normal file
View File

@ -0,0 +1,48 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_DEBUG_H_
#define ENGINE_DEBUG_H_
#ifndef OCF_ENGINE_DEBUG
#define OCF_ENGINE_DEBUG 0
#endif
#if 1 == OCF_ENGINE_DEBUG
#ifndef OCF_ENGINE_DEBUG_IO_NAME
#define OCF_ENGINE_DEBUG_IO_NAME "null"
#endif
#define OCF_DEBUG_PREFIX "[Engine][%s] %s "
#define OCF_DEBUG_LOG(cache, format, ...) \
ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
format"\n", OCF_ENGINE_DEBUG_IO_NAME, __func__, \
##__VA_ARGS__)
#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")
#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)
#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
##__VA_ARGS__)
#define OCF_DEBUG_RQ(rq, format, ...) \
ocf_cache_log(rq->cache, log_info, "[Engine][%s][%s, %llu, %u] %s - " \
format"\n", OCF_ENGINE_DEBUG_IO_NAME, \
OCF_READ == (rq)->rw ? "RD" : "WR", rq->byte_position, \
rq->byte_length, __func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_PREFIX
#define OCF_DEBUG_LOG(cache, format, ...)
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#define OCF_DEBUG_RQ(rq, format, ...)
#endif
#endif /* ENGINE_DEBUG_H_ */
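
/* Editorial note: each engine translation unit opts into these macros by defining
 * OCF_ENGINE_DEBUG and the IO name before including this header, as engine_fast.c
 * and engine_discard.c already do. For a hypothetical new engine it would look like:
 *
 *	#define OCF_ENGINE_DEBUG 1
 *	#define OCF_ENGINE_DEBUG_IO_NAME "my_engine"
 *	#include "engine_debug.h"
 *
 * OCF_DEBUG_RQ(rq, "Submit") then logs lines of the form
 * [Engine][my_engine][RD, <byte_position>, <byte_length>] <function> - Submit.
 */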

248
src/engine/engine_discard.c Normal file
View File

@ -0,0 +1,248 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_discard.h"
#include "../metadata/metadata.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG 0
#define OCF_ENGINE_DEBUG_IO_NAME "discard"
#include "engine_debug.h"
static int _ocf_discard_step_do(struct ocf_request *rq);
static int _ocf_discard_step(struct ocf_request *rq);
static int _ocf_discard_flush_cache(struct ocf_request *rq);
static int _ocf_discard_core(struct ocf_request *rq);
static const struct ocf_io_if _io_if_discard_step = {
.read = _ocf_discard_step,
.write = _ocf_discard_step
};
static const struct ocf_io_if _io_if_discard_step_resume = {
.read = _ocf_discard_step_do,
.write = _ocf_discard_step_do
};
static const struct ocf_io_if _io_if_discard_flush_cache = {
.read = _ocf_discard_flush_cache,
.write = _ocf_discard_flush_cache,
};
static const struct ocf_io_if _io_if_discard_core = {
.read = _ocf_discard_core,
.write = _ocf_discard_core
};
static void _ocf_discard_complete_rq(struct ocf_request *rq, int error)
{
rq->complete(rq, error);
ocf_rq_put(rq);
}
static void _ocf_discard_core_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
OCF_DEBUG_RQ(rq, "Core DISCARD Completion");
_ocf_discard_complete_rq(rq, error);
}
static int _ocf_discard_core(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
ocf_submit_obj_discard(&cache->core_obj[rq->core_id].obj, rq,
_ocf_discard_core_io, rq);
return 0;
}
static void _ocf_discard_cache_flush_io_cmpl(void *priv, int error)
{
struct ocf_request *rq = priv;
if (error) {
ocf_metadata_error(rq->cache);
_ocf_discard_complete_rq(rq, error);
return;
}
rq->io_if = &_io_if_discard_core;
ocf_engine_push_rq_front(rq, true);
}
static int _ocf_discard_flush_cache(struct ocf_request *rq)
{
ocf_submit_obj_flush(&rq->cache->device->obj,
_ocf_discard_cache_flush_io_cmpl, rq);
return 0;
}
static void _ocf_discard_finish_step(struct ocf_request *rq)
{
rq->discard.handled += BYTES_TO_SECTORS(rq->byte_length);
if (rq->discard.handled < rq->discard.nr_sects)
rq->io_if = &_io_if_discard_step;
else if (rq->cache->device->init_mode != ocf_init_mode_metadata_volatile)
rq->io_if = &_io_if_discard_flush_cache;
else
rq->io_if = &_io_if_discard_core;
ocf_engine_push_rq_front(rq, true);
}
static void _ocf_discard_step_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
/* Release WRITE lock of request */
ocf_rq_unlock_wr(rq);
if (rq->error) {
ocf_metadata_error(rq->cache);
_ocf_discard_complete_rq(rq, rq->error);
return;
}
_ocf_discard_finish_step(rq);
}
int _ocf_discard_step_do(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
env_atomic_set(&rq->req_remaining, 1); /* One core IO */
if (ocf_engine_mapped_count(rq)) {
/* There are mapped cache lines, need to remove them */
OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(rq);
if (rq->info.flush_metadata) {
/* Request was dirty and needs a metadata flush */
ocf_metadata_flush_do_asynch(cache, rq,
_ocf_discard_step_io);
}
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}
OCF_DEBUG_RQ(rq, "Discard");
_ocf_discard_step_io(rq, 0);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
static void _ocf_discard_on_resume(struct ocf_request *rq)
{
OCF_DEBUG_RQ(rq, "On resume");
ocf_engine_push_rq_front(rq, true);
}
static int _ocf_discard_step(struct ocf_request *rq)
{
int lock;
struct ocf_cache *cache = rq->cache;
OCF_DEBUG_TRACE(rq->cache);
rq->byte_position = SECTORS_TO_BYTES(rq->discard.sector +
rq->discard.handled);
rq->byte_length = MIN(SECTORS_TO_BYTES(rq->discard.nr_sects -
rq->discard.handled), MAX_TRIM_RQ_SIZE);
rq->core_line_first = ocf_bytes_2_lines(cache, rq->byte_position);
rq->core_line_last =
ocf_bytes_2_lines(cache, rq->byte_position + rq->byte_length - 1);
rq->core_line_count = rq->core_line_last - rq->core_line_first + 1;
rq->io_if = &_io_if_discard_step_resume;
OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
ENV_BUG_ON(env_memset(rq->map, sizeof(*rq->map) * rq->core_line_count,
0));
/* Traverse to check if the request is fully mapped */
ocf_engine_traverse(rq);
if (ocf_engine_mapped_count(rq)) {
/* Some cache lines are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
} else {
lock = OCF_LOCK_ACQUIRED;
}
OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
if (lock >= 0) {
if (OCF_LOCK_ACQUIRED == lock) {
_ocf_discard_step_do(rq);
} else {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK")
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->error |= lock;
_ocf_discard_finish_step(rq);
}
env_cond_resched();
return 0;
}
int ocf_discard(struct ocf_request *rq)
{
OCF_DEBUG_TRACE(rq->cache);
ocf_io_start(rq->io);
if (rq->rw == OCF_READ) {
rq->complete(rq, -EINVAL);
return 0;
}
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* Set resume call backs */
rq->resume = _ocf_discard_on_resume;
_ocf_discard_step(rq);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
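
/* Editorial note: a discard is processed in steps: each step covers at most
 * MAX_TRIM_RQ_SIZE bytes, the handled sector count advances, and the final step ends
 * with a cache flush (unless metadata is volatile) followed by the core discard.
 * Below is a standalone sketch of just the chunking loop; the MAX_TRIM_RQ_SIZE value
 * is assumed for the example, it is not taken from OCF.
 */
#include <stdint.h>

#define EX_SECTOR_SHIFT		9
#define EX_MAX_TRIM_RQ_SIZE	(512 * 1024u)	/* assumed value */

static void discard_steps(uint64_t start_sector, uint64_t nr_sects)
{
	uint64_t handled = 0;

	while (handled < nr_sects) {
		uint64_t pos = (start_sector + handled) << EX_SECTOR_SHIFT;
		uint64_t remaining = (nr_sects - handled) << EX_SECTOR_SHIFT;
		uint64_t len = remaining < EX_MAX_TRIM_RQ_SIZE ?
				remaining : EX_MAX_TRIM_RQ_SIZE;

		/* ... purge mapped cache lines for [pos, pos + len) ... */
		(void)pos;

		handled += len >> EX_SECTOR_SHIFT;
	}
}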

11
src/engine/engine_discard.h Normal file
View File

@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __ENGINE_DISCARD_H__
#define __ENGINE_DISCARD_H__
int ocf_discard(struct ocf_request *rq);
#endif

235
src/engine/engine_fast.c Normal file
View File

@ -0,0 +1,235 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_fast.h"
#include "engine_common.h"
#include "engine_pt.h"
#include "engine_wb.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_part.h"
#include "../utils/utils_io.h"
#include "../concurrency/ocf_concurrency.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG 0
#define OCF_ENGINE_DEBUG_IO_NAME "fast"
#include "engine_debug.h"
/* _____ _ ______ _ _____ _ _
* | __ \ | | | ____| | | | __ \ | | | |
* | |__) |___ __ _ __| | | |__ __ _ ___| |_ | |__) |_ _| |_| |__
* | _ // _ \/ _` |/ _` | | __/ _` / __| __| | ___/ _` | __| '_ \
* | | \ \ __/ (_| | (_| | | | | (_| \__ \ |_ | | | (_| | |_| | | |
* |_| \_\___|\__,_|\__,_| |_| \__,_|___/\__| |_| \__,_|\__|_| |_|
*/
static void _ocf_read_fast_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (env_atomic_dec_return(&rq->req_remaining)) {
/* Not all requests finished */
return;
}
OCF_DEBUG_RQ(rq, "HIT completion");
if (rq->error) {
OCF_DEBUG_RQ(rq, "ERROR");
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.read);
ocf_engine_push_rq_front_pt(rq);
} else {
ocf_rq_unlock(rq);
/* Complete request */
rq->complete(rq, rq->error);
/* Free the request at the last point of the completion path */
ocf_rq_put(rq);
}
}
static int _ocf_read_fast_do(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
if (ocf_engine_is_miss(rq)) {
/* It seems that after resume the request is now a MISS, do PT */
OCF_DEBUG_RQ(rq, "Switching to read PT");
ocf_read_pt_do(rq);
return 0;
}
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
OCF_METADATA_LOCK_WR();
/* Probably some cache lines are assigned to the wrong
 * partition. Need to move them to a new one
 */
ocf_part_move(rq);
OCF_METADATA_UNLOCK_WR();
}
/* Submit IO */
OCF_DEBUG_RQ(rq, "Submit");
env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ,
ocf_engine_io_count(rq), _ocf_read_fast_io, rq);
/* Update statistics */
ocf_engine_update_request_stats(rq);
ocf_engine_update_block_stats(rq);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
static const struct ocf_io_if _io_if_read_fast_resume = {
.read = _ocf_read_fast_do,
.write = _ocf_read_fast_do,
};
int ocf_read_fast(struct ocf_request *rq)
{
bool hit;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_read_fast_resume;
/*- Metadata RD access -----------------------------------------------*/
OCF_METADATA_LOCK_RD();
/* Traverse request to check if there is a hit */
ocf_engine_traverse(rq);
hit = ocf_engine_is_hit(rq);
if (hit) {
ocf_io_start(rq->io);
lock = ocf_rq_trylock_rd(rq);
}
OCF_METADATA_UNLOCK_RD();
if (hit) {
OCF_DEBUG_RQ(rq, "Fast path success");
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* Lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
} else {
/* Lock was acquired can perform IO */
_ocf_read_fast_do(rq);
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR");
rq->complete(rq, lock);
ocf_rq_put(rq);
}
} else {
OCF_DEBUG_RQ(rq, "Fast path failure");
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
if (hit)
return OCF_FAST_PATH_YES;
else
return OCF_FAST_PATH_NO;
}
/* __ __ _ _ ______ _ _____ _ _
* \ \ / / (_) | | ____| | | | __ \ | | | |
* \ \ /\ / / __ _| |_ ___ | |__ __ _ ___| |_ | |__) |_ _| |_| |__
* \ \/ \/ / '__| | __/ _ \ | __/ _` / __| __| | ___/ _` | __| '_ \
* \ /\ /| | | | || __/ | | | (_| \__ \ |_ | | | (_| | |_| | | |
* \/ \/ |_| |_|\__\___| |_| \__,_|___/\__| |_| \__,_|\__|_| |_|
*/
static const struct ocf_io_if _io_if_write_fast_resume = {
.read = ocf_write_wb_do,
.write = ocf_write_wb_do,
};
int ocf_write_fast(struct ocf_request *rq)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_write_fast_resume;
/*- Metadata RD access -----------------------------------------------*/
OCF_METADATA_LOCK_RD();
/* Traverse request to check if there is a hit */
ocf_engine_traverse(rq);
mapped = ocf_engine_is_mapped(rq);
if (mapped) {
ocf_io_start(rq->io);
lock = ocf_rq_trylock_wr(rq);
}
OCF_METADATA_UNLOCK_RD();
if (mapped) {
if (lock >= 0) {
OCF_DEBUG_RQ(rq, "Fast path success");
if (lock != OCF_LOCK_ACQUIRED) {
/* Lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
} else {
/* Lock was acquired can perform IO */
ocf_write_wb_do(rq);
}
} else {
OCF_DEBUG_RQ(rq, "Fast path lock failure");
rq->complete(rq, lock);
ocf_rq_put(rq);
}
} else {
OCF_DEBUG_RQ(rq, "Fast path failure");
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return mapped ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO;
}

12
src/engine/engine_fast.h Normal file
View File

@ -0,0 +1,12 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_FAST_H_
#define ENGINE_FAST_H_
int ocf_read_fast(struct ocf_request *rq);
int ocf_write_fast(struct ocf_request *rq);
#endif /* ENGINE_FAST_H_ */

74
src/engine/engine_inv.c Normal file
View File

@ -0,0 +1,74 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "inv"
#include "engine_debug.h"
static void _ocf_invalidate_rq(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error) {
rq->error = error;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.write);
}
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error)
ocf_engine_error(rq, true, "Failed to flush metadata to cache");
ocf_rq_unlock(rq);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
}
static int _ocf_invalidate_do(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
ENV_BUG_ON(env_atomic_read(&rq->req_remaining));
OCF_METADATA_LOCK_WR();
ocf_purge_map_info(rq);
OCF_METADATA_UNLOCK_WR();
env_atomic_inc(&rq->req_remaining);
if (ocf_data_obj_is_atomic(&cache->device->obj) &&
rq->info.flush_metadata) {
/* Metadata flush IO */
ocf_metadata_flush_do_asynch(cache, rq, _ocf_invalidate_rq);
}
_ocf_invalidate_rq(rq, 0);
return 0;
}
static const struct ocf_io_if _io_if_invalidate = {
.read = _ocf_invalidate_do,
.write = _ocf_invalidate_do,
};
void ocf_engine_invalidate(struct ocf_request *rq)
{
ocf_engine_push_rq_front_if(rq, &_io_if_invalidate, true);
}

11
src/engine/engine_inv.h Normal file
View File

@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_INV_H_
#define ENGINE_INV_H_
void ocf_engine_invalidate(struct ocf_request *rq);
#endif /* ENGINE_INV_H_ */

65
src/engine/engine_ops.c Normal file
View File

@ -0,0 +1,65 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "engine_ops.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#define OCF_ENGINE_DEBUG_IO_NAME "ops"
#include "engine_debug.h"
static void _ocf_engine_ops_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error) {
/* An error occurred */
ocf_engine_error(rq, false, "Core operation failure");
}
/* Complete requests - both to cache and to core*/
rq->complete(rq, rq->error);
/* Release OCF request */
ocf_rq_put(rq);
}
int ocf_engine_ops(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
OCF_DEBUG_TRACE(rq->cache);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* IO to the core device and to the cache device */
env_atomic_set(&rq->req_remaining, 2);
/* Submit operation into core device */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, rq->rw,
_ocf_engine_ops_io, rq);
ocf_submit_cache_reqs(cache, rq->map, rq, rq->rw,
1, _ocf_engine_ops_io, rq);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}

11
src/engine/engine_ops.h Normal file
View File

@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __CACHE_ENGINE_OPS_H_
#define __CACHE_ENGINE_OPS_H_
int ocf_engine_ops(struct ocf_request *rq);
#endif /* __CACHE_ENGINE_OPS_H_ */

181
src/engine/engine_pt.c Normal file
View File

@ -0,0 +1,181 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_pt.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "pt"
#include "engine_debug.h"
static void _ocf_read_pt_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error) {
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
core_errors.read);
}
/* Complete request */
rq->complete(rq, rq->error);
ocf_rq_unlock_rd(rq);
/* Release OCF request */
ocf_rq_put(rq);
}
static inline void _ocf_read_pt_submit(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
env_atomic_set(&rq->req_remaining, 1); /* Core device IO */
OCF_DEBUG_RQ(rq, "Submit");
/* Core read */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_READ,
_ocf_read_pt_io, rq);
}
int ocf_read_pt_do(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
if (rq->info.dirty_any) {
OCF_METADATA_LOCK_RD();
/* Need to clean, start it */
ocf_engine_clean(rq);
OCF_METADATA_UNLOCK_RD();
/* Do not continue processing, the request needs cleaning first */
ocf_rq_put(rq);
return 0;
}
if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
OCF_METADATA_LOCK_WR();
/* Probably some cache lines are assigned to the wrong
 * partition. Need to move them to a new one
 */
ocf_part_move(rq);
OCF_METADATA_UNLOCK_WR();
}
/* Submit read IO to the core */
_ocf_read_pt_submit(rq);
/* Update statistics */
ocf_engine_update_block_stats(rq);
env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
part_counters[rq->part_id].read_reqs.pass_through);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
static const struct ocf_io_if _io_if_pt_resume = {
.read = ocf_read_pt_do,
.write = ocf_read_pt_do,
};
int ocf_read_pt(struct ocf_request *rq)
{
bool use_cache = false;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
OCF_DEBUG_TRACE(rq->cache);
ocf_io_start(rq->io);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_pt_resume;
OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/
/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(rq);
if (rq->info.seq_cutoff && ocf_engine_is_dirty_all(rq)) {
use_cache = true;
} else {
if (ocf_engine_mapped_count(rq)) {
/* There are mapped cache lines,
* lock request for READ access
*/
lock = ocf_rq_trylock_rd(rq);
} else {
/* No mapped cache lines, no need to get lock */
lock = OCF_LOCK_ACQUIRED;
}
}
OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/
if (use_cache) {
/*
* There is a dirty HIT and sequential cutoff is active,
* so force reading the data from cache
*/
ocf_rq_clear(rq);
ocf_get_io_if(ocf_cache_mode_wt)->read(rq);
} else {
if (lock >= 0) {
if (lock == OCF_LOCK_ACQUIRED) {
/* Lock acquired, perform read operations */
ocf_read_pt_do(rq);
} else {
/* Lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
}
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
void ocf_engine_push_rq_front_pt(struct ocf_request *rq)
{
ocf_engine_push_rq_front_if(rq, &_io_if_pt_resume, true);
}

15
src/engine/engine_pt.h Normal file
View File

@ -0,0 +1,15 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_OFF_H_
#define ENGINE_OFF_H_
int ocf_read_pt(struct ocf_request *rq);
int ocf_read_pt_do(struct ocf_request *rq);
void ocf_engine_push_rq_front_pt(struct ocf_request *rq);
#endif /* ENGINE_OFF_H_ */

319
src/engine/engine_rd.c Normal file
View File

@ -0,0 +1,319 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_rd.h"
#include "engine_pt.h"
#include "engine_inv.h"
#include "engine_bf.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_io.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../ocf_def_priv.h"
#define OCF_ENGINE_DEBUG_IO_NAME "rd"
#include "engine_debug.h"
static void _ocf_read_generic_hit_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (rq->error)
inc_fallback_pt_error_counter(rq->cache);
/* Handle callback-caller race to let only one of the two complete the
* request. Also, complete original request only if this is the last
* sub-request to complete
*/
if (env_atomic_dec_return(&rq->req_remaining) == 0) {
OCF_DEBUG_RQ(rq, "HIT completion");
if (rq->error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].
counters->cache_errors.read);
ocf_engine_push_rq_front_pt(rq);
} else {
ocf_rq_unlock(rq);
/* Complete request */
rq->complete(rq, rq->error);
/* Free the request at the last point
* of the completion path
*/
ocf_rq_put(rq);
}
}
}
static void _ocf_read_generic_miss_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
struct ocf_cache *cache = rq->cache;
if (error)
rq->error = error;
/* Handle callback-caller race to let only one of the two complete the
* request. Also, complete original request only if this is the last
* sub-request to complete
*/
if (env_atomic_dec_return(&rq->req_remaining) == 0) {
OCF_DEBUG_RQ(rq, "MISS completion");
if (rq->error) {
/*
* --- Do not submit this request to write-back-thread.
* Stop it here ---
*/
rq->complete(rq, rq->error);
rq->info.core_error = 1;
env_atomic_inc(&cache->core_obj[rq->core_id].
counters->core_errors.read);
ctx_data_free(cache->owner, rq->cp_data);
rq->cp_data = NULL;
/* Invalidate metadata */
ocf_engine_invalidate(rq);
return;
}
/* Copy pages to copy vec, since this is the one needed
* by the above layer
*/
ctx_data_cpy(cache->owner, rq->cp_data, rq->data, 0, 0,
rq->byte_length);
/* Complete request */
rq->complete(rq, rq->error);
ocf_engine_backfill(rq);
}
}
static inline void _ocf_read_generic_submit_hit(struct ocf_request *rq)
{
env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ,
ocf_engine_io_count(rq), _ocf_read_generic_hit_io, rq);
}
static inline void _ocf_read_generic_submit_miss(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
int ret;
env_atomic_set(&rq->req_remaining, 1);
rq->cp_data = ctx_data_alloc(cache->owner,
BYTES_TO_PAGES(rq->byte_length));
if (!rq->cp_data)
goto err_alloc;
ret = ctx_data_mlock(cache->owner, rq->cp_data);
if (ret)
goto err_alloc;
/* Submit read request to core device. */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_READ,
_ocf_read_generic_miss_io, rq);
return;
err_alloc:
_ocf_read_generic_miss_io(rq, -ENOMEM);
}
static int _ocf_read_generic_do(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
if (ocf_engine_is_miss(rq) && rq->map->rd_locked) {
/* Miss can be handled only on write locks.
* Need to switch to PT
*/
OCF_DEBUG_RQ(rq, "Switching to PT");
ocf_read_pt_do(rq);
return 0;
}
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
if (ocf_engine_is_miss(rq)) {
if (rq->info.dirty_any) {
OCF_METADATA_LOCK_RD();
/* Request is dirty, need to clean it first */
ocf_engine_clean(rq);
OCF_METADATA_UNLOCK_RD();
/* We need to clean request before processing, return */
ocf_rq_put(rq);
return 0;
}
OCF_METADATA_LOCK_RD();
/* Set valid status bits map */
ocf_set_valid_map_info(rq);
OCF_METADATA_UNLOCK_RD();
}
if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
OCF_METADATA_LOCK_WR();
/* Probably some cache lines are assigned to the wrong
 * partition. Need to move them to a new one
 */
ocf_part_move(rq);
OCF_METADATA_UNLOCK_WR();
}
OCF_DEBUG_RQ(rq, "Submit");
/* Submit IO */
if (ocf_engine_is_hit(rq))
_ocf_read_generic_submit_hit(rq);
else
_ocf_read_generic_submit_miss(rq);
/* Update statistics */
ocf_engine_update_request_stats(rq);
ocf_engine_update_block_stats(rq);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
static const struct ocf_io_if _io_if_read_generic_resume = {
.read = _ocf_read_generic_do,
.write = _ocf_read_generic_do,
};
int ocf_read_generic(struct ocf_request *rq)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
ocf_io_start(rq->io);
if (env_atomic_read(&cache->pending_read_misses_list_blocked)) {
/* There are conditions to bypass IO */
ocf_get_io_if(ocf_cache_mode_pt)->read(rq);
return 0;
}
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_read_generic_resume;
/*- Metadata RD access -----------------------------------------------*/
OCF_METADATA_LOCK_RD();
/* Traverse request to check if there is a hit */
ocf_engine_traverse(rq);
mapped = ocf_engine_is_mapped(rq);
if (mapped) {
/* Request is fully mapped, no need to call eviction */
if (ocf_engine_is_hit(rq)) {
/* There is a hit, lock request for READ access */
lock = ocf_rq_trylock_rd(rq);
} else {
/* All cache lines mapped, but some sectors are not valid
* and cache insert will be performed - lock for
* WRITE is required
*/
lock = ocf_rq_trylock_wr(rq);
}
}
OCF_METADATA_UNLOCK_RD();
/*- END Metadata RD access -------------------------------------------*/
if (!mapped) {
/*- Metadata WR access ---------------------------------------*/
OCF_METADATA_LOCK_WR();
/* Now there is exclusive access for metadata. May traverse once
* again. If there are misses, eviction needs to be called. This
* process is called 'mapping'.
*/
ocf_engine_map(rq);
if (!rq->info.eviction_error) {
if (ocf_engine_is_hit(rq)) {
/* After mapping turns out there is hit,
* so lock OCF request for read access
*/
lock = ocf_rq_trylock_rd(rq);
} else {
/* Miss, new cache lines were mapped,
* need to lock OCF request for write access
*/
lock = ocf_rq_trylock_wr(rq);
}
}
OCF_METADATA_UNLOCK_WR();
/*- END Metadata WR access -----------------------------------*/
}
if (!rq->info.eviction_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* Lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
} else {
/* Lock was acquired can perform IO */
_ocf_read_generic_do(rq);
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
}
} else {
ocf_rq_clear(rq);
ocf_get_io_if(ocf_cache_mode_pt)->read(rq);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
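
/* Editorial note: the generic read path picks its cache-line lock from the traversal
 * result: a full hit only needs a read lock, while any line that will receive data
 * (invalid sectors, or lines mapped during eviction) needs a write lock so the
 * backfill can insert into them. A condensed sketch of that decision, using the
 * helpers from engine_common.h; the function name is made up for illustration.
 */
static int read_generic_trylock(struct ocf_request *rq)
{
	if (ocf_engine_is_hit(rq)) {
		/* Data is only read from cache lines - shared lock suffices */
		return ocf_rq_trylock_rd(rq);
	}

	/* Some lines will be filled from the core (backfill) - exclusive lock */
	return ocf_rq_trylock_wr(rq);
}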

11
src/engine/engine_rd.h Normal file
View File

@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_RD_H_
#define ENGINE_RD_H_
int ocf_read_generic(struct ocf_request *rq);
#endif /* ENGINE_RD_H_ */

92
src/engine/engine_wa.c Normal file
View File

@ -0,0 +1,92 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_wa.h"
#include "engine_common.h"
#include "cache_engine.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wa"
#include "engine_debug.h"
static void _ocf_read_wa_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error)
rq->error |= error;
if (env_atomic_dec_return(&rq->req_remaining))
return;
if (rq->error) {
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
core_errors.write);
}
/* Complete request */
rq->complete(rq, rq->error);
OCF_DEBUG_RQ(rq, "Completion");
/* Release OCF request */
ocf_rq_put(rq);
}
int ocf_write_wa(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
ocf_io_start(rq->io);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/
/* Traverse request to check if there are mapped cache lines */
ocf_engine_traverse(rq);
OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/
if (ocf_engine_is_hit(rq)) {
ocf_rq_clear(rq);
/* There is HIT, do WT */
ocf_get_io_if(ocf_cache_mode_wt)->write(rq);
} else if (ocf_engine_mapped_count(rq)) {
ocf_rq_clear(rq);
/* Partial MISS, do WI */
ocf_get_io_if(ocf_cache_mode_wi)->write(rq);
} else {
/* There is no mapped cache line, write directly into core */
OCF_DEBUG_RQ(rq, "Submit");
/* Submit write IO to the core */
env_atomic_set(&rq->req_remaining, 1);
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq,
OCF_WRITE, _ocf_read_wa_io, rq);
/* Update statistics */
ocf_engine_update_block_stats(rq);
env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
part_counters[rq->part_id].write_reqs.pass_through);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
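
/* Editorial note: write-around degenerates into one of three behaviours depending on
 * the traversal result: a full hit is redirected to write-through (keeps the cached
 * copy coherent), a partial mapping is redirected to write-invalidate, and a clean
 * miss goes straight to the core. A hypothetical condensation of that branch from
 * ocf_write_wa() above; the enum and function names are illustrative only.
 */
enum wa_action { WA_WRITE_THROUGH, WA_WRITE_INVALIDATE, WA_PASS_TO_CORE };

static enum wa_action wa_decide(struct ocf_request *rq)
{
	if (ocf_engine_is_hit(rq))
		return WA_WRITE_THROUGH;	/* keep cached copy up to date */
	if (ocf_engine_mapped_count(rq))
		return WA_WRITE_INVALIDATE;	/* drop stale partial mapping */
	return WA_PASS_TO_CORE;			/* clean miss - skip the cache */
}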

11
src/engine/engine_wa.h Normal file
View File

@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_WA_H_
#define ENGINE_WA_H_
int ocf_write_wa(struct ocf_request *rq);
#endif /* ENGINE_WA_H_ */

242
src/engine/engine_wb.c Normal file
View File

@ -0,0 +1,242 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "cache_engine.h"
#include "engine_common.h"
#include "engine_wb.h"
#include "../metadata/metadata.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wb"
#include "engine_debug.h"
static const struct ocf_io_if _io_if_wb_resume = {
.read = ocf_write_wb_do,
.write = ocf_write_wb_do,
};
static void _ocf_write_wb_update_bits(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
if (ocf_engine_is_miss(rq)) {
OCF_METADATA_LOCK_RD();
/* Update valid status bits */
ocf_set_valid_map_info(rq);
OCF_METADATA_UNLOCK_RD();
}
if (!ocf_engine_is_dirty_all(rq)) {
OCF_METADATA_LOCK_WR();
/* set dirty bits, and mark if metadata flushing is required */
ocf_set_dirty_map_info(rq);
OCF_METADATA_UNLOCK_WR();
}
}
static void _ocf_write_wb_io_flush_metadata(void *private_data, int error)
{
struct ocf_request *rq = (struct ocf_request *) private_data;
if (error)
rq->error = error;
if (env_atomic_dec_return(&rq->req_remaining))
return;
if (rq->error)
ocf_engine_error(rq, true, "Failed to write data to cache");
ocf_rq_unlock_wr(rq);
rq->complete(rq, rq->error);
ocf_rq_put(rq);
}
static int ocf_write_wb_do_flush_metadata(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
env_atomic_set(&rq->req_remaining, 1); /* One core IO */
if (rq->info.flush_metadata) {
OCF_DEBUG_RQ(rq, "Flush metadata");
ocf_metadata_flush_do_asynch(cache, rq,
_ocf_write_wb_io_flush_metadata);
}
_ocf_write_wb_io_flush_metadata(rq, 0);
return 0;
}
static const struct ocf_io_if _io_if_wb_flush_metadata = {
.read = ocf_write_wb_do_flush_metadata,
.write = ocf_write_wb_do_flush_metadata,
};
static void _ocf_write_wb_io(void *private_data, int error)
{
struct ocf_request *rq = (struct ocf_request *) private_data;
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.write);
rq->error |= error;
}
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error) {
ocf_engine_error(rq, true, "Failed to write data to cache");
ocf_rq_unlock_wr(rq);
rq->complete(rq, rq->error);
ocf_rq_put(rq);
} else {
ocf_engine_push_rq_front_if(rq, &_io_if_wb_flush_metadata,
true);
}
}
static inline void _ocf_write_wb_submit(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq));
/*
* 1. Submit data
* 2. Wait for completion of data
* 3. Then continue processing request (flush metadata)
*/
if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
OCF_METADATA_LOCK_WR();
/* Some cache lines are probably assigned to the wrong
* partition. Need to move them to the new one
*/
ocf_part_move(rq);
OCF_METADATA_UNLOCK_WR();
}
OCF_DEBUG_RQ(rq, "Submit Data");
/* Data IO */
ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE,
ocf_engine_io_count(rq), _ocf_write_wb_io, rq);
}
int ocf_write_wb_do(struct ocf_request *rq)
{
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* Update status bits */
_ocf_write_wb_update_bits(rq);
/* Submit IO */
_ocf_write_wb_submit(rq);
/* Update statistics */
ocf_engine_update_request_stats(rq);
ocf_engine_update_block_stats(rq);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
int ocf_write_wb(struct ocf_request *rq)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
ocf_io_start(rq->io);
/* Not sure if we need this. */
ocf_rq_get(rq);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_wb_resume;
/* TODO: Handle fits into dirty */
OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
/* Traverse to check if request is fully mapped */
ocf_engine_traverse(rq);
mapped = ocf_engine_is_mapped(rq);
if (mapped) {
/* All cache lines are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
}
OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
if (!mapped) {
OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/
/* Now there is exclusive access for metadata. May traverse once
* again. If there are misses, eviction needs to be called. This
* process is called 'mapping'.
*/
ocf_engine_map(rq);
if (!rq->info.eviction_error) {
/* Lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
}
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}
if (!rq->info.eviction_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
} else {
ocf_write_wb_do(rq);
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
}
} else {
ocf_rq_clear(rq);
ocf_get_io_if(ocf_cache_mode_pt)->write(rq);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}

12
src/engine/engine_wb.h Normal file
View File

@ -0,0 +1,12 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_WB_H_
#define ENGINE_WB_H_
int ocf_write_wb(struct ocf_request *rq);
int ocf_write_wb_do(struct ocf_request *rq);
#endif /* ENGINE_WB_H_ */

190
src/engine/engine_wi.c Normal file
View File

@ -0,0 +1,190 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_wi.h"
#include "engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wi"
#include "engine_debug.h"
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq);
static const struct ocf_io_if _io_if_wi_flush_metadata = {
.read = ocf_write_wi_update_and_flush_metadata,
.write = ocf_write_wi_update_and_flush_metadata,
};
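/*
 * Write-invalidate flow: the write is first submitted to the core device
 * (_ocf_write_wi_do). On successful completion the request is re-queued
 * with _io_if_wi_flush_metadata, which purges the mapped cache lines from
 * metadata and flushes the metadata before completing the request.
 */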
static void _ocf_write_wi_io_flush_metadata(void *private_data, int error)
{
struct ocf_request *rq = (struct ocf_request *) private_data;
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.write);
rq->error |= error;
}
if (env_atomic_dec_return(&rq->req_remaining))
return;
if (rq->error)
ocf_engine_error(rq, true, "Failed to write data to cache");
ocf_rq_unlock_wr(rq);
rq->complete(rq, rq->error);
ocf_rq_put(rq);
}
static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
env_atomic_set(&rq->req_remaining, 1); /* One core IO */
if (ocf_engine_mapped_count(rq)) {
/* There are mapped cache lines, need to remove them */
OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(rq);
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
if (rq->info.flush_metadata) {
/* Request was dirty and metadata needs to be flushed */
ocf_metadata_flush_do_asynch(cache, rq,
_ocf_write_wi_io_flush_metadata);
}
}
_ocf_write_wi_io_flush_metadata(rq, 0);
return 0;
}
static void _ocf_write_wi_core_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error) {
rq->error = error;
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
core_errors.write);
}
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error) {
ocf_rq_unlock_wr(rq);
rq->complete(rq, rq->error);
ocf_rq_put(rq);
} else {
ocf_engine_push_rq_front_if(rq, &_io_if_wi_flush_metadata,
true);
}
}
static int _ocf_write_wi_do(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
env_atomic_set(&rq->req_remaining, 1); /* One core IO */
OCF_DEBUG_RQ(rq, "Submit");
/* Submit write IO to the core */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_WRITE,
_ocf_write_wi_core_io, rq);
/* Update statistics */
ocf_engine_update_block_stats(rq);
env_atomic64_inc(&cache->core_obj[rq->core_id].counters->
part_counters[rq->part_id].write_reqs.pass_through);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
static void _ocf_write_wi_on_resume(struct ocf_request *rq)
{
OCF_DEBUG_RQ(rq, "On resume");
ocf_engine_push_rq_front(rq, true);
}
static const struct ocf_io_if _io_if_wi_resume = {
.read = _ocf_write_wi_do,
.write = _ocf_write_wi_do,
};
int ocf_write_wi(struct ocf_request *rq)
{
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
OCF_DEBUG_TRACE(rq->cache);
ocf_io_start(rq->io);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* Set resume call backs */
rq->resume = _ocf_write_wi_on_resume;
rq->io_if = &_io_if_wi_resume;
OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
/* Traverse to check if request is fully mapped */
ocf_engine_traverse(rq);
if (ocf_engine_mapped_count(rq)) {
/* Some cache lines are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
} else {
lock = OCF_LOCK_ACQUIRED;
}
OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
if (lock >= 0) {
if (lock == OCF_LOCK_ACQUIRED) {
_ocf_write_wi_do(rq);
} else {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}

11
src/engine/engine_wi.h Normal file
View File

@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_WI_H_
#define ENGINE_WI_H_
int ocf_write_wi(struct ocf_request *rq);
#endif /* ENGINE_WI_H_ */

236
src/engine/engine_wt.c Normal file
View File

@ -0,0 +1,236 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_wt.h"
#include "engine_inv.h"
#include "engine_common.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_part.h"
#include "../metadata/metadata.h"
#include "../concurrency/ocf_concurrency.h"
#define OCF_ENGINE_DEBUG_IO_NAME "wt"
#include "engine_debug.h"
static void _ocf_write_wt_io(struct ocf_request *rq)
{
if (env_atomic_dec_return(&rq->req_remaining))
return;
OCF_DEBUG_RQ(rq, "Completion");
if (rq->error) {
/* An error occurred */
/* Complete request */
rq->complete(rq, rq->info.core_error ? rq->error : 0);
ocf_engine_invalidate(rq);
} else {
/* Unlock request from WRITE access */
ocf_rq_unlock_wr(rq);
/* Complete request */
rq->complete(rq, rq->info.core_error ? rq->error : 0);
/* Release OCF request */
ocf_rq_put(rq);
}
}
static void _ocf_write_wt_cache_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error) {
rq->error = rq->error ?: error;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.write);
if (rq->error)
inc_fallback_pt_error_counter(rq->cache);
}
_ocf_write_wt_io(rq);
}
static void _ocf_write_wt_core_io(void *private_data, int error)
{
struct ocf_request *rq = private_data;
if (error) {
rq->error = error;
rq->info.core_error = 1;
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
core_errors.write);
}
_ocf_write_wt_io(rq);
}
static inline void _ocf_write_wt_submit(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
/* Submit IOs */
OCF_DEBUG_RQ(rq, "Submit");
/* Calculate how many IOs need to be submitted */
env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq)); /* Cache IO */
env_atomic_inc(&rq->req_remaining); /* Core device IO */
if (rq->info.flush_metadata) {
/* Metadata flush IO */
ocf_metadata_flush_do_asynch(cache, rq,
_ocf_write_wt_cache_io);
}
/* To cache */
ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE,
ocf_engine_io_count(rq), _ocf_write_wt_cache_io, rq);
/* To core */
ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_WRITE,
_ocf_write_wt_core_io, rq);
}
static void _ocf_write_wt_update_bits(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
if (ocf_engine_is_miss(rq)) {
OCF_METADATA_LOCK_RD();
/* Update valid status bits */
ocf_set_valid_map_info(rq);
OCF_METADATA_UNLOCK_RD();
}
if (rq->info.dirty_any) {
OCF_METADATA_LOCK_WR();
/* Writes go to SSD and HDD, need to update status bits from
* dirty to clean
*/
ocf_set_clean_map_info(rq);
OCF_METADATA_UNLOCK_WR();
}
if (rq->info.re_part) {
OCF_DEBUG_RQ(rq, "Re-Part");
OCF_METADATA_LOCK_WR();
/* Some cache lines are probably assigned to the wrong
* partition. Need to move them to the new one
*/
ocf_part_move(rq);
OCF_METADATA_UNLOCK_WR();
}
}
static int _ocf_write_wt_do(struct ocf_request *rq)
{
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* Update status bits */
_ocf_write_wt_update_bits(rq);
/* Submit IO */
_ocf_write_wt_submit(rq);
/* Update statistics */
ocf_engine_update_request_stats(rq);
ocf_engine_update_block_stats(rq);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
static const struct ocf_io_if _io_if_wt_resume = {
.read = _ocf_write_wt_do,
.write = _ocf_write_wt_do,
};
int ocf_write_wt(struct ocf_request *rq)
{
bool mapped;
int lock = OCF_LOCK_NOT_ACQUIRED;
struct ocf_cache *cache = rq->cache;
ocf_io_start(rq->io);
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* Set resume call backs */
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_wt_resume;
OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/
/* Traverse to check if request is fully mapped */
ocf_engine_traverse(rq);
mapped = ocf_engine_is_mapped(rq);
if (mapped) {
/* All cache lines are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
}
OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/
if (!mapped) {
OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/
/* Now there is exclusive access for metadata. May traverse once
* again. If there are misses, eviction needs to be called. This
* process is called 'mapping'.
*/
ocf_engine_map(rq);
if (!rq->info.eviction_error) {
/* Lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
}
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}
if (!rq->info.eviction_error) {
if (lock >= 0) {
if (lock != OCF_LOCK_ACQUIRED) {
/* WR lock was not acquired, need to wait for resume */
OCF_DEBUG_RQ(rq, "NO LOCK");
} else {
_ocf_write_wt_do(rq);
}
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d\n", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
}
} else {
ocf_rq_clear(rq);
ocf_get_io_if(ocf_cache_mode_pt)->write(rq);
}
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}

11
src/engine/engine_wt.h Normal file
View File

@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_WT_H_
#define ENGINE_WT_H_
int ocf_write_wt(struct ocf_request *rq);
#endif /* ENGINE_WT_H_ */

168
src/engine/engine_zero.c Normal file
View File

@ -0,0 +1,168 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "engine_zero.h"
#include "engine_common.h"
#include "../concurrency/ocf_concurrency.h"
#include "../utils/utils_rq.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../metadata/metadata.h"
#define OCF_ENGINE_DEBUG_IO_NAME "zero"
#include "engine_debug.h"
static int ocf_zero_purge(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
if (rq->error) {
ocf_engine_error(rq, true, "Failed to discard data on cache");
} else {
/* There are mapped cache lines, need to remove them */
OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/
/* Remove mapped cache lines from metadata */
ocf_purge_map_info(rq);
OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/
}
ocf_rq_unlock_wr(rq);
rq->complete(rq, rq->error);
ocf_rq_put(rq);
return 0;
}
static const struct ocf_io_if _io_if_zero_purge = {
.read = ocf_zero_purge,
.write = ocf_zero_purge,
};
static void _ocf_zero_io_flush_metadata(void *private_data, int error)
{
struct ocf_request *rq = (struct ocf_request *) private_data;
if (error) {
env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters->
cache_errors.write);
rq->error = error;
}
if (env_atomic_dec_return(&rq->req_remaining))
return;
ocf_engine_push_rq_front_if(rq, &_io_if_zero_purge, true);
}
static inline void ocf_zero_map_info(struct ocf_request *rq)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
struct ocf_map_info *map = rq->map;
struct ocf_cache *cache = rq->cache;
uint32_t count = rq->core_line_count;
/* Purge range on the basis of map info
*
* | 01234567 | 01234567 | ... | 01234567 | 01234567 |
* | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
* | first | Middle | last |
*/
for (map_idx = 0; map_idx < count; map_idx++) {
if (map[map_idx].status == LOOKUP_MISS)
continue;
start_bit = 0;
end_bit = ocf_line_end_sector(cache);
if (map_idx == 0) {
/* First */
start_bit = BYTES_TO_SECTORS(rq->byte_position)
% ocf_line_sectors(cache);
}
if (map_idx == (count - 1)) {
/* Last */
end_bit = BYTES_TO_SECTORS(rq->byte_position +
rq->byte_length - 1) %
ocf_line_sectors(cache);
}
ocf_metadata_flush_mark(cache, rq, map_idx, INVALID,
start_bit, end_bit);
}
}
static int _ocf_zero_do(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
/* Get OCF request - increase reference counter */
ocf_rq_get(rq);
/* Mark cache lines for zeroing/discarding */
ocf_zero_map_info(rq);
/* Discard marked cache lines */
env_atomic_set(&rq->req_remaining, 1);
if (rq->info.flush_metadata) {
/* Request was dirty and metadata needs to be flushed */
ocf_metadata_flush_do_asynch(cache, rq,
_ocf_zero_io_flush_metadata);
}
_ocf_zero_io_flush_metadata(rq, 0);
/* Put OCF request - decrease reference counter */
ocf_rq_put(rq);
return 0;
}
static const struct ocf_io_if _io_if_ocf_zero_do = {
.read = _ocf_zero_do,
.write = _ocf_zero_do,
};
/**
* @note
* - Caller has to have metadata write lock
* - Core line has to be mapped
*/
void ocf_engine_zero_line(struct ocf_request *rq)
{
int lock = OCF_LOCK_NOT_ACQUIRED;
ENV_BUG_ON(rq->core_line_count != 1);
/* Traverse to check if request is mapped */
ocf_engine_traverse(rq);
ENV_BUG_ON(!ocf_engine_is_mapped(rq));
rq->resume = ocf_engine_on_resume;
rq->io_if = &_io_if_ocf_zero_do;
/* Some cache lines are mapped, lock request for WRITE access */
lock = ocf_rq_trylock_wr(rq);
if (lock >= 0) {
ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED);
ocf_engine_push_rq_front_if(rq, &_io_if_ocf_zero_do, true);
} else {
OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock);
rq->complete(rq, lock);
ocf_rq_put(rq);
}
}

11
src/engine/engine_zero.h Normal file
View File

@ -0,0 +1,11 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef ENGINE_ZERO_H_
#define ENGINE_ZERO_H_
void ocf_engine_zero_line(struct ocf_request *rq);
#endif /* ENGINE_ZERO_H_ */

19
src/eviction/eviction.c Normal file
View File

@ -0,0 +1,19 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "eviction.h"
struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = {
[ocf_eviction_lru] = {
.init_cline = evp_lru_init_cline,
.rm_cline = evp_lru_rm_cline,
.req_clines = evp_lru_req_clines,
.hot_cline = evp_lru_hot_cline,
.init_evp = evp_lru_init_evp,
.dirty_cline = evp_lru_dirty_cline,
.clean_cline = evp_lru_clean_cline,
.name = "lru",
},
};

56
src/eviction/eviction.h Normal file
View File

@ -0,0 +1,56 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_EVICTION_POLICY_H__
#define __LAYER_EVICTION_POLICY_H__
#define OCF_PENDING_EVICTION_LIMIT 512UL
#include "ocf/ocf.h"
#include "lru.h"
#include "lru_structs.h"
struct eviction_policy {
union {
struct lru_eviction_policy lru;
} policy;
};
/* Eviction policy metadata per cache line */
union eviction_policy_meta {
struct lru_eviction_policy_meta lru;
} __attribute__((packed));
/* the caller must hold the metadata lock for all operations
*
* For range operations the caller can:
* set core_id to -1 to purge the whole cache device
* set core_id to -2 to purge the whole cache partition
*/
struct eviction_policy_ops {
void (*init_cline)(struct ocf_cache *cache,
ocf_cache_line_t cline);
void (*rm_cline)(struct ocf_cache *cache,
ocf_cache_line_t cline);
bool (*can_evict)(struct ocf_cache *cache);
uint32_t (*req_clines)(struct ocf_cache *cache,
uint32_t io_queue, ocf_part_id_t part_id,
uint32_t cline_no, ocf_core_id_t core_id);
void (*hot_cline)(struct ocf_cache *cache,
ocf_cache_line_t cline);
void (*init_evp)(struct ocf_cache *cache,
ocf_part_id_t part_id);
void (*dirty_cline)(struct ocf_cache *cache,
ocf_part_id_t part_id,
uint32_t cline_no);
void (*clean_cline)(struct ocf_cache *cache,
ocf_part_id_t part_id,
uint32_t cline_no);
const char *name;
};
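/*
 * An eviction policy is plugged in by filling one slot of the
 * evict_policy_ops table (see eviction.c, where the LRU callbacks are
 * registered under ocf_eviction_lru). Callbacks left NULL are simply
 * skipped by the ocf_eviction_* wrappers in ops.h.
 */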
extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max];
#endif

503
src/eviction/lru.c Normal file
View File

@ -0,0 +1,503 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "eviction.h"
#include "lru.h"
#include "ops.h"
#include "../utils/utils_cleaner.h"
#include "../utils/utils_cache_line.h"
#include "../concurrency/ocf_concurrency.h"
#include "../mngt/ocf_mngt_common.h"
#include "../engine/engine_zero.h"
#include "../utils/utils_rq.h"
#define OCF_EVICTION_MAX_SCAN 1024
/* -- Start of LRU functions --*/
/* Returns 1 if the given collision_index is the _head_ of
* the LRU list, 0 otherwise.
*/
/* static inline int is_lru_head(unsigned collision_index) {
* return collision_index == lru_list.lru_head;
* }
*/
#define is_lru_head(x) (x == collision_table_entries)
#define is_lru_tail(x) (x == collision_table_entries)
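/*
 * Each partition keeps two LRU lists - one for clean and one for dirty
 * cache lines. The list heads/tails live in the partition runtime
 * metadata, while the prev/next links are stored per cache line in the
 * eviction policy metadata. The value collision_table_entries acts as the
 * NULL pointer terminating a list.
 */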
/* Sets the given collision_index as the new _head_ of the LRU list. */
static inline void update_lru_head(struct ocf_cache *cache,
int partition_id, unsigned int collision_index,
int cline_dirty)
{
struct ocf_user_part *part = &cache->user_parts[partition_id];
if (cline_dirty)
part->runtime->eviction.policy.lru.dirty_head = collision_index;
else
part->runtime->eviction.policy.lru.clean_head = collision_index;
}
/* Sets the given collision_index as the new _tail_ of the LRU list. */
static inline void update_lru_tail(struct ocf_cache *cache,
int partition_id, unsigned int collision_index,
int cline_dirty)
{
struct ocf_user_part *part = &cache->user_parts[partition_id];
if (cline_dirty)
part->runtime->eviction.policy.lru.dirty_tail = collision_index;
else
part->runtime->eviction.policy.lru.clean_tail = collision_index;
}
/* Sets the given collision_index as the new _head_ and _tail_ of
* the LRU list.
*/
static inline void update_lru_head_tail(struct ocf_cache *cache,
int partition_id, unsigned int collision_index, int cline_dirty)
{
update_lru_head(cache, partition_id, collision_index, cline_dirty);
update_lru_tail(cache, partition_id, collision_index, cline_dirty);
}
/* Adds the given collision_index to the _head_ of the LRU list */
static void add_lru_head(struct ocf_cache *cache, int partition_id,
unsigned int collision_index, int cline_dirty)
{
unsigned int curr_head_index;
unsigned int collision_table_entries =
cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
union eviction_policy_meta eviction;
ENV_BUG_ON(!(collision_index < collision_table_entries));
ocf_metadata_get_evicition_policy(cache, collision_index, &eviction);
/* First node to be added */
if ((cline_dirty && !part->runtime->eviction.policy.lru.has_dirty_nodes) ||
(!cline_dirty && !part->runtime->eviction.policy.lru.has_clean_nodes)) {
update_lru_head_tail(cache, partition_id, collision_index, cline_dirty);
eviction.lru.next = collision_table_entries;
eviction.lru.prev = collision_table_entries;
if (cline_dirty)
part->runtime->eviction.policy.lru.has_dirty_nodes = 1;
else
part->runtime->eviction.policy.lru.has_clean_nodes = 1;
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
} else {
union eviction_policy_meta eviction_curr;
/* Not the first node to be added. */
curr_head_index = cline_dirty ?
part->runtime->eviction.policy.lru.dirty_head :
part->runtime->eviction.policy.lru.clean_head;
ENV_BUG_ON(!(curr_head_index < collision_table_entries));
ocf_metadata_get_evicition_policy(cache, curr_head_index,
&eviction_curr);
eviction.lru.next = curr_head_index;
eviction.lru.prev = collision_table_entries;
eviction_curr.lru.prev = collision_index;
update_lru_head(cache, partition_id, collision_index, cline_dirty);
ocf_metadata_set_evicition_policy(cache, curr_head_index,
&eviction_curr);
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
}
}
/* Deletes the node with the given collision_index from the lru list */
static void remove_lru_list(struct ocf_cache *cache, int partition_id,
unsigned int collision_index, int cline_dirty)
{
int is_clean_head = 0, is_clean_tail = 0, is_dirty_head = 0, is_dirty_tail = 0;
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[partition_id];
union eviction_policy_meta eviction;
ENV_BUG_ON(!(collision_index < collision_table_entries));
ocf_metadata_get_evicition_policy(cache, collision_index, &eviction);
/* Find out if this node is LRU _head_ or LRU _tail_ */
if (part->runtime->eviction.policy.lru.clean_head == collision_index)
is_clean_head = 1;
if (part->runtime->eviction.policy.lru.dirty_head == collision_index)
is_dirty_head = 1;
if (part->runtime->eviction.policy.lru.clean_tail == collision_index)
is_clean_tail = 1;
if (part->runtime->eviction.policy.lru.dirty_tail == collision_index)
is_dirty_tail = 1;
ENV_BUG_ON((is_clean_tail || is_clean_head) && (is_dirty_tail || is_dirty_head));
/* Set prev and next (even if not existent) */
next_lru_node = eviction.lru.next;
prev_lru_node = eviction.lru.prev;
/* Case 1: If we are head AND tail, there is only one node.
* So unlink node and set that there is no node left in the list.
*/
if ((is_clean_head && is_clean_tail) || (is_dirty_head && is_dirty_tail)) {
eviction.lru.next = collision_table_entries;
eviction.lru.prev = collision_table_entries;
update_lru_head_tail(cache, partition_id, collision_table_entries, cline_dirty);
if (cline_dirty)
part->runtime->eviction.policy.lru.has_dirty_nodes = 0;
else
part->runtime->eviction.policy.lru.has_clean_nodes = 0;
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
update_lru_head_tail(cache, partition_id,
collision_table_entries, cline_dirty);
}
/* Case 2: else if this collision_index is LRU head, but not tail,
* update head and return
*/
else if ((!is_clean_tail && is_clean_head) || (!is_dirty_tail && is_dirty_head)) {
union eviction_policy_meta eviction_next;
ENV_BUG_ON(!(next_lru_node < collision_table_entries));
ocf_metadata_get_evicition_policy(cache, next_lru_node,
&eviction_next);
update_lru_head(cache, partition_id, next_lru_node, cline_dirty);
eviction.lru.next = collision_table_entries;
eviction_next.lru.prev = collision_table_entries;
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
ocf_metadata_set_evicition_policy(cache, next_lru_node,
&eviction_next);
}
/* Case 3: else if this collision_index is LRU tail, but not head,
* update tail and return
*/
else if ((is_clean_tail && !is_clean_head) || (is_dirty_tail && !is_dirty_head)) {
union eviction_policy_meta eviction_prev;
ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
update_lru_tail(cache, partition_id, prev_lru_node, cline_dirty);
ocf_metadata_get_evicition_policy(cache, prev_lru_node,
&eviction_prev);
eviction.lru.prev = collision_table_entries;
eviction_prev.lru.next = collision_table_entries;
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
ocf_metadata_set_evicition_policy(cache, prev_lru_node,
&eviction_prev);
}
/* Case 4: else this collision_index is a middle node. There is no
* change to the head and the tail pointers.
*/
else {
union eviction_policy_meta eviction_prev;
union eviction_policy_meta eviction_next;
ENV_BUG_ON(!(next_lru_node < collision_table_entries));
ENV_BUG_ON(!(prev_lru_node < collision_table_entries));
ocf_metadata_get_evicition_policy(cache, next_lru_node,
&eviction_next);
ocf_metadata_get_evicition_policy(cache, prev_lru_node,
&eviction_prev);
/* Update prev and next nodes */
eviction_prev.lru.next = eviction.lru.next;
eviction_next.lru.prev = eviction.lru.prev;
/* Update the given node */
eviction.lru.next = collision_table_entries;
eviction.lru.prev = collision_table_entries;
ocf_metadata_set_evicition_policy(cache, collision_index,
&eviction);
ocf_metadata_set_evicition_policy(cache, next_lru_node,
&eviction_next);
ocf_metadata_set_evicition_policy(cache, prev_lru_node,
&eviction_prev);
}
}
/*-- End of LRU functions*/
void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline)
{
union eviction_policy_meta eviction;
ocf_metadata_get_evicition_policy(cache, cline, &eviction);
eviction.lru.prev = cache->device->collision_table_entries;
eviction.lru.next = cache->device->collision_table_entries;
ocf_metadata_set_evicition_policy(cache, cline, &eviction);
}
/* the caller must hold the metadata lock */
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
remove_lru_list(cache, part_id, cline, metadata_test_dirty(cache, cline));
}
static void evp_lru_clean_end(void *private_data, int error)
{
env_atomic *cleaning_in_progress = private_data;
env_atomic_set(cleaning_in_progress, 0);
}
static int evp_lru_clean_getter(struct ocf_cache *cache,
void *getter_context, uint32_t item, ocf_cache_line_t *line)
{
union eviction_policy_meta eviction;
struct ocf_cleaner_attribs *attribs = getter_context;
ocf_cache_line_t prev_cline, curr_cline = attribs->getter_item;
while (curr_cline < cache->device->collision_table_entries) {
ocf_metadata_get_evicition_policy(cache, curr_cline,
&eviction);
prev_cline = eviction.lru.prev;
/* Prevent evicting already locked items */
if (ocf_cache_line_is_used(cache, curr_cline)) {
curr_cline = prev_cline;
continue;
}
ENV_BUG_ON(!metadata_test_dirty(cache, curr_cline));
*line = curr_cline;
attribs->getter_item = prev_cline;
return 0;
}
return -1;
}
static void evp_lru_clean(struct ocf_cache *cache, uint32_t io_queue,
ocf_part_id_t part_id, uint32_t count)
{
env_atomic *progress = &cache->cleaning[part_id];
struct ocf_user_part *part = &cache->user_parts[part_id];
if (ocf_mngt_is_cache_locked(cache))
return;
if (env_atomic_cmpxchg(progress, 0, 1) == 0) {
/* Initialize attributes for cleaner */
struct ocf_cleaner_attribs attribs = {
.cache_line_lock = true,
.do_sort = true,
.cmpl_context = progress,
.cmpl_fn = evp_lru_clean_end,
.getter = evp_lru_clean_getter,
.getter_context = &attribs,
.getter_item = part->runtime->eviction.policy.lru.dirty_tail,
.count = count > 32 ? 32 : count,
.io_queue = io_queue
};
ocf_cleaner_fire(cache, &attribs);
}
}
static void evp_lru_zero_line_complete(struct ocf_request *ocf_req, int error)
{
env_atomic_dec(&ocf_req->cache->pending_eviction_clines);
}
static void evp_lru_zero_line(struct ocf_cache *cache, uint32_t io_queue,
ocf_cache_line_t line)
{
struct ocf_request *rq;
ocf_core_id_t id;
uint64_t addr, core_line;
ocf_metadata_get_core_info(cache, line, &id, &core_line);
addr = core_line * ocf_line_size(cache);
rq = ocf_rq_new(cache, id, addr, ocf_line_size(cache), OCF_WRITE);
if (rq) {
rq->info.internal = true;
rq->complete = evp_lru_zero_line_complete;
rq->io_queue = io_queue;
env_atomic_inc(&cache->pending_eviction_clines);
ocf_engine_zero_line(rq);
}
}
bool evp_lru_can_evict(struct ocf_cache *cache)
{
if (env_atomic_read(&cache->pending_eviction_clines) >=
OCF_PENDING_EVICTION_LIMIT) {
return false;
}
return true;
}
/* the caller must hold the metadata lock */
uint32_t evp_lru_req_clines(struct ocf_cache *cache, uint32_t io_queue,
ocf_part_id_t part_id, uint32_t cline_no, ocf_core_id_t core_id)
{
uint32_t i;
ocf_cache_line_t curr_cline, prev_cline;
struct ocf_user_part *part = &cache->user_parts[part_id];
union eviction_policy_meta eviction;
if (cline_no == 0)
return 0;
i = 0;
curr_cline = part->runtime->eviction.policy.lru.clean_tail;
/* Find cachelines to be evicted. */
while (i < cline_no) {
ENV_BUG_ON(curr_cline > cache->device->collision_table_entries);
if (!evp_lru_can_evict(cache))
break;
if (curr_cline == cache->device->collision_table_entries)
break;
ocf_metadata_get_evicition_policy(cache, curr_cline,
&eviction);
prev_cline = eviction.lru.prev;
/* Prevent evicting already locked items */
if (ocf_cache_line_is_used(cache, curr_cline)) {
curr_cline = prev_cline;
continue;
}
ENV_BUG_ON(metadata_test_dirty(cache, curr_cline));
if (ocf_data_obj_is_atomic(&cache->device->obj)) {
/* atomic cache, we have to trim cache lines before
* eviction
*/
evp_lru_zero_line(cache, io_queue, curr_cline);
} else {
set_cache_line_invalid_no_flush(cache, 0,
ocf_line_end_sector(cache),
curr_cline);
/* Goto next item. */
i++;
}
curr_cline = prev_cline;
}
if (i < cline_no && part->runtime->eviction.policy.lru.dirty_tail !=
cache->device->collision_table_entries) {
evp_lru_clean(cache, io_queue, part_id, cline_no - i);
}
/* Return number of clines that were really evicted */
return i;
}
/* the caller must hold the metadata lock */
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline)
{
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline);
struct ocf_user_part *part = &cache->user_parts[part_id];
uint32_t prev_lru_node, next_lru_node;
uint32_t collision_table_entries = cache->device->collision_table_entries;
union eviction_policy_meta eviction;
int cline_dirty;
ocf_metadata_get_evicition_policy(cache, cline, &eviction);
next_lru_node = eviction.lru.next;
prev_lru_node = eviction.lru.prev;
cline_dirty = metadata_test_dirty(cache, cline);
if ((next_lru_node != collision_table_entries) ||
(prev_lru_node != collision_table_entries) ||
((part->runtime->eviction.policy.lru.clean_head == cline) &&
(part->runtime->eviction.policy.lru.clean_tail == cline)) ||
((part->runtime->eviction.policy.lru.dirty_head == cline) &&
(part->runtime->eviction.policy.lru.dirty_tail == cline))) {
remove_lru_list(cache, part_id, cline, cline_dirty);
}
/* Update LRU */
add_lru_head(cache, part_id, cline, cline_dirty);
}
void evp_lru_init_evp(struct ocf_cache *cache, ocf_part_id_t part_id)
{
unsigned int collision_table_entries =
cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[part_id];
part->runtime->eviction.policy.lru.has_clean_nodes = 0;
part->runtime->eviction.policy.lru.has_dirty_nodes = 0;
part->runtime->eviction.policy.lru.clean_head = collision_table_entries;
part->runtime->eviction.policy.lru.clean_tail = collision_table_entries;
part->runtime->eviction.policy.lru.dirty_head = collision_table_entries;
part->runtime->eviction.policy.lru.dirty_tail = collision_table_entries;
}
void evp_lru_clean_cline(struct ocf_cache *cache, ocf_part_id_t part_id,
uint32_t cline)
{
OCF_METADATA_EVICTION_LOCK();
remove_lru_list(cache, part_id, cline, 1);
add_lru_head(cache, part_id, cline, 0);
OCF_METADATA_EVICTION_UNLOCK();
}
void evp_lru_dirty_cline(struct ocf_cache *cache, ocf_part_id_t part_id,
uint32_t cline)
{
OCF_METADATA_EVICTION_LOCK();
remove_lru_list(cache, part_id, cline, 0);
add_lru_head(cache, part_id, cline, 1);
OCF_METADATA_EVICTION_UNLOCK();
}

23
src/eviction/lru.h Normal file
View File

@ -0,0 +1,23 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __EVICTION_LRU_H__
#define __EVICTION_LRU_H__
#include "eviction.h"
#include "lru_structs.h"
void evp_lru_init_cline(struct ocf_cache *cache,
ocf_cache_line_t cline);
void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
bool evp_lru_can_evict(struct ocf_cache *cache);
uint32_t evp_lru_req_clines(struct ocf_cache *cache, uint32_t io_queue,
ocf_part_id_t part_id, uint32_t cline_no,
ocf_core_id_t core_id);
void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline);
void evp_lru_init_evp(struct ocf_cache *cache, ocf_part_id_t part_id);
void evp_lru_dirty_cline(struct ocf_cache *cache, ocf_part_id_t part_id, uint32_t cline);
void evp_lru_clean_cline(struct ocf_cache *cache, ocf_part_id_t part_id, uint32_t cline);
#endif

24
src/eviction/lru_structs.h Normal file
View File

@ -0,0 +1,24 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __EVICTION_LRU_STRUCTS_H__
#define __EVICTION_LRU_STRUCTS_H__
struct lru_eviction_policy_meta {
/* LRU pointers 2*4=8 bytes */
uint32_t prev;
uint32_t next;
} __attribute__((packed));
struct lru_eviction_policy {
int has_clean_nodes;
int has_dirty_nodes;
uint32_t dirty_head;
uint32_t dirty_tail;
uint32_t clean_head;
uint32_t clean_tail;
};
#endif

108
src/eviction/ops.h Normal file
View File

@ -0,0 +1,108 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef LAYER_EVICTION_POLICY_OPS_H_
#define LAYER_EVICTION_POLICY_OPS_H_
#include "eviction.h"
#include "../metadata/metadata.h"
/**
* @brief Initialize cache line before adding it into eviction
*
* @note This operation is called under WR metadata lock
*/
static inline void ocf_eviction_init_cache_line(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id)
{
uint8_t type;
type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].init_cline))
evict_policy_ops[type].init_cline(cache, line);
}
static inline void ocf_eviction_purge_cache_line(
struct ocf_cache *cache, ocf_cache_line_t line)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].rm_cline)) {
OCF_METADATA_EVICTION_LOCK();
evict_policy_ops[type].rm_cline(cache, line);
OCF_METADATA_EVICTION_UNLOCK();
}
}
static inline bool ocf_eviction_can_evict(struct ocf_cache *cache)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
if (likely(evict_policy_ops[type].can_evict))
return evict_policy_ops[type].can_evict(cache);
return true;
}
static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache,
uint32_t io_queue, ocf_part_id_t part_id, uint32_t clines,
ocf_core_id_t core_id)
{
uint8_t type;
uint32_t result = 0;
ENV_BUG_ON(core_id >= OCF_CORE_MAX);
type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].req_clines)) {
/*
* This is called under METADATA WR lock. No need to get
* eviction lock.
*/
result = evict_policy_ops[type].req_clines(cache, io_queue,
part_id, clines, core_id);
}
return result;
}
static inline void ocf_eviction_set_hot_cache_line(
struct ocf_cache *cache, ocf_cache_line_t line)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].hot_cline)) {
OCF_METADATA_EVICTION_LOCK();
evict_policy_ops[type].hot_cline(cache, line);
OCF_METADATA_EVICTION_UNLOCK();
}
}
static inline void ocf_eviction_initialize(struct ocf_cache *cache,
ocf_part_id_t part_id)
{
uint8_t type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(type >= ocf_eviction_max);
if (likely(evict_policy_ops[type].init_evp)) {
OCF_METADATA_EVICTION_LOCK();
evict_policy_ops[type].init_evp(cache, part_id);
OCF_METADATA_EVICTION_UNLOCK();
}
}
#endif /* LAYER_EVICTION_POLICY_OPS_H_ */

114
src/layer_space_management.c Normal file
View File

@ -0,0 +1,114 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "layer_space_management.h"
#include "utils/utils_allocator.h"
#include "utils/utils_part.h"
#include "concurrency/ocf_concurrency.h"
#include "engine/engine_common.h"
#include "eviction/ops.h"
static uint32_t ocf_evict_calculate(struct ocf_user_part *part,
uint32_t to_evict)
{
if (part->runtime->curr_size <= part->config->min_size) {
/*
* Cannot evict from this partition because current size
* is less than minimum size
*/
return 0;
}
if (to_evict < OCF_TO_EVICTION_MIN)
to_evict = OCF_TO_EVICTION_MIN;
if (to_evict > (part->runtime->curr_size - part->config->min_size))
to_evict = part->runtime->curr_size - part->config->min_size;
return to_evict;
}
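/*
 * ocf_evict_do(): walk the partitions from the lowest to the highest
 * priority, skipping the target partition, and request cache lines from
 * each eligible one until the requested number has been evicted. If still
 * short, evict from the target partition itself as a last resort.
 */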
static inline uint32_t ocf_evict_do(struct ocf_cache *cache,
uint32_t io_queue, const uint32_t evict_cline_no,
ocf_core_id_t core_id, ocf_part_id_t target_part_id)
{
uint32_t to_evict = 0, evicted = 0;
struct ocf_user_part *part;
struct ocf_user_part *target_part = &cache->user_parts[target_part_id];
ocf_part_id_t part_id;
/* For each partition from the lowest priority to highest one */
for_each_part(cache, part, part_id) {
if (!ocf_eviction_can_evict(cache))
goto out;
/*
* Check stop and continue conditions
*/
if (target_part->config->priority > part->config->priority) {
/*
* iterated partition has higher priority, do not evict
*/
break;
}
if (!part->config->flags.eviction) {
/* It seems that there are no more partitions for eviction */
break;
}
if (part_id == target_part_id) {
/* Omit the target partition, evict from the others first */
continue;
}
if (evicted >= evict_cline_no) {
/* Evicted requested number of cache lines, stop */
goto out;
}
to_evict = ocf_evict_calculate(part, evict_cline_no);
if (to_evict == 0) {
/* No cache lines to evict for this partition */
continue;
}
evicted += ocf_eviction_need_space(cache, io_queue,
part_id, to_evict, core_id);
}
if (!ocf_eviction_can_evict(cache))
goto out;
if (evicted < evict_cline_no) {
/* Now we can evict from the target partition */
to_evict = ocf_evict_calculate(target_part, evict_cline_no);
if (to_evict) {
evicted += ocf_eviction_need_space(cache, io_queue,
target_part_id, to_evict, core_id);
}
}
out:
return evicted;
}
int space_managment_evict_do(struct ocf_cache *cache,
struct ocf_request *req, uint32_t evict_cline_no)
{
uint32_t evicted;
if (evict_cline_no <= cache->device->freelist_part->curr_size)
return LOOKUP_MAPPED;
evict_cline_no = evict_cline_no - cache->device->freelist_part->curr_size;
evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no,
req->core_id, req->part_id);
if (evict_cline_no <= evicted)
return LOOKUP_MAPPED;
req->info.eviction_error |= true;
return LOOKUP_MISS;
}

25
src/layer_space_management.h Normal file
View File

@ -0,0 +1,25 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LAYER_SPACE_MANAGEMENT_H__
#define __LAYER_SPACE_MANAGEMENT_H__
#include "ocf_request.h"
#define OCF_TO_EVICTION_MIN 128UL
/*
* Deallocates space from low priority partitions.
*
* Returns LOOKUP_MAPPED when the requested number of cache lines is
* available (evicting from lower priority partitions if needed),
* or LOOKUP_MISS when eviction could not free enough space
*/
int space_managment_evict_do(struct ocf_cache *cache,
struct ocf_request *req, uint32_t evict_cline_no);
int space_management_free(struct ocf_cache *cache, uint32_t count);
#endif

388
src/metadata/metadata.c Normal file
View File

@ -0,0 +1,388 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "metadata_hash.h"
#include "metadata_io.h"
#include "../ocf_priv.h"
#include "../utils/utils_io.h"
#include "../utils/utils_cache_line.h"
#define OCF_METADATA_DEBUG 0
#if 1 == OCF_METADATA_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][Hash] %s\n", __func__)
#else
#define OCF_DEBUG_TRACE(cache)
#endif
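/*
 * All metadata operations are dispatched through cache->metadata.iface.
 * ocf_metadata_init() binds this interface to the hash-based
 * implementation (metadata_hash_get_iface()), so the functions below are
 * mostly thin wrappers that add locking where needed.
 */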
int ocf_metadata_init(struct ocf_cache *cache,
ocf_cache_line_size_t cache_line_size)
{
struct ocf_metadata_iface *iface = (struct ocf_metadata_iface *)
&cache->metadata.iface;
int ret;
OCF_DEBUG_TRACE(cache);
ENV_BUG_ON(cache->metadata.iface_priv);
ret = ocf_metadata_io_init(cache);
if (ret)
return ret;
*iface = *metadata_hash_get_iface();
ret = cache->metadata.iface.init(cache, cache_line_size);
if (ret)
ocf_metadata_io_deinit(cache);
return ret;
}
int ocf_metadata_init_variable_size(struct ocf_cache *cache, uint64_t device_size,
ocf_cache_line_size_t cache_line_size,
ocf_metadata_layout_t layout)
{
OCF_DEBUG_TRACE(cache);
return cache->metadata.iface.init_variable_size(cache, device_size,
cache_line_size, layout);
}
void ocf_metadata_init_freelist_partition(struct ocf_cache *cache)
{
OCF_DEBUG_TRACE(cache);
cache->metadata.iface.layout_iface->init_freelist(cache);
}
void ocf_metadata_init_hash_table(struct ocf_cache *cache)
{
OCF_DEBUG_TRACE(cache);
cache->metadata.iface.init_hash_table(cache);
}
void ocf_metadata_deinit(struct ocf_cache *cache)
{
OCF_DEBUG_TRACE(cache);
if (cache->metadata.iface.deinit) {
cache->metadata.iface.deinit(cache);
}
ocf_metadata_io_deinit(cache);
}
void ocf_metadata_deinit_variable_size(struct ocf_cache *cache)
{
OCF_DEBUG_TRACE(cache);
if (cache->metadata.iface.deinit_variable_size)
cache->metadata.iface.deinit_variable_size(cache);
}
size_t ocf_metadata_size_of(struct ocf_cache *cache)
{
return cache->metadata.iface.size_of(cache);
}
void ocf_metadata_error(struct ocf_cache *cache)
{
if (cache->device->metadata_error == 0)
ocf_cache_log(cache, log_err, "Metadata Error\n");
env_bit_clear(ocf_cache_state_running, &cache->cache_state);
cache->device->metadata_error = -1;
}
ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache)
{
return cache->metadata.iface.pages(cache);
}
ocf_cache_line_t
ocf_metadata_get_cachelines_count(struct ocf_cache *cache)
{
return cache->metadata.iface.cachelines(cache);
}
int ocf_metadata_flush_all(struct ocf_cache *cache)
{
int result;
OCF_METADATA_LOCK_WR();
result = cache->metadata.iface.flush_all(cache);
OCF_METADATA_UNLOCK_WR();
return result;
}
void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line)
{
cache->metadata.iface.flush(cache, line);
}
int ocf_metadata_load_all(struct ocf_cache *cache)
{
int result;
OCF_METADATA_LOCK_WR();
result = cache->metadata.iface.load_all(cache);
OCF_METADATA_UNLOCK_WR();
return result;
}
int ocf_metadata_load_recovery(struct ocf_cache *cache)
{
return cache->metadata.iface.load_recovery(cache);
}
void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
cache->metadata.iface.flush_mark(cache, rq, map_idx, to_state,
start, stop);
}
void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, ocf_end_t complete)
{
cache->metadata.iface.flush_do_asynch(cache, rq, complete);
}
static inline int ocf_metadata_check_properties(void)
{
uint32_t field_offset;
/* Because the basic metadata properties are at the beginning of the super
* block, only the first page of the super block is read/written.
*
* For safety, check that the offsets of the metadata properties fall
* within the first page of the super block.
*
* In the future the super block field order may change and the metadata
* variant may move beyond the first page of the super block
*/
field_offset = offsetof(struct ocf_superblock_config, line_size);
ENV_BUG_ON(field_offset >= PAGE_SIZE);
/* The same checking for magic number */
field_offset = offsetof(struct ocf_superblock_config, magic_number);
ENV_BUG_ON(field_offset >= PAGE_SIZE);
/* The same checking for IO interface type */
field_offset = offsetof(struct ocf_superblock_config, cache_mode);
ENV_BUG_ON(field_offset >= PAGE_SIZE);
/* And the same for version location within superblock structure */
field_offset = offsetof(struct ocf_superblock_config, metadata_version);
ENV_BUG_ON(field_offset >= PAGE_SIZE);
return 0;
}
static int ocf_metadata_read_properties(ocf_ctx_t ctx, ocf_data_obj_t cache_obj,
struct ocf_superblock_config *superblock)
{
ctx_data_t *data;
struct ocf_io *io;
int result = 0;
if (ocf_metadata_check_properties())
return -EINVAL;
/* Allocate resources for IO */
io = ocf_dobj_new_io(cache_obj);
data = ctx_data_alloc(ctx, 1);
/* Check allocation result */
if (!io || !data) {
ocf_log(ctx, log_err, "Memory allocation error");
result = -ENOMEM;
goto out;
}
/*
* Read first page of cache device in order to recover metadata
* properties
*/
result = ocf_io_set_data(io, data, 0);
if (result) {
ocf_log(ctx, log_err, "Metadata IO configuration error\n");
result = -EIO;
goto out;
}
ocf_io_configure(io, 0, PAGE_SIZE, OCF_READ, 0, 0);
result = ocf_submit_io_wait(io);
if (result) {
ocf_log(ctx, log_err, "Metadata IO request submit error\n");
result = -EIO;
goto out;
}
/* Copy data from the IO buffer into the super block buffer */
ctx_data_rd_check(ctx, superblock, data,
PAGE_SIZE);
out:
if (io)
ocf_io_put(io);
ctx_data_free(ctx, data);
return result;
}
/**
* @brief Load individual properties from the metadata super block
* @param cache_obj object from which to load metadata
* @param line_size - cache line size; if NULL it won't be read
* @param layout - metadata layout; if NULL it won't be read
* @param cache_mode - cache mode; if NULL it won't be read
* @param shutdown_status - dirty shutdown or clean shutdown
* @param dirty_flushed - if all dirty data was flushed prior to closing
* the cache
* @return 0 upon successful completion
*/
int ocf_metadata_load_properties(ocf_data_obj_t cache_obj,
ocf_cache_line_size_t *line_size,
ocf_metadata_layout_t *layout,
ocf_cache_mode_t *cache_mode,
enum ocf_metadata_shutdown_status *shutdown_status,
uint8_t *dirty_flushed)
{
struct ocf_superblock_config *superblock;
int err_value = 0;
/* Allocate first page of super block */
superblock = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (!superblock) {
ocf_cache_log(cache_obj->cache, log_err,
"Allocation memory error");
return -ENOMEM;
}
OCF_DEBUG_TRACE(cache);
err_value = ocf_metadata_read_properties(cache_obj->cache->owner,
cache_obj, superblock);
if (err_value)
goto ocf_metadata_load_variant_ERROR;
if (superblock->magic_number != CACHE_MAGIC_NUMBER) {
err_value = -ENODATA;
ocf_cache_log(cache_obj->cache, log_info,
"Can not detect pre-existing metadata\n");
goto ocf_metadata_load_variant_ERROR;
}
if (METADATA_VERSION() != superblock->metadata_version) {
err_value = -EBADF;
ocf_cache_log(cache_obj->cache, log_err,
"Metadata version mismatch!\n");
goto ocf_metadata_load_variant_ERROR;
}
if (line_size) {
if (ocf_cache_line_size_is_valid(superblock->line_size)) {
*line_size = superblock->line_size;
} else {
err_value = -EINVAL;
ocf_cache_log(cache_obj->cache, log_err,
"ERROR: Invalid cache line size!\n");
}
}
if (layout) {
if (superblock->metadata_layout >= ocf_metadata_layout_max ||
superblock->metadata_layout < 0) {
err_value = -EINVAL;
ocf_cache_log(cache_obj->cache, log_err,
"ERROR: Invalid metadata layout!\n");
} else {
*layout = superblock->metadata_layout;
}
}
if (cache_mode) {
if (superblock->cache_mode < ocf_cache_mode_max) {
*cache_mode = superblock->cache_mode;
} else {
ocf_cache_log(cache_obj->cache, log_err,
"ERROR: Invalid cache mode!\n");
err_value = -EINVAL;
}
}
if (shutdown_status != NULL) {
if (superblock->clean_shutdown <= ocf_metadata_clean_shutdown) {
*shutdown_status = superblock->clean_shutdown;
} else {
ocf_cache_log(cache_obj->cache, log_err,
"ERROR: Invalid shutdown status!\n");
err_value = -EINVAL;
}
}
if (dirty_flushed != NULL) {
if (superblock->dirty_flushed <= DIRTY_FLUSHED) {
*dirty_flushed = superblock->dirty_flushed;
} else {
ocf_cache_log(cache_obj->cache, log_err,
"ERROR: Invalid flush status!\n");
err_value = -EINVAL;
}
}
ocf_metadata_load_variant_ERROR:
env_free(superblock);
return err_value;
}
int ocf_metadata_probe(ocf_ctx_t ctx, ocf_data_obj_t cache_obj,
bool *clean_shutdown, bool *cache_dirty)
{
struct ocf_superblock_config *superblock;
int result = 0;
OCF_CHECK_NULL(ctx);
OCF_CHECK_NULL(cache_obj);
/* Allocate first page of super block */
superblock = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
if (!superblock) {
ocf_log(ctx, log_err, "Memory allocation error");
return -ENOMEM;
}
OCF_DEBUG_TRACE(cache);
result = ocf_metadata_read_properties(ctx, cache_obj, superblock);
if (result)
goto ocf_metadata_probe_END;
if (superblock->magic_number != CACHE_MAGIC_NUMBER) {
result = -ENODATA;
goto ocf_metadata_probe_END;
}
if (clean_shutdown != NULL) {
*clean_shutdown = (superblock->clean_shutdown !=
ocf_metadata_dirty_shutdown);
}
if (cache_dirty != NULL)
*cache_dirty = (superblock->dirty_flushed == DIRTY_NOT_FLUSHED);
if (METADATA_VERSION() != superblock->metadata_version)
result = -EBADF;
ocf_metadata_probe_END:
env_free(superblock);
return result;
}

336
src/metadata/metadata.h Normal file
View File

@ -0,0 +1,336 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_H__
#define __METADATA_H__
#include "../ocf_cache_priv.h"
#include "../ocf_ctx_priv.h"
static inline void ocf_metadata_eviction_lock(struct ocf_cache *cache)
{
env_spinlock_lock(&cache->metadata.lock.eviction);
}
static inline void ocf_metadata_eviction_unlock(struct ocf_cache *cache)
{
env_spinlock_unlock(&cache->metadata.lock.eviction);
}
#define OCF_METADATA_EVICTION_LOCK() \
ocf_metadata_eviction_lock(cache)
#define OCF_METADATA_EVICTION_UNLOCK() \
ocf_metadata_eviction_unlock(cache)
static inline void ocf_metadata_lock(struct ocf_cache *cache, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwsem_down_write(&cache->metadata.lock.collision);
else if (rw == OCF_METADATA_RD)
env_rwsem_down_read(&cache->metadata.lock.collision);
else
ENV_BUG();
}
static inline void ocf_metadata_unlock(struct ocf_cache *cache, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwsem_up_write(&cache->metadata.lock.collision);
else if (rw == OCF_METADATA_RD)
env_rwsem_up_read(&cache->metadata.lock.collision);
else
ENV_BUG();
}
static inline int ocf_metadata_try_lock(struct ocf_cache *cache, int rw)
{
int result = -1;
if (rw == OCF_METADATA_WR) {
result = env_rwsem_down_write_trylock(
&cache->metadata.lock.collision);
} else if (rw == OCF_METADATA_RD) {
result = env_rwsem_down_read_trylock(
&cache->metadata.lock.collision);
} else {
ENV_BUG();
}
if (!result)
return -1;
return 0;
}
static inline void ocf_metadata_status_bits_lock(
struct ocf_cache *cache, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwlock_write_lock(&cache->metadata.lock.status);
else if (rw == OCF_METADATA_RD)
env_rwlock_read_lock(&cache->metadata.lock.status);
else
ENV_BUG();
}
static inline void ocf_metadata_status_bits_unlock(
struct ocf_cache *cache, int rw)
{
if (rw == OCF_METADATA_WR)
env_rwlock_write_unlock(&cache->metadata.lock.status);
else if (rw == OCF_METADATA_RD)
env_rwlock_read_unlock(&cache->metadata.lock.status);
else
ENV_BUG();
}
#define OCF_METADATA_LOCK_RD() \
ocf_metadata_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_UNLOCK_RD() \
ocf_metadata_unlock(cache, OCF_METADATA_RD)
#define OCF_METADATA_LOCK_RD_TRY() \
ocf_metadata_try_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_LOCK_WR() \
ocf_metadata_lock(cache, OCF_METADATA_WR)
#define OCF_METADATA_LOCK_WR_TRY() \
ocf_metadata_try_lock(cache, OCF_METADATA_WR)
#define OCF_METADATA_UNLOCK_WR() \
ocf_metadata_unlock(cache, OCF_METADATA_WR)
#define OCF_METADATA_BITS_LOCK_RD() \
ocf_metadata_status_bits_lock(cache, OCF_METADATA_RD)
#define OCF_METADATA_BITS_UNLOCK_RD() \
ocf_metadata_status_bits_unlock(cache, OCF_METADATA_RD)
#define OCF_METADATA_BITS_LOCK_WR() \
ocf_metadata_status_bits_lock(cache, OCF_METADATA_WR)
#define OCF_METADATA_BITS_UNLOCK_WR() \
ocf_metadata_status_bits_unlock(cache, OCF_METADATA_WR)
#define OCF_METADATA_FLUSH_LOCK() \
ocf_metadata_flush_lock(cache)
#define OCF_METADATA_FLUSH_UNLOCK() \
ocf_metadata_flush_unlock(cache)
#include "metadata_cleaning_policy.h"
#include "metadata_eviction_policy.h"
#include "metadata_partition.h"
#include "metadata_hash.h"
#include "metadata_superblock.h"
#include "metadata_status.h"
#include "metadata_collision.h"
#include "metadata_core.h"
#include "metadata_misc.h"
#define INVALID 0
#define VALID 1
#define CLEAN 2
#define DIRTY 3
/**
* @brief Initialize metadata
*
* @param cache - Cache instance
* @param cache_line_size Cache line size
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_init(struct ocf_cache *cache,
ocf_cache_line_size_t cache_line_size);
/**
* @brief Initialize per-cacheline metadata
*
* @param cache - Cache instance
* @param device_size - Device size in bytes
* @param cache_line_size Cache line size
* @param layout Metadata layout
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_init_variable_size(struct ocf_cache *cache,
uint64_t device_size, ocf_cache_line_size_t cache_line_size,
ocf_metadata_layout_t layout);
/**
* @brief Initialize collision table
*
* @param cache - Cache instance
*/
void ocf_metadata_init_freelist_partition(struct ocf_cache *cache);
/**
* @brief Initialize hash table
*
* @param cache - Cache instance
*/
void ocf_metadata_init_hash_table(struct ocf_cache *cache);
/**
* @brief De-Initialize metadata
*
* @param cache - Cache instance
*/
void ocf_metadata_deinit(struct ocf_cache *cache);
/**
* @brief De-Initialize per-cacheline metadata
*
* @param cache - Cache instance
*/
void ocf_metadata_deinit_variable_size(struct ocf_cache *cache);
/**
* @brief Get memory footprint
*
* @param cache - Cache instance
* @return Memory footprint
*/
size_t ocf_metadata_size_of(struct ocf_cache *cache);
/**
* @brief Handle metadata error
*
* @param cache - Cache instance
*/
void ocf_metadata_error(struct ocf_cache *cache);
/**
* @brief Get amount of cache lines
*
* @param cache - Cache instance
* @return Amount of cache lines (cache device lines - metadata space)
*/
ocf_cache_line_t
ocf_metadata_get_cachelines_count(struct ocf_cache *cache);
/**
* @brief Get amount of pages required for metadata
*
* @param cache - Cache instance
* @return Pages required to store metadata on the cache device
*/
ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache);
/**
* @brief Flush metadata
*
* @param cache
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_flush_all(struct ocf_cache *cache);
/**
* @brief Flush metadata for specified cache line
*
* @param[in] cache - Cache instance
* @param[in] line - cache line which is to be flushed
*/
void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line);
/**
* @brief Mark specified cache line to be flushed
*
* @param[in] cache - Cache instance
* @param[in] rq - request containing the cache line mapping
* @param[in] map_idx - index within the request map
* @param[in] to_state - target state to mark
* @param[in] start - first sector of the marked range
* @param[in] stop - last sector of the marked range
*/
void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq,
uint32_t map_idx, int to_state, uint8_t start, uint8_t stop);
/**
* @brief Flush marked cache lines asynchronously
*
* @param cache - Cache instance
* @param rq - Request with cache lines marked for flushing
* @param complete - Flushing completion callback
*/
void ocf_metadata_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, ocf_end_t complete);
/**
* @brief Load metadata
*
* @param cache - Cache instance
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_load_all(struct ocf_cache *cache);
/**
* @brief Load metadata required for recovery procedure
*
* @param cache Cache instance
* @return 0 - Operation success otherwise failure
*/
int ocf_metadata_load_recovery(struct ocf_cache *cache);
/*
* NOTE: The hash table is specific to the hash-based metadata service
* implementation and should be used internally by the metadata service.
* There is no high-level metadata interface for it yet, so these accessors
* are temporarily defined in this file.
*/
static inline ocf_cache_line_t
ocf_metadata_get_hash(struct ocf_cache *cache, ocf_cache_line_t index)
{
return cache->metadata.iface.get_hash(cache, index);
}
static inline void ocf_metadata_set_hash(struct ocf_cache *cache,
ocf_cache_line_t index, ocf_cache_line_t line)
{
cache->metadata.iface.set_hash(cache, index, line);
}
static inline void ocf_metadata_flush_hash(struct ocf_cache *cache,
ocf_cache_line_t index)
{
cache->metadata.iface.flush_hash(cache, index);
}
static inline ocf_cache_line_t ocf_metadata_entries_hash(
struct ocf_cache *cache)
{
return cache->metadata.iface.entries_hash(cache);
}
int ocf_metadata_load_properties(ocf_data_obj_t cache_obj,
ocf_cache_line_size_t *line_size,
ocf_metadata_layout_t *layout,
ocf_cache_mode_t *cache_mode,
enum ocf_metadata_shutdown_status *shutdown_status,
uint8_t *dirty_flushed);
/**
* @brief Validate cache line size
*
* @param size Cache line size
* @return true - cache line size is valid, false - cache line size is invalid
*/
static inline bool ocf_metadata_line_size_is_valid(uint32_t size)
{
switch (size) {
case 4 * KiB:
case 8 * KiB:
case 16 * KiB:
case 32 * KiB:
case 64 * KiB:
return true;
default:
return false;
}
}
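/*
* Usage sketch (illustrative only, not part of the original header): a caller
* would typically validate the requested cache line size before initializing
* metadata; the error codes below are assumptions.
*
*	if (!ocf_metadata_line_size_is_valid(cache_line_size))
*		return -OCF_ERR_INVAL;
*
*	if (ocf_metadata_init(cache, cache_line_size))
*		return -OCF_ERR_NO_MEM;
*/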
#endif /* METADATA_H_ */

240
src/metadata/metadata_bit.h Normal file
View File

@ -0,0 +1,240 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
/*******************************************************************************
* Sector mask getter
******************************************************************************/
static inline uint64_t _get_mask(uint8_t start, uint8_t stop)
{
uint64_t mask = 0;
ENV_BUG_ON(start >= 64);
ENV_BUG_ON(stop >= 64);
ENV_BUG_ON(stop < start);
mask = ~mask;
mask >>= start + (63 - stop);
mask <<= start;
return mask;
}
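/*
* Example (added for clarity): for start = 2 and stop = 5 the mask covers
* bits 2..5 inclusive:
*
*	_get_mask(2, 5)  == 0x3C	(binary 0011 1100)
*	_get_mask(0, 63) == ~0ULL	(full 64-bit mask)
*/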
#define _get_mask_u8(start, stop) _get_mask(start, stop)
#define _get_mask_u16(start, stop) _get_mask(start, stop)
#define _get_mask_u32(start, stop) _get_mask(start, stop)
#define _get_mask_u64(start, stop) _get_mask(start, stop)
typedef __uint128_t u128;
static inline u128 _get_mask_u128(uint8_t start, uint8_t stop)
{
u128 mask = 0;
ENV_BUG_ON(start >= 128);
ENV_BUG_ON(stop >= 128);
ENV_BUG_ON(stop < start);
mask = ~mask;
mask >>= start + (127 - stop);
mask <<= start;
return mask;
}
#define ocf_metadata_bit_struct(type) \
struct ocf_metadata_map_##type { \
struct ocf_metadata_map map; \
type valid; \
type dirty; \
} __attribute__((packed))
#define ocf_metadata_bit_func(what, type) \
static bool _ocf_metadata_test_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all) \
{ \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
const struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (all) { \
if (mask == (map[line].what & mask)) { \
return true; \
} else { \
return false; \
} \
} else { \
if (map[line].what & mask) { \
return true; \
} else { \
return false; \
} \
} \
} \
\
static bool _ocf_metadata_test_out_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop) \
{ \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
const struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (map[line].what & ~mask) { \
return true; \
} else { \
return false; \
} \
} \
\
static bool _ocf_metadata_clear_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop) \
{ \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
map[line].what &= ~mask; \
\
if (map[line].what) { \
return true; \
} else { \
return false; \
} \
} \
\
static bool _ocf_metadata_set_##what##_##type(struct ocf_cache *cache, \
ocf_cache_line_t line, uint8_t start, uint8_t stop) \
{ \
bool result; \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
result = map[line].what ? true : false; \
\
map[line].what |= mask; \
\
return result; \
} \
\
static bool _ocf_metadata_test_and_set_##what##_##type( \
struct ocf_cache *cache, ocf_cache_line_t line, \
uint8_t start, uint8_t stop, bool all) \
{ \
bool test; \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (all) { \
if (mask == (map[line].what & mask)) { \
test = true; \
} else { \
test = false; \
} \
} else { \
if (map[line].what & mask) { \
test = true; \
} else { \
test = false; \
} \
} \
\
map[line].what |= mask; \
return test; \
} \
\
static bool _ocf_metadata_test_and_clear_##what##_##type( \
struct ocf_cache *cache, ocf_cache_line_t line, \
uint8_t start, uint8_t stop, bool all) \
{ \
bool test; \
type mask = _get_mask_##type(start, stop); \
\
struct ocf_metadata_hash_ctrl *ctrl = \
(struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \
\
struct ocf_metadata_raw *raw = \
&ctrl->raw_desc[metadata_segment_collision]; \
\
struct ocf_metadata_map_##type *map = raw->mem_pool; \
\
_raw_bug_on(raw, line, sizeof(*map)); \
\
if (all) { \
if (mask == (map[line].what & mask)) { \
test = true; \
} else { \
test = false; \
} \
} else { \
if (map[line].what & mask) { \
test = true; \
} else { \
test = false; \
} \
} \
\
map[line].what &= ~mask; \
return test; \
} \
ocf_metadata_bit_struct(u8);
ocf_metadata_bit_struct(u16);
ocf_metadata_bit_struct(u32);
ocf_metadata_bit_struct(u64);
ocf_metadata_bit_struct(u128);
ocf_metadata_bit_func(dirty, u8);
ocf_metadata_bit_func(dirty, u16);
ocf_metadata_bit_func(dirty, u32);
ocf_metadata_bit_func(dirty, u64);
ocf_metadata_bit_func(dirty, u128);
ocf_metadata_bit_func(valid, u8);
ocf_metadata_bit_func(valid, u16);
ocf_metadata_bit_func(valid, u32);
ocf_metadata_bit_func(valid, u64);
ocf_metadata_bit_func(valid, u128);
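/*
* Note (added for clarity): each ocf_metadata_bit_func(what, type) expansion
* above generates a family of static helpers operating on the "what" field of
* struct ocf_metadata_map_##type, e.g. for (dirty, u8):
*
*	_ocf_metadata_test_dirty_u8()
*	_ocf_metadata_test_out_dirty_u8()
*	_ocf_metadata_clear_dirty_u8()
*	_ocf_metadata_set_dirty_u8()
*	_ocf_metadata_test_and_set_dirty_u8()
*	_ocf_metadata_test_and_clear_dirty_u8()
*/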

View File

@ -0,0 +1,39 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_CLEANING_POLICY_H__
#define __METADATA_CLEANING_POLICY_H__
/*
* GET
*/
static inline void
ocf_metadata_get_cleaning_policy(struct ocf_cache *cache,
ocf_cache_line_t line, struct cleaning_policy_meta *policy)
{
cache->metadata.iface.get_cleaning_policy(cache, line, policy);
}
/*
* SET
*/
static inline void
ocf_metadata_set_cleaning_policy(struct ocf_cache *cache,
ocf_cache_line_t line, struct cleaning_policy_meta *policy)
{
cache->metadata.iface.set_cleaning_policy(cache, line, policy);
}
/*
* FLUSH
*/
static inline void
ocf_metadata_flush_cleaning_policy(struct ocf_cache *cache,
ocf_cache_line_t line)
{
cache->metadata.iface.flush_cleaning_policy(cache, line);
}
#endif /* METADATA_CLEANING_POLICY_H_ */

View File

@ -0,0 +1,88 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "../utils/utils_cache_line.h"
/*
*
*/
void ocf_metadata_add_to_collision(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t core_line,
ocf_cache_line_t hash, ocf_cache_line_t cache_line)
{
ocf_cache_line_t prev_cache_line = ocf_metadata_get_hash(cache, hash);
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
ocf_cache_line_t hash_entries = cache->device->hash_table_entries;
ENV_BUG_ON(!(hash < hash_entries));
ENV_BUG_ON(!(cache_line < line_entries));
/* Setup new node */
ocf_metadata_set_core_info(cache, cache_line, core_id,
core_line);
/* Update collision info:
* - next is set to value from hash table;
* - previous is set to collision table entries value
*/
ocf_metadata_set_collision_info(cache, cache_line, prev_cache_line,
line_entries);
/* Update previous head */
if (prev_cache_line != line_entries) {
ocf_metadata_set_collision_prev(cache, prev_cache_line,
cache_line);
}
/* Update hash Table: hash table contains pointer to
* collision table so it contains indexes in collision table
*/
ocf_metadata_set_hash(cache, hash, cache_line);
}
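/*
* Illustration (added for clarity): after insertion the new cache line becomes
* the head of the collision chain for the given hash bucket:
*
*	hash[hash] -> cache_line -> prev_cache_line -> ... -> line_entries
*
* where the collision_table_entries value acts as the end-of-list marker.
*/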
/*
*
*/
void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id)
{
ocf_core_id_t core_id;
uint64_t core_sector;
ocf_cache_line_t hash_father;
ocf_cache_line_t prev_line, next_line;
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
ocf_cache_line_t hash_entries = cache->device->hash_table_entries;
ENV_BUG_ON(!(line < line_entries));
ocf_metadata_get_collision_info(cache, line, &next_line, &prev_line);
/* Update previous node if any. */
if (prev_line != line_entries)
ocf_metadata_set_collision_next(cache, prev_line, next_line);
/* Update next node if any. */
if (next_line != line_entries)
ocf_metadata_set_collision_prev(cache, next_line, prev_line);
ocf_metadata_get_core_info(cache, line, &core_id, &core_sector);
/* Update hash table, because if it was pointing to the given node it
* must now point to the given node's next
*/
hash_father = ocf_metadata_hash_func(cache, core_sector, core_id);
ENV_BUG_ON(!(hash_father < hash_entries));
if (ocf_metadata_get_hash(cache, hash_father) == line)
ocf_metadata_set_hash(cache, hash_father, next_line);
ocf_metadata_set_collision_info(cache, line,
line_entries, line_entries);
ocf_metadata_set_core_info(cache, line,
OCF_CORE_MAX, ULLONG_MAX);
}

View File

@ -0,0 +1,102 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_COLLISION_H__
#define __METADATA_COLLISION_H__
/**
* @brief Metadata list info structure
*/
struct ocf_metadata_list_info {
ocf_cache_line_t prev_col;
/*!< Previous cache line in collision list */
ocf_cache_line_t next_col;
/*!< Next cache line in collision list*/
ocf_cache_line_t partition_prev;
/*!< Previous cache line in the same partition*/
ocf_cache_line_t partition_next;
/*!< Next cache line in the same partition*/
ocf_part_id_t partition_id : 8;
/*!< ID of the partition this cache line is assigned to */
} __attribute__((packed));
/**
* @brief Metadata map structure
*/
struct ocf_metadata_map {
uint64_t core_line;
/*!< Core line address mapped by this structure */
uint16_t core_id;
/*!< ID of the core this cache line is assigned to */
uint8_t status[];
/*!< Entry status structure e.g. valid, dirty...*/
} __attribute__((packed));
static inline ocf_cache_line_t ocf_metadata_map_lg2phy(
struct ocf_cache *cache, ocf_cache_line_t coll_idx)
{
return cache->metadata.iface.layout_iface->lg2phy(cache,
coll_idx);
}
static inline ocf_cache_line_t ocf_metadata_map_phy2lg(
struct ocf_cache *cache, ocf_cache_line_t cache_line)
{
return cache->metadata.iface.layout_iface->phy2lg(cache,
cache_line);
}
static inline void ocf_metadata_set_collision_info(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t next, ocf_cache_line_t prev)
{
cache->metadata.iface.set_collision_info(cache, line, next, prev);
}
static inline void ocf_metadata_get_collision_info(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t *next, ocf_cache_line_t *prev)
{
cache->metadata.iface.get_collision_info(cache, line, next, prev);
}
static inline void ocf_metadata_set_collision_next(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t next)
{
cache->metadata.iface.set_collision_next(cache, line, next);
}
static inline void ocf_metadata_set_collision_prev(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t prev)
{
cache->metadata.iface.set_collision_prev(cache, line, prev);
}
static inline ocf_cache_line_t ocf_metadata_get_collision_next(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_collision_next(cache, line);
}
static inline ocf_cache_line_t ocf_metadata_get_collision_prev(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_collision_prev(cache, line);
}
void ocf_metadata_add_to_collision(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t core_line,
ocf_cache_line_t hash, ocf_cache_line_t cache_line);
void ocf_metadata_remove_from_collision(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_part_id_t part_id);
#endif /* METADATA_COLLISION_H_ */

View File

@ -0,0 +1,51 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_CORE_H__
#define __METADATA_CORE_H__
static inline void ocf_metadata_set_core_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_core_id_t core_id,
uint64_t core_sector)
{
cache->metadata.iface.set_core_info(cache, line, core_id,
core_sector);
}
static inline void ocf_metadata_get_core_info(struct ocf_cache *cache,
ocf_cache_line_t line, ocf_core_id_t *core_id,
uint64_t *core_sector)
{
cache->metadata.iface.get_core_info(cache, line, core_id,
core_sector);
}
static inline void ocf_metadata_get_core_and_part_id(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_core_id_t *core_id, ocf_part_id_t *part_id)
{
cache->metadata.iface.get_core_and_part_id(cache, line, core_id,
part_id);
}
static inline ocf_core_id_t ocf_metadata_get_core_id(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_core_id(cache, line);
}
static inline uint64_t ocf_metadata_get_core_sector(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_core_sector(cache, line);
}
static inline struct ocf_metadata_uuid *ocf_metadata_get_core_uuid(
struct ocf_cache *cache, ocf_core_id_t core_id)
{
return cache->metadata.iface.get_core_uuid(cache, core_id);
}
#endif /* METADATA_CORE_H_ */

View File

@ -0,0 +1,35 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_EVICTION_H__
#define __METADATA_EVICTION_H__
static inline void ocf_metadata_get_evicition_policy(
struct ocf_cache *cache, ocf_cache_line_t line,
union eviction_policy_meta *eviction)
{
cache->metadata.iface.get_eviction_policy(cache, line, eviction);
}
/*
* SET
*/
static inline void ocf_metadata_set_evicition_policy(
struct ocf_cache *cache, ocf_cache_line_t line,
union eviction_policy_meta *eviction)
{
cache->metadata.iface.set_eviction_policy(cache, line, eviction);
}
/*
* FLUSH
*/
static inline void ocf_metadata_flush_evicition_policy(
struct ocf_cache *cache, ocf_cache_line_t line)
{
cache->metadata.iface.flush_eviction_policy(cache, line);
}
#endif /* METADATA_EVICTION_H_ */

2462
src/metadata/metadata_hash.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,49 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_HASH_H__
#define __METADATA_HASH_H__
/**
* @file metadata_hash.h
* @brief Metadata Service - Hash Implementation
*/
#include "../ocf_request.h"
/**
* @brief Metadata hash element types
*/
enum ocf_metadata_segment {
metadata_segment_sb_config = 0, /*!< Super block conf */
metadata_segment_sb_runtime, /*!< Super block runtime */
metadata_segment_reserved, /*!< Reserved space on disk */
metadata_segment_core_config, /*!< Core Config Metadata */
metadata_segment_core_runtime, /*!< Core Runtime Metadata */
metadata_segment_core_uuid, /*!< Core UUID */
/* .... new fixed size sections go here */
metadata_segment_fixed_size_max,
metadata_segment_variable_size_start = metadata_segment_fixed_size_max,
/* sections with size dependent on cache device size go here: */
metadata_segment_cleaning = /*!< Cleaning policy */
metadata_segment_variable_size_start,
metadata_segment_eviction, /*!< Eviction policy */
metadata_segment_collision, /*!< Collision */
metadata_segment_list_info, /*!< Collision list info */
metadata_segment_hash, /*!< Hash */
/* .... new variable size sections go here */
metadata_segment_max, /*!< MAX */
};
/**
* @brief Get metadata interface implementation
*
* @return metadata interface
*/
const struct ocf_metadata_iface *metadata_hash_get_iface(void);
#endif /* METADATA_HASH_H_ */

629
src/metadata/metadata_io.c Normal file
View File

@ -0,0 +1,629 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_io.h"
#include "../ocf_priv.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../engine/engine_bf.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_allocator.h"
#include "../utils/utils_io.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_IO_DEBUG 0
#if 1 == OCF_METADATA_IO_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][IO] %s\n", __func__)
#define OCF_DEBUG_MSG(cache, msg) \
ocf_cache_log(cache, log_info, "[Metadata][IO] %s - %s\n", \
__func__, msg)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][IO] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
static void metadata_io_write_i_end_asynch(void *private_data, int error);
static int ocf_restart_meta_io(struct ocf_request *req);
static struct ocf_io_if meta_restart_if = {
.read = ocf_restart_meta_io,
.write = ocf_restart_meta_io
};
/*
* Get max pages for IO
*/
static uint32_t metadata_io_max_page(struct ocf_cache *cache)
{
return ocf_data_obj_get_max_io_size(&cache->device->obj) /
BYTES_TO_SECTORS(PAGE_SIZE);
}
/*
* Iterative read end callback
*/
static void metadata_io_read_i_atomic_end(struct ocf_io *io, int error)
{
struct metadata_io_request_atomic *meta_atom_req = io->priv1;
OCF_DEBUG_TRACE(ocf_data_obj_get_cache(io->obj));
meta_atom_req->error |= error;
env_completion_complete(&meta_atom_req->complete);
}
/*
* Iterative read request
*/
int metadata_io_read_i_atomic(struct ocf_cache *cache,
ocf_metadata_atomic_io_event_t hndl)
{
uint64_t i;
uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE;
uint64_t io_sectors_count = cache->device->collision_table_entries *
ocf_line_sectors(cache);
uint64_t count, curr_count;
int result = 0;
struct ocf_io *io;
ctx_data_t *data;
struct metadata_io_request_atomic meta_atom_req;
unsigned char step = 0;
OCF_DEBUG_TRACE(cache);
/* Allocate one 4k page for metadata*/
data = ctx_data_alloc(cache->owner, 1);
if (!data)
return -ENOMEM;
count = io_sectors_count;
for (i = 0; i < io_sectors_count; i += curr_count) {
/* Get sectors count of this IO iteration */
curr_count = MIN(max_sectors_count, count);
env_completion_init(&meta_atom_req.complete);
meta_atom_req.error = 0;
/* Reset position in data buffer */
ctx_data_seek(cache->owner, data, ctx_data_seek_begin, 0);
/* Allocate new IO */
io = ocf_new_cache_io(cache);
if (!io) {
result = -ENOMEM;
break;
}
/* Setup IO */
ocf_io_configure(io,
cache->device->metadata_offset +
SECTORS_TO_BYTES(i),
SECTORS_TO_BYTES(curr_count),
OCF_READ, 0, 0);
ocf_io_set_cmpl(io, &meta_atom_req, NULL,
metadata_io_read_i_atomic_end);
result = ocf_io_set_data(io, data, 0);
if (result) {
ocf_io_put(io);
break;
}
/* Submit IO */
ocf_dobj_submit_metadata(io);
ocf_io_put(io);
/* Wait for completion of IO */
env_completion_wait(&meta_atom_req.complete);
/* Check for error */
if (meta_atom_req.error) {
result = meta_atom_req.error;
break;
}
result |= hndl(cache, i, curr_count, data);
if (result)
break;
count -= curr_count;
OCF_COND_RESCHED(step, 128);
}
/* Memory free */
ctx_data_free(cache->owner, data);
return result;
}
static int ocf_restart_meta_io(struct ocf_request *req)
{
struct ocf_io *io;
struct metadata_io_request *meta_io_req;
struct ocf_cache *cache;
int i;
int ret;
cache = req->cache;
meta_io_req = req->priv;
/* Fill with the latest metadata. */
OCF_METADATA_LOCK_RD();
for (i = 0; i < meta_io_req->count; i++) {
meta_io_req->on_meta_fill(cache, meta_io_req->data,
meta_io_req->page + i, meta_io_req->context);
}
OCF_METADATA_UNLOCK_RD();
io = ocf_new_cache_io(cache);
if (!io) {
metadata_io_write_i_end_asynch(meta_io_req, -ENOMEM);
return 0;
}
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(meta_io_req->page),
PAGES_TO_BYTES(meta_io_req->count),
OCF_WRITE, 0, 0);
ocf_io_set_default_cmpl(io, meta_io_req,
metadata_io_write_i_end_asynch);
ret = ocf_io_set_data(io, meta_io_req->data, 0);
if (ret) {
ocf_io_put(io);
metadata_io_write_i_end_asynch(meta_io_req, ret);
return ret;
}
ocf_dobj_submit_io(io);
return 0;
}
/*
* Iterative asynchronous write callback
*/
static void metadata_io_write_i_end_asynch(void *private_data, int error)
{
struct metadata_io_request *request = (private_data);
struct metadata_io_request_asynch *a_req;
struct ocf_cache *cache;
OCF_CHECK_NULL(request);
cache = request->cache;
a_req = request->asynch;
OCF_CHECK_NULL(a_req);
OCF_CHECK_NULL(a_req->on_complete);
if (error) {
request->error |= error;
request->asynch->error |= error;
}
if (env_atomic_dec_return(&request->req_remaining))
return;
OCF_DEBUG_PARAM(cache, "Page = %u", request->page);
ctx_data_free(cache->owner, request->data);
request->data = NULL;
if (env_atomic_dec_return(&a_req->req_remaining)) {
env_atomic_set(&request->finished, 1);
ocf_metadata_updater_kick(cache);
return;
}
OCF_DEBUG_MSG(cache, "Asynchronous IO completed");
/* All IOs have been finished, call IO end callback */
a_req->on_complete(request->cache, a_req->context, request->error);
/*
* If it's the last request, we mark it as finished
* after calling the IO end callback
*/
env_atomic_set(&request->finished, 1);
ocf_metadata_updater_kick(cache);
}
static void metadata_io_req_error(struct ocf_cache *cache,
struct metadata_io_request_asynch *a_req,
uint32_t i, int error)
{
a_req->error |= error;
a_req->reqs[i].error |= error;
a_req->reqs[i].count = 0;
if (a_req->reqs[i].data)
ctx_data_free(cache->owner, a_req->reqs[i].data);
a_req->reqs[i].data = NULL;
}
/*
* Asynchronous iterative write request
*/
int metadata_io_write_i_asynch(struct ocf_cache *cache, uint32_t queue,
void *context, uint32_t page, uint32_t count,
ocf_metadata_io_event_t fill_hndl,
ocf_metadata_io_hndl_on_write_t compl_hndl)
{
uint32_t curr_count, written;
uint32_t max_count = metadata_io_max_page(cache);
uint32_t io_count = DIV_ROUND_UP(count, max_count);
uint32_t i, i_fill;
int error = 0, ret;
struct ocf_io *io;
/* Allocation and initialization of asynchronous metadata IO request */
struct metadata_io_request_asynch *a_req;
if (count == 0)
return 0;
a_req = env_zalloc(sizeof(*a_req), ENV_MEM_NOIO);
if (!a_req)
return -OCF_ERR_NO_MEM;
env_atomic_set(&a_req->req_remaining, io_count);
env_atomic_set(&a_req->req_active, io_count);
a_req->on_complete = compl_hndl;
a_req->context = context;
a_req->page = page;
/* Allocate particular requests and initialize them */
OCF_REALLOC_CP(&a_req->reqs, sizeof(a_req->reqs[0]),
io_count, &a_req->reqs_limit);
if (!a_req->reqs) {
env_free(a_req);
ocf_cache_log(cache, log_warn,
"No memory during metadata IO\n");
return -OCF_ERR_NO_MEM;
}
/* IO Requests initialization */
for (i = 0; i < io_count; i++) {
env_atomic_set(&(a_req->reqs[i].req_remaining), 1);
env_atomic_set(&(a_req->reqs[i].finished), 0);
a_req->reqs[i].asynch = a_req;
}
OCF_DEBUG_PARAM(cache, "IO count = %u", io_count);
i = 0;
written = 0;
while (count) {
/* Get pages count of this IO iteration */
if (count > max_count)
curr_count = max_count;
else
curr_count = count;
/* Fill request */
a_req->reqs[i].cache = cache;
a_req->reqs[i].context = context;
a_req->reqs[i].page = page + written;
a_req->reqs[i].count = curr_count;
a_req->reqs[i].on_meta_fill = fill_hndl;
a_req->reqs[i].fl_req.io_if = &meta_restart_if;
a_req->reqs[i].fl_req.io_queue = queue;
a_req->reqs[i].fl_req.cache = cache;
a_req->reqs[i].fl_req.priv = &a_req->reqs[i];
a_req->reqs[i].fl_req.info.internal = true;
/*
* We don't want to allocate a map for this request
* in threads.
*/
a_req->reqs[i].fl_req.map = LIST_POISON1;
INIT_LIST_HEAD(&a_req->reqs[i].list);
a_req->reqs[i].data = ctx_data_alloc(cache->owner, curr_count);
if (!a_req->reqs[i].data) {
error = -OCF_ERR_NO_MEM;
metadata_io_req_error(cache, a_req, i, error);
break;
}
/* Issue IO if it is not overlapping with anything else */
ret = metadata_updater_check_overlaps(cache, &a_req->reqs[i]);
if (ret == 0) {
/* Allocate new IO */
io = ocf_new_cache_io(cache);
if (!io) {
error = -OCF_ERR_NO_MEM;
metadata_io_req_error(cache, a_req, i, error);
break;
}
for (i_fill = 0; i_fill < curr_count; i_fill++) {
fill_hndl(cache, a_req->reqs[i].data,
page + written + i_fill,
context);
}
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(a_req->reqs[i].page),
PAGES_TO_BYTES(a_req->reqs[i].count),
OCF_WRITE, 0, 0);
ocf_io_set_default_cmpl(io, &a_req->reqs[i],
metadata_io_write_i_end_asynch);
error = ocf_io_set_data(io, a_req->reqs[i].data, 0);
if (error) {
ocf_io_put(io);
metadata_io_req_error(cache, a_req, i, error);
break;
}
ocf_dobj_submit_io(io);
}
count -= curr_count;
written += curr_count;
i++;
}
if (error == 0) {
/* No error, return 0 that indicates operation successful */
return 0;
}
OCF_DEBUG_MSG(cache, "ERROR");
if (i == 0) {
/*
* If no requests were submitted, we just call completion
* callback, free memory and return error.
*/
compl_hndl(cache, context, error);
OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit);
env_free(a_req);
return error;
}
/*
* Decrement total remaining requests by the number of IOs that were not
* triggered. If we reached zero, we need to call the completion callback.
*/
if (env_atomic_sub_return(io_count - i, &a_req->req_remaining) == 0)
compl_hndl(cache, context, error);
/*
* Decrement total active requests by the number of IOs that were not
* triggered. If we reached zero, we need to free memory.
*/
if (env_atomic_sub_return(io_count - i, &a_req->req_active) == 0) {
OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit);
env_free(a_req);
}
return error;
}
int ocf_metadata_io_init(ocf_cache_t cache)
{
return ocf_metadata_updater_init(cache);
}
void ocf_metadata_io_deinit(ocf_cache_t cache)
{
ocf_metadata_updater_stop(cache);
}
static void metadata_io_end(struct ocf_io *io, int error)
{
struct metadata_io *mio = io->priv1;
ctx_data_t *data = ocf_io_get_data(io);
uint32_t page = BYTES_TO_PAGES(io->addr);
uint32_t count = BYTES_TO_PAGES(io->bytes);
struct ocf_cache *cache = mio->cache;
uint32_t i = 0;
if (error) {
mio->error |= error;
goto out;
}
for (i = 0; mio->dir == OCF_READ && i < count; i++) {
mio->error |= mio->hndl_fn(cache, data, page + i,
mio->hndl_cntx);
}
out:
ctx_data_free(cache->owner, data);
ocf_io_put(io);
if (env_atomic_dec_return(&mio->rq_remaining))
return;
env_completion_complete(&mio->completion);
}
static int metadata_submit_io(
struct ocf_cache *cache,
struct metadata_io *mio,
uint32_t count,
uint32_t written)
{
ctx_data_t *data;
struct ocf_io *io;
int err;
int i;
/* Allocate IO */
io = ocf_new_cache_io(cache);
if (!io) {
err = -ENOMEM;
goto error;
}
/* Allocate data buffer for this IO */
data = ctx_data_alloc(cache->owner, count);
if (!data) {
err = -ENOMEM;
goto put_io;
}
/* Fill data */
for (i = 0; mio->dir == OCF_WRITE && i < count; i++) {
err = mio->hndl_fn(cache, data,
mio->page + written + i, mio->hndl_cntx);
if (err)
goto free_data;
}
/* Setup IO */
ocf_io_configure(io,
PAGES_TO_BYTES(mio->page + written),
PAGES_TO_BYTES(count),
mio->dir, 0, 0);
ocf_io_set_cmpl(io, mio, NULL, metadata_io_end);
err = ocf_io_set_data(io, data, 0);
if (err)
goto free_data;
/* Submit IO */
env_atomic_inc(&mio->rq_remaining);
ocf_dobj_submit_io(io);
return 0;
free_data:
ctx_data_free(cache->owner, data);
put_io:
ocf_io_put(io);
error:
mio->error = err;
return err;
}
/*
*
*/
static int metadata_io(struct metadata_io *mio)
{
uint32_t max_count = metadata_io_max_page(mio->cache);
uint32_t this_count, written = 0;
uint32_t count = mio->count;
unsigned char step = 0;
int err;
struct ocf_cache *cache = mio->cache;
/* Check direction value correctness */
switch (mio->dir) {
case OCF_WRITE:
case OCF_READ:
break;
default:
return -EINVAL;
}
env_atomic_set(&mio->rq_remaining, 1);
env_completion_init(&mio->completion);
while (count) {
this_count = MIN(count, max_count);
err = metadata_submit_io(cache, mio, this_count, written);
if (err)
break;
/* Update counters */
count -= this_count;
written += this_count;
OCF_COND_RESCHED(step, 128);
}
if (env_atomic_dec_return(&mio->rq_remaining) == 0)
env_completion_complete(&mio->completion);
/* Wait for all IO to be finished */
env_completion_wait(&mio->completion);
return mio->error;
}
/*
*
*/
int metadata_io_write_i(struct ocf_cache *cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx)
{
struct metadata_io mio = {
.dir = OCF_WRITE,
.cache = cache,
.page = page,
.count = count,
.hndl_fn = hndl_fn,
.hndl_cntx = hndl_cntx,
};
return metadata_io(&mio);
}
/*
*
*/
int metadata_io_read_i(struct ocf_cache *cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx)
{
struct metadata_io mio = {
.dir = OCF_READ,
.cache = cache,
.page = page,
.count = count,
.hndl_fn = hndl_fn,
.hndl_cntx = hndl_cntx,
};
return metadata_io(&mio);
}
/*
*
*/
static int metadata_io_write_fill(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context)
{
ctx_data_wr_check(cache->owner, data, context, PAGE_SIZE);
return 0;
}
/*
* Write request
*/
int metadata_io_write(struct ocf_cache *cache,
void *data, uint32_t page)
{
struct metadata_io mio = {
.dir = OCF_WRITE,
.cache = cache,
.page = page,
.count = 1,
.hndl_fn = metadata_io_write_fill,
.hndl_cntx = data,
};
return metadata_io(&mio);
}

188
src/metadata/metadata_io.h Normal file
View File

@ -0,0 +1,188 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_IO_H__
#define __METADATA_IO_H__
/**
* @file metadata_io.h
* @brief Metadata IO utilities
*/
/**
* @brief Metadata IO event
*
* The client of the metadata IO service is informed through this event:
* - on completion of read from cache device
* - on fill data which will be written into cache device
*
* @param data[in,out] Environment data for read or write IO
* @param page[in] Page which is issued
* @param context[in] context caller
*
* @retval 0 Success
* @retval Non-zero Error which will finally be returned to the caller
*/
typedef int (*ocf_metadata_io_event_t)(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context);
/**
* @brief Metadata write end callback
*
* @param cache - Cache instance
* @param context - Read context
* @param error - error
* @param page - page that was written
*/
typedef void (*ocf_metadata_io_hndl_on_write_t)(struct ocf_cache *cache,
void *context, int error);
struct metadata_io_request_asynch;
/*
* IO request context
*/
struct metadata_io_request {
struct ocf_cache *cache;
void *context;
uint32_t page;
uint32_t count;
ocf_metadata_io_event_t on_meta_fill;
env_atomic req_remaining;
ctx_data_t *data;
env_completion completion;
int error;
struct metadata_io_request_asynch *asynch;
env_atomic finished;
struct ocf_request fl_req;
struct list_head list;
};
/*
* Atomic IO request context
*/
struct metadata_io_request_atomic {
env_completion complete;
int error;
};
/*
*
*/
struct metadata_io {
int error;
int dir;
struct ocf_cache *cache;
uint32_t page;
uint32_t count;
env_completion completion;
env_atomic rq_remaining;
ocf_metadata_io_event_t hndl_fn;
void *hndl_cntx;
};
/*
* Asynchronous IO request context
*/
struct metadata_io_request_asynch {
struct ocf_cache *cache;
struct metadata_io_request *reqs;
void *context;
int error;
size_t reqs_limit;
env_atomic req_remaining;
env_atomic req_active;
uint32_t page;
ocf_metadata_io_hndl_on_write_t on_complete;
};
/**
* @brief Metadata read end callback
*
* @param cache Cache instance
* @param sector_addr Begin sector of metadata
* @param sector_no Number of sectors
* @param data Data environment buffer with atomic metadata
*
* @retval 0 Success
* @retval Non-zero Error which will finally be returned to the caller
*/
typedef int (*ocf_metadata_atomic_io_event_t)(
struct ocf_cache *cache, uint64_t sector_addr,
uint32_t sector_no, ctx_data_t *data);
/**
* @brief Write page request
*
* @param cache - Cache instance
* @param data - Data to be written for specified page
* @param page - Page of SSD (cache device) where data has to be placed
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_write(struct ocf_cache *cache,
void *data, uint32_t page);
int metadata_io_read_i_atomic(struct ocf_cache *cache,
ocf_metadata_atomic_io_event_t hndl);
/**
* @brief Iterative pages write
*
* @param cache - Cache instance
* @param page - Start page of SSD (cache device) where data will be written
* @param count - Counts of page to be processed
* @param hndl_fn - Fill callback is called to fill each pages with data
* @param hndl_cntx - Caller context which is passed on fill callback request
*
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_write_i(struct ocf_cache *cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx);
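/*
* Usage sketch (illustrative, not part of the original API): a fill callback
* copies one page of caller data into the context data buffer for every page
* issued by metadata_io_write_i(); the my_* names are assumptions.
*
*	static int my_fill(struct ocf_cache *cache, ctx_data_t *data,
*			uint32_t page, void *context)
*	{
*		struct my_buffer *buf = context;
*
*		ctx_data_wr_check(cache->owner, data,
*				buf->mem + PAGE_SIZE * (page - buf->first_page),
*				PAGE_SIZE);
*		return 0;
*	}
*
*	result = metadata_io_write_i(cache, first_page, pages_count,
*			my_fill, &buf);
*/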
/**
* @brief Iterative pages read
*
* @param cache - Cache instance
* @param page - Start page of SSD (cache device) from which data will be read
* @param count - Counts of page to be processed
* @param hndl_fn - Callback function is called on each page read completion
* @param hndl_cntx - Caller context passed during handle function call
*
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_read_i(struct ocf_cache *cache,
uint32_t page, uint32_t count,
ocf_metadata_io_event_t hndl_fn, void *hndl_cntx);
/**
* @brief Iterative asynchronous pages write
*
* @param cache - Cache instance
* @param context - Read context
* @param page - Start page of SSD (cache device) where data will be written
* @param count - Counts of page to be processed
* @param fill - Fill callback
* @param complete - All IOs completed callback
*
* @return 0 - No errors, otherwise error occurred
*/
int metadata_io_write_i_asynch(struct ocf_cache *cache, uint32_t queue,
void *context, uint32_t page, uint32_t count,
ocf_metadata_io_event_t fill_hndl,
ocf_metadata_io_hndl_on_write_t compl_hndl);
/**
* Function for initializing metadata io.
*/
int ocf_metadata_io_init(ocf_cache_t cache);
/**
* Function for deinitializing metadata io.
*/
void ocf_metadata_io_deinit(ocf_cache_t cache);
#endif /* METADATA_IO_UTILS_H_ */

View File

@ -0,0 +1,126 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "../utils/utils_cache_line.h"
static bool _is_cache_line_acting(struct ocf_cache *cache,
uint32_t cache_line, ocf_core_id_t core_id,
uint64_t start_line, uint64_t end_line)
{
ocf_core_id_t tmp_core_id;
uint64_t core_line;
ocf_metadata_get_core_info(cache, cache_line,
&tmp_core_id, &core_line);
if (core_id != OCF_CORE_ID_INVALID) {
if (core_id != tmp_core_id)
return false;
if (core_line < start_line || core_line > end_line)
return false;
} else if (tmp_core_id == OCF_CORE_ID_INVALID) {
return false;
}
return true;
}
/*
* Iterates over cache lines that belong to the core device with
* core ID = core_id whose core byte addresses are in the range
* [start_byte, end_byte] and applies actor(cache, cache_line) to all
* matching cache lines
*
* set part_id to PARTITION_INVALID to ignore partition assignment
*
* METADATA lock must be held before calling this function
*/
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor)
{
uint32_t step = 0;
ocf_cache_line_t i, next_i;
uint64_t start_line, end_line;
int ret = 0;
start_line = ocf_bytes_2_lines(cache, start_byte);
end_line = ocf_bytes_2_lines(cache, end_byte);
if (part_id != PARTITION_INVALID) {
for (i = cache->user_parts[part_id].runtime->head;
i != cache->device->collision_table_entries;
i = next_i) {
next_i = ocf_metadata_get_partition_next(cache, i);
if (_is_cache_line_acting(cache, i, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(cache, i))
ret = -EAGAIN;
else
actor(cache, i);
}
OCF_COND_RESCHED_DEFAULT(step);
}
} else {
for (i = 0; i < cache->device->collision_table_entries; ++i) {
if (_is_cache_line_acting(cache, i, core_id,
start_line, end_line)) {
if (ocf_cache_line_is_used(cache, i))
ret = -EAGAIN;
else
actor(cache, i);
}
OCF_COND_RESCHED_DEFAULT(step);
}
}
return ret;
}
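/*
* Usage sketch (illustrative): ocf_metadata_sparse_range() below is the
* canonical caller - it passes an actor that invalidates every matching cache
* line. A custom caller would look like (my_actor is an assumption):
*
*	static void my_actor(struct ocf_cache *cache, ocf_cache_line_t line)
*	{
*		... act on a single matching cache line ...
*	}
*
*	ret = ocf_metadata_actor(cache, PARTITION_INVALID, core_id,
*			start_byte, end_byte, my_actor);
*/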
/* the caller must hold the relevant cache block concurrency reader lock
* and the metadata lock
*/
void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
uint32_t cache_line)
{
ocf_part_id_t partition_id =
ocf_metadata_get_partition_id(cache, cache_line);
ocf_metadata_remove_from_collision(cache, cache_line, partition_id);
ocf_metadata_remove_from_partition(cache, partition_id, cache_line);
ocf_metadata_add_to_free_list(cache, cache_line);
}
static void _ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
uint32_t cache_line)
{
set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_sector(cache),
cache_line);
/*
* This is needed especially when removing an inactive core
*/
metadata_clear_dirty(cache, cache_line);
}
/* caller must hold metadata lock
* set core_id to -1 to clean the whole cache device
*/
int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte)
{
return ocf_metadata_actor(cache, PARTITION_INVALID, core_id,
start_byte, end_byte, _ocf_metadata_sparse_cache_line);
}

View File

@ -0,0 +1,30 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_MISC_H__
#define __METADATA_MISC_H__
static inline ocf_cache_line_t ocf_metadata_hash_func(ocf_cache_t cache,
uint64_t cache_line_num, ocf_core_id_t core_id)
{
return (ocf_cache_line_t) ((cache_line_num * (core_id + 1)) %
cache->device->hash_table_entries);
}
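/*
* Example (added for clarity): with hash_table_entries == 1024, core line 4100
* of core 1 hashes to (4100 * (1 + 1)) % 1024 == 8, so lookup for that core
* line starts at bucket 8 of the hash table.
*/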
void ocf_metadata_sparse_cache_line(struct ocf_cache *cache,
ocf_cache_line_t cache_line);
int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id,
uint64_t start_byte, uint64_t end_byte);
typedef void (*ocf_metadata_actor_t)(struct ocf_cache *cache,
ocf_cache_line_t cache_line);
int ocf_metadata_actor(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_core_id_t core_id,
uint64_t start_byte, uint64_t end_byte,
ocf_metadata_actor_t actor);
#endif /* __METADATA_MISC_H__ */

View File

@ -0,0 +1,227 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "metadata.h"
#include "../utils/utils_part.h"
/* Sets the given collision_index as the new _head_ of the Partition list. */
static void update_partition_head(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line)
{
struct ocf_user_part *part = &cache->user_parts[part_id];
part->runtime->head = line;
}
void ocf_metadata_remove_from_free_list(struct ocf_cache *cache,
ocf_cache_line_t cline)
{
struct ocf_part *free_list = cache->device->freelist_part;
int is_head, is_tail;
ocf_part_id_t invalid_part_id = PARTITION_INVALID;
ocf_cache_line_t prev, next;
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
ENV_BUG_ON(cline >= line_entries);
/* Get Partition info */
ocf_metadata_get_partition_info(cache, cline, NULL, &next, &prev);
/* Find out if this node is Partition _head_ */
is_head = (prev == line_entries);
is_tail = (next == line_entries);
/* Case 1: The node is the head and the only node in the list, so
* unlink it and mark the list as empty.
*/
if (is_head && (free_list->curr_size == 1)) {
ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
line_entries, line_entries);
free_list->head = line_entries;
free_list->tail = line_entries;
} else if (is_head) {
/* Case 2: else if this collision_index is partition list head,
* but many nodes, update head and return
*/
ENV_BUG_ON(next >= line_entries);
free_list->head = next;
ocf_metadata_set_partition_prev(cache, next, line_entries);
ocf_metadata_set_partition_next(cache, cline, line_entries);
} else if (is_tail) {
/* Case 3: else if this cline is partition list tail */
ENV_BUG_ON(prev >= line_entries);
free_list->tail = prev;
ocf_metadata_set_partition_prev(cache, cline, line_entries);
ocf_metadata_set_partition_next(cache, prev, line_entries);
} else {
/* Case 4: else this collision_index is a middle node.
* There is no change to the head and the tail pointers.
*/
ENV_BUG_ON(next >= line_entries || prev >= line_entries);
/* Update prev and next nodes */
ocf_metadata_set_partition_prev(cache, next, prev);
ocf_metadata_set_partition_next(cache, prev, next);
/* Update the given node */
ocf_metadata_set_partition_info(cache, cline, invalid_part_id,
line_entries, line_entries);
}
free_list->curr_size--;
}
void ocf_metadata_add_to_free_list(struct ocf_cache *cache,
ocf_cache_line_t line)
{
struct ocf_part *free_list = cache->device->freelist_part;
ocf_cache_line_t tail;
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
ocf_part_id_t invalid_part_id = PARTITION_INVALID;
ENV_BUG_ON(line >= line_entries);
if (free_list->curr_size == 0) {
free_list->head = line;
free_list->tail = line;
ocf_metadata_set_partition_info(cache, line, invalid_part_id,
line_entries, line_entries);
} else {
tail = free_list->tail;
ENV_BUG_ON(tail >= line_entries);
ocf_metadata_set_partition_info(cache, line, invalid_part_id,
line_entries, tail);
ocf_metadata_set_partition_next(cache, tail, line);
free_list->tail = line;
}
free_list->curr_size++;
}
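/*
* Illustration (added for clarity): freed cache lines are appended at the
* tail, so after adding line X to a non-empty free list:
*
*	head -> ... -> old_tail -> X	(X becomes free_list->tail)
*
* with collision_table_entries used as the end-of-list marker in both
* directions.
*/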
/* Adds the given collision_index to the _head_ of the Partition list */
void ocf_metadata_add_to_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line)
{
ocf_cache_line_t line_head;
ocf_cache_line_t line_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[part_id];
ENV_BUG_ON(!(line < line_entries));
/* First node to be added. */
if (!part->runtime->curr_size) {
update_partition_head(cache, part_id, line);
ocf_metadata_set_partition_info(cache, line, part_id,
line_entries, line_entries);
if (!ocf_part_is_valid(part)) {
/* Partition becomes non-empty and is not valid -
* update list of partitions
*/
ocf_part_sort(cache);
}
} else {
/* Not the first node to be added. */
line_head = part->runtime->head;
ENV_BUG_ON(!(line_head < line_entries));
ocf_metadata_set_partition_info(cache, line, part_id,
line_head, line_entries);
ocf_metadata_set_partition_prev(cache, line_head, line);
update_partition_head(cache, part_id, line);
}
part->runtime->curr_size++;
}
/* Deletes the node with the given collision_index from the Partition list */
void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line)
{
int is_head, is_tail;
ocf_cache_line_t prev_line, next_line;
uint32_t line_entries = cache->device->collision_table_entries;
struct ocf_user_part *part = &cache->user_parts[part_id];
ENV_BUG_ON(!(line < line_entries));
/* Get Partition info */
ocf_metadata_get_partition_info(cache, line, NULL,
&next_line, &prev_line);
/* Find out if this node is Partition _head_ */
is_head = (prev_line == line_entries);
is_tail = (next_line == line_entries);
/* Case 1: The node is the head and the only node in the list, so
* unlink it and mark the list as empty.
*/
if (is_head && (part->runtime->curr_size == 1)) {
ocf_metadata_set_partition_info(cache, line,
part_id, line_entries, line_entries);
update_partition_head(cache, part_id, line_entries);
if (!ocf_part_is_valid(part)) {
/* Partition becomes empty and is not valid -
* update list of partitions
*/
ocf_part_sort(cache);
}
} else if (is_head) {
/* Case 2: else if this collision_index is partition list head,
* but many nodes, update head and return
*/
ENV_BUG_ON(!(next_line < line_entries));
update_partition_head(cache, part_id, next_line);
ocf_metadata_set_partition_next(cache, line, line_entries);
ocf_metadata_set_partition_prev(cache, next_line,
line_entries);
} else if (is_tail) {
/* Case 3: else if this collision_index is partition list tail
*/
ENV_BUG_ON(!(prev_line < line_entries));
ocf_metadata_set_partition_prev(cache, line, line_entries);
ocf_metadata_set_partition_next(cache, prev_line,
line_entries);
} else {
/* Case 4: else this collision_index is a middle node.
* There is no change to the head and the tail pointers.
*/
ENV_BUG_ON(!(next_line < line_entries));
ENV_BUG_ON(!(prev_line < line_entries));
/* Update prev and next nodes */
ocf_metadata_set_partition_next(cache, prev_line, next_line);
ocf_metadata_set_partition_prev(cache, next_line, prev_line);
/* Update the given node */
ocf_metadata_set_partition_info(cache, line, part_id,
line_entries, line_entries);
}
part->runtime->curr_size--;
}

View File

@ -0,0 +1,78 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_PARTITION_H__
#define __METADATA_PARTITION_H__
#include "metadata_partition_structs.h"
#include "../ocf_cache_priv.h"
#define PARTITION_DEFAULT 0
#define PARTITION_INVALID ((ocf_part_id_t)-1)
#define PARTITION_SIZE_MAX ((ocf_cache_line_t)-1)
static inline ocf_part_id_t ocf_metadata_get_partition_id(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_partition_id(cache, line);
}
static inline ocf_cache_line_t ocf_metadata_get_partition_next(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_partition_next(cache, line);
}
static inline ocf_cache_line_t ocf_metadata_get_partition_prev(
struct ocf_cache *cache, ocf_cache_line_t line)
{
return cache->metadata.iface.get_partition_prev(cache, line);
}
static inline void ocf_metadata_get_partition_info(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_part_id_t *part_id, ocf_cache_line_t *next_line,
ocf_cache_line_t *prev_line)
{
cache->metadata.iface.get_partition_info(cache, line, part_id,
next_line, prev_line);
}
static inline void ocf_metadata_set_partition_next(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t next_line)
{
cache->metadata.iface.set_partition_next(cache, line, next_line);
}
static inline void ocf_metadata_set_partition_prev(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_cache_line_t prev_line)
{
cache->metadata.iface.set_partition_prev(cache, line, prev_line);
}
static inline void ocf_metadata_set_partition_info(
struct ocf_cache *cache, ocf_cache_line_t line,
ocf_part_id_t part_id, ocf_cache_line_t next_line,
ocf_cache_line_t prev_line)
{
cache->metadata.iface.set_partition_info(cache, line, part_id,
next_line, prev_line);
}
void ocf_metadata_add_to_free_list(struct ocf_cache *cache,
ocf_cache_line_t cline);
void ocf_metadata_remove_from_free_list(struct ocf_cache *cache,
ocf_cache_line_t cline);
void ocf_metadata_add_to_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line);
void ocf_metadata_remove_from_partition(struct ocf_cache *cache,
ocf_part_id_t part_id, ocf_cache_line_t line);
#endif /* __METADATA_PARTITION_H__ */

View File

@ -0,0 +1,50 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __METADATA_PARTITION_STRUCTS_H__
#define __METADATA_PARTITION_STRUCTS_H__
#include "../utils/utils_list.h"
#include "../cleaning/cleaning.h"
#include "../eviction/eviction.h"
struct ocf_part {
ocf_cache_line_t head;
ocf_cache_line_t tail;
uint32_t curr_size;
};
struct ocf_user_part_config {
char name[OCF_IO_CLASS_NAME_MAX];
uint32_t min_size;
uint32_t max_size;
int16_t priority;
ocf_cache_mode_t cache_mode;
struct {
uint8_t valid : 1;
uint8_t added : 1;
uint8_t eviction : 1;
/*!< This bit is set during partition sorting
* and means that eviction from this partition is allowed
*/
} flags;
};
struct ocf_user_part_runtime {
uint32_t curr_size;
uint32_t head;
struct eviction_policy eviction;
struct cleaning_policy cleaning;
};
struct ocf_user_part {
struct ocf_user_part_config *config;
struct ocf_user_part_runtime *runtime;
struct ocf_lst_entry lst_valid;
};
#endif /* __METADATA_PARTITION_STRUCTS_H__ */

609
src/metadata/metadata_raw.c Normal file
View File

@ -0,0 +1,609 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "metadata.h"
#include "metadata_hash.h"
#include "metadata_raw.h"
#include "metadata_io.h"
#include "metadata_raw_atomic.h"
#include "../ocf_def_priv.h"
#define OCF_METADATA_RAW_DEBUG 0
#if 1 == OCF_METADATA_RAW_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Metadata][Raw] %s\n", __func__)
#define OCF_DEBUG_MSG(cache, msg) \
ocf_cache_log(cache, log_info, "[Metadata][Raw] %s - %s\n", \
__func__, msg)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Metadata][Raw] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
/*******************************************************************************
* Common RAW Implementation
******************************************************************************/
/*
* Check if page is valid for specified RAW descriptor
*/
static bool _raw_ssd_page_is_valid(struct ocf_metadata_raw *raw, uint32_t page)
{
ENV_BUG_ON(page < raw->ssd_pages_offset);
ENV_BUG_ON(page >= (raw->ssd_pages_offset + raw->ssd_pages));
return true;
}
/*******************************************************************************
* RAW RAM Implementation
******************************************************************************/
#define _RAW_RAM_ADDR(raw, line) \
(raw->mem_pool + (((uint64_t)raw->entry_size * (line))))
#define _RAW_RAM_PAGE(raw, line) \
((line) / raw->entries_in_page)
#define _RAW_RAM_PAGE_SSD(raw, line) \
(raw->ssd_pages_offset + _RAW_RAM_PAGE(raw, line))
#define _RAW_RAM_ADDR_PAGE(raw, line) \
(_RAW_RAM_ADDR(raw, \
_RAW_RAM_PAGE(raw, line) * raw->entries_in_page))
#define _RAW_RAM_GET(raw, line, data) \
env_memcpy(data, raw->entry_size, _RAW_RAM_ADDR(raw, (line)), \
raw->entry_size)
#define _RAW_RAM_SET(raw, line, data) \
env_memcpy(_RAW_RAM_ADDR(raw, line), raw->entry_size, \
data, raw->entry_size)
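/*
* Example (added for clarity; entry_size == 8 and entries_in_page ==
* PAGE_SIZE / 8 == 512 are assumed values for illustration): cache line 1000
* is then located at:
*
*	_RAW_RAM_ADDR(raw, 1000)     == raw->mem_pool + 8 * 1000
*	_RAW_RAM_PAGE(raw, 1000)     == 1000 / 512 == 1
*	_RAW_RAM_PAGE_SSD(raw, 1000) == raw->ssd_pages_offset + 1
*/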
/*
* RAM Implementation - De-Initialize
*/
static int _raw_ram_deinit(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
OCF_DEBUG_TRACE(cache);
if (raw->mem_pool) {
env_vfree(raw->mem_pool);
raw->mem_pool = NULL;
}
return 0;
}
/*
* RAM Implementation - Initialize
*/
static int _raw_ram_init(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
size_t mem_pool_size;
OCF_DEBUG_TRACE(cache);
/* Allocate memory pool for entries */
mem_pool_size = raw->ssd_pages;
mem_pool_size *= PAGE_SIZE;
raw->mem_pool_limit = mem_pool_size;
raw->mem_pool = env_vzalloc(mem_pool_size);
if (!raw->mem_pool)
return -ENOMEM;
return 0;
}
/*
* RAM Implementation - Size of
*/
static size_t _raw_ram_size_of(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
size_t size;
size = raw->ssd_pages;
size *= PAGE_SIZE;
return size;
}
/*
* RAM Implementation - Size on SSD
*/
static uint32_t _raw_ram_size_on_ssd(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
const size_t alignment = 128 * KiB / PAGE_SIZE;
return DIV_ROUND_UP(raw->ssd_pages, alignment) * alignment;
}
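/*
* Example (added for clarity, assuming PAGE_SIZE == 4 KiB): alignment ==
* 128 KiB / 4 KiB == 32 pages, so a segment of 100 in-memory pages occupies
* DIV_ROUND_UP(100, 32) * 32 == 128 pages on the cache device.
*/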
/*
* RAM Implementation - Checksum
*/
static uint32_t _raw_ram_checksum(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
uint64_t i;
uint32_t step = 0;
uint32_t crc = 0;
for (i = 0; i < raw->ssd_pages; i++) {
crc = env_crc32(crc, raw->mem_pool + PAGE_SIZE * i, PAGE_SIZE);
OCF_COND_RESCHED(step, 10000);
}
return crc;
}
/*
* RAM Implementation - Get entry
*/
static int _raw_ram_get(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size)
{
ENV_BUG_ON(!_raw_is_valid(raw, line, size));
return _RAW_RAM_GET(raw, line, data);
}
/*
* RAM Implementation - Read only entry access
*/
static const void *_raw_ram_rd_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size)
{
ENV_BUG_ON(!_raw_is_valid(raw, line, size));
return _RAW_RAM_ADDR(raw, line);
}
/*
* RAM Implementation - Write entry access
*/
static void *_raw_ram_wr_access(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
uint32_t size)
{
ENV_BUG_ON(!_raw_is_valid(raw, line, size));
return _RAW_RAM_ADDR(raw, line);
}
/*
* RAM Implementation - Set Entry
*/
static int _raw_ram_set(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line,
void *data, uint32_t size)
{
ENV_BUG_ON(!_raw_is_valid(raw, line, size));
return _RAW_RAM_SET(raw, line, data);
}
/*
* RAM Implementation - Flush specified element to SSD
*/
static int _raw_ram_flush(struct ocf_cache *cache,
struct ocf_metadata_raw *raw, ocf_cache_line_t line)
{
OCF_DEBUG_PARAM(cache, "Line = %u", line);
OCF_DEBUG_PARAM(cache, "Page = %llu", _RAW_RAM_PAGE(raw, line));
ENV_BUG_ON(!_raw_is_valid(raw, line, raw->entry_size));
return metadata_io_write(cache, _RAW_RAM_ADDR_PAGE(raw, line),
_RAW_RAM_PAGE_SSD(raw, line));
}
/*
* RAM Implementation - Load all IO callback
*/
static int _raw_ram_load_all_io(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context)
{
ocf_cache_line_t line;
uint32_t raw_page;
struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *) context;
uint32_t size = raw->entry_size * raw->entries_in_page;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
ENV_BUG_ON(size > PAGE_SIZE);
raw_page = page - raw->ssd_pages_offset;
line = raw_page * raw->entries_in_page;
OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page);
ctx_data_rd_check(cache->owner, _RAW_RAM_ADDR(raw, line), data, size);
ctx_data_seek(cache->owner, data, ctx_data_seek_current,
PAGE_SIZE - size);
return 0;
}
/*
* RAM Implementation - Load all metadata elements from SSD
*/
static int _raw_ram_load_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
OCF_DEBUG_TRACE(cache);
return metadata_io_read_i(cache, raw->ssd_pages_offset,
raw->ssd_pages, _raw_ram_load_all_io, raw);
}
/*
* RAM Implementation - Flush IO callback - Fill page
*/
static int _raw_ram_flush_all_fill(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context)
{
ocf_cache_line_t line;
uint32_t raw_page;
struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *)context;
uint32_t size = raw->entry_size * raw->entries_in_page;
ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
ENV_BUG_ON(size > PAGE_SIZE);
raw_page = page - raw->ssd_pages_offset;
line = raw_page * raw->entries_in_page;
OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page);
ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, line), size);
ctx_data_zero_check(cache->owner, data, PAGE_SIZE - size);
return 0;
}
/*
* RAM Implementation - Flush all elements
*/
static int _raw_ram_flush_all(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
OCF_DEBUG_TRACE(cache);
return metadata_io_write_i(cache, raw->ssd_pages_offset,
raw->ssd_pages, _raw_ram_flush_all_fill, raw);
}
/*
* RAM Implementation - Mark to Flush
*/
static void _raw_ram_flush_mark(struct ocf_cache *cache,
struct ocf_request *rq, uint32_t map_idx, int to_state,
uint8_t start, uint8_t stop)
{
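/* Only transitions to DIRTY or CLEAN require the on-SSD metadata
 * to be updated
 */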
if (to_state == DIRTY || to_state == CLEAN) {
rq->map[map_idx].flush = true;
rq->info.flush_metadata = true;
}
}
/*******************************************************************************
 * RAM Implementation - Do Flush Asynchronously
 ******************************************************************************/
struct _raw_ram_flush_ctx {
struct ocf_metadata_raw *raw;
struct ocf_request *rq;
ocf_metadata_asynch_flush_hndl complete;
env_atomic flush_req_cnt;
int error;
};
static void _raw_ram_flush_do_asynch_io_complete(struct ocf_cache *cache,
void *context, int error)
{
struct _raw_ram_flush_ctx *ctx = context;
if (error) {
ctx->error = error;
ocf_metadata_error(cache);
}
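/* The context is shared by all flush IOs of this request; complete
 * only when the last reference drops the counter to zero
 */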
if (env_atomic_dec_return(&ctx->flush_req_cnt))
return;
OCF_DEBUG_MSG(cache, "Asynchronous flushing complete");
/* Call the metadata flush completion callback */
ctx->rq->error |= ctx->error;
ctx->complete(ctx->rq, ctx->error);
env_free(ctx);
}
/*
 * RAM Implementation - Asynchronous flush IO callback - Fill page
 */
static int _raw_ram_flush_do_asynch_fill(struct ocf_cache *cache,
ctx_data_t *data, uint32_t page, void *context)
{
ocf_cache_line_t line;
uint32_t raw_page;
struct _raw_ram_flush_ctx *ctx = context;
struct ocf_metadata_raw *raw = NULL;
uint64_t size;
ENV_BUG_ON(!ctx);
raw = ctx->raw;
ENV_BUG_ON(!raw);
size = raw->entry_size * raw->entries_in_page;
ENV_BUG_ON(size > PAGE_SIZE);
raw_page = page - raw->ssd_pages_offset;
line = raw_page * raw->entries_in_page;
OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page);
ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, line), size);
ctx_data_zero_check(cache->owner, data, PAGE_SIZE - size);
return 0;
}
/*
 * RAM Implementation - Page comparator for sorting pages to flush
 */
int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2)
{
uint32_t *page1 = (uint32_t *)item1;
uint32_t *page2 = (uint32_t *)item2;
if (*page1 > *page2)
return 1;
if (*page1 < *page2)
return -1;
return 0;
}
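/*
 * Collect SSD page indices of all cache lines in the request that are
 * marked for metadata flush
 */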
static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *rq,
uint32_t *pages_tab, struct ocf_metadata_raw *raw,
int *pages_to_flush)
{
int i, j = 0;
int line_no = rq->core_line_count;
struct ocf_map_info *map;
for (i = 0; i < line_no; i++) {
map = &rq->map[i];
if (map->flush) {
pages_tab[j] = _RAW_RAM_PAGE(raw, map->coll_idx);
j++;
}
}
*pages_to_flush = j;
}
static int _raw_ram_flush_do_asynch(struct ocf_cache *cache,
struct ocf_request *rq, struct ocf_metadata_raw *raw,
ocf_end_t complete)
{
int result = 0, i;
uint32_t __pages_tab[MAX_STACK_TAB_SIZE];
uint32_t *pages_tab;
int line_no = rq->core_line_count;
int pages_to_flush;
uint32_t start_page = 0;
uint32_t count = 0;
struct _raw_ram_flush_ctx *ctx;
ENV_BUG_ON(!complete);
OCF_DEBUG_TRACE(cache);
if (!rq->info.flush_metadata) {
/* Nothing to flush, call the completion callback */
complete(rq, 0);
return 0;
}
ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO);
if (!ctx) {
complete(rq, -ENOMEM);
return -ENOMEM;
}
ctx->rq = rq;
ctx->complete = complete;
ctx->raw = raw;
env_atomic_set(&ctx->flush_req_cnt, 1);
if (line_no <= MAX_STACK_TAB_SIZE) {
pages_tab = __pages_tab;
} else {
pages_tab = env_zalloc(sizeof(*pages_tab) * line_no, ENV_MEM_NOIO);
if (!pages_tab) {
env_free(ctx);
complete(rq, -ENOMEM);
return -ENOMEM;
}
}
/* Keep the request counter at 1 while pages are being collected and
 * sorted, to prevent the asynchronous context from being freed
 * prematurely
 */
__raw_ram_flush_do_asynch_add_pages(rq, pages_tab, raw,
&pages_to_flush);
env_sort(pages_tab, pages_to_flush, sizeof(*pages_tab),
_raw_ram_flush_do_page_cmp, NULL);
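/* Walk the sorted page list, skip duplicate entries and merge runs of
 * contiguous pages into a single asynchronous write
 */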
i = 0;
while (i < pages_to_flush) {
start_page = pages_tab[i];
count = 1;
while (true) {
if ((i + 1) >= pages_to_flush)
break;
if (pages_tab[i] == pages_tab[i + 1]) {
i++;
continue;
}
if ((pages_tab[i] + 1) != pages_tab[i + 1])
break;
i++;
count++;
}
env_atomic_inc(&ctx->flush_req_cnt);
result |= metadata_io_write_i_asynch(cache, rq->io_queue, ctx,
raw->ssd_pages_offset + start_page, count,
_raw_ram_flush_do_asynch_fill,
_raw_ram_flush_do_asynch_io_complete);
if (result)
break;
i++;
}
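/* Drop the initial reference; if no IO is outstanding (or all have
 * already completed) this call also completes the request
 */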
_raw_ram_flush_do_asynch_io_complete(cache, ctx, result);
if (line_no > MAX_STACK_TAB_SIZE)
env_free(pages_tab);
return result;
}
/*******************************************************************************
* RAW Interfaces definitions
******************************************************************************/
#include "metadata_raw_dynamic.h"
#include "metadata_raw_volatile.h"
static const struct raw_iface IRAW[metadata_raw_type_max] = {
[metadata_raw_type_ram] = {
.init = _raw_ram_init,
.deinit = _raw_ram_deinit,
.size_of = _raw_ram_size_of,
.size_on_ssd = _raw_ram_size_on_ssd,
.checksum = _raw_ram_checksum,
.get = _raw_ram_get,
.set = _raw_ram_set,
.rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access,
.flush = _raw_ram_flush,
.load_all = _raw_ram_load_all,
.flush_all = _raw_ram_flush_all,
.flush_mark = _raw_ram_flush_mark,
.flush_do_asynch = _raw_ram_flush_do_asynch,
},
[metadata_raw_type_dynamic] = {
.init = raw_dynamic_init,
.deinit = raw_dynamic_deinit,
.size_of = raw_dynamic_size_of,
.size_on_ssd = raw_dynamic_size_on_ssd,
.checksum = raw_dynamic_checksum,
.get = raw_dynamic_get,
.set = raw_dynamic_set,
.rd_access = raw_dynamic_rd_access,
.wr_access = raw_dynamic_wr_access,
.flush = raw_dynamic_flush,
.load_all = raw_dynamic_load_all,
.flush_all = raw_dynamic_flush_all,
.flush_mark = raw_dynamic_flush_mark,
.flush_do_asynch = raw_dynamic_flush_do_asynch,
},
[metadata_raw_type_volatile] = {
.init = _raw_ram_init,
.deinit = _raw_ram_deinit,
.size_of = _raw_ram_size_of,
.size_on_ssd = raw_volatile_size_on_ssd,
.checksum = raw_volatile_checksum,
.get = _raw_ram_get,
.set = _raw_ram_set,
.rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access,
.flush = raw_volatile_flush,
.load_all = raw_volatile_load_all,
.flush_all = raw_volatile_flush_all,
.flush_mark = raw_volatile_flush_mark,
.flush_do_asynch = raw_volatile_flush_do_asynch,
},
[metadata_raw_type_atomic] = {
.init = _raw_ram_init,
.deinit = _raw_ram_deinit,
.size_of = _raw_ram_size_of,
.size_on_ssd = _raw_ram_size_on_ssd,
.checksum = _raw_ram_checksum,
.get = _raw_ram_get,
.set = _raw_ram_set,
.rd_access = _raw_ram_rd_access,
.wr_access = _raw_ram_wr_access,
.flush = _raw_ram_flush,
.load_all = _raw_ram_load_all,
.flush_all = _raw_ram_flush_all,
.flush_mark = raw_atomic_flush_mark,
.flush_do_asynch = raw_atomic_flush_do_asynch,
},
};
/*******************************************************************************
* RAW Top interface implementation
******************************************************************************/
int ocf_metadata_raw_init(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
ENV_BUG_ON(raw->raw_type < metadata_raw_type_min);
ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max);
raw->iface = &(IRAW[raw->raw_type]);
return raw->iface->init(cache, raw);
}
int ocf_metadata_raw_deinit(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
int result;
if (!raw->iface)
return 0;
result = raw->iface->deinit(cache, raw);
raw->iface = NULL;
return result;
}
size_t ocf_metadata_raw_size_on_ssd(struct ocf_cache *cache,
struct ocf_metadata_raw *raw)
{
ENV_BUG_ON(raw->raw_type < metadata_raw_type_min);
ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max);
return IRAW[raw->raw_type].size_on_ssd(cache, raw);
}