commit a8e1ce8cc59d37cae1f909c07ace70d46fc14e7d Author: Robert Baldyga Date: Thu Nov 29 15:14:21 2018 +0100 Initial commit Signed-off-by: Robert Baldyga diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..59df271 --- /dev/null +++ b/Makefile @@ -0,0 +1,100 @@ +# +# Copyright(c) 2012-2018 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +PWD:=$(shell pwd) + +ifneq ($(strip $(O)),) +OUTDIR:=$(shell cd $(O) && pwd) +endif + +validate: +ifeq ($(strip $(OUTDIR)),) + $(error No output specified for installing sources or headers) +endif + +ifeq ($(strip $(CMD)),) +INSTALL=ln -s +else ifeq ($(strip $(CMD)),cp) +INSTALL=cp +else ifeq ($(strip $(CMD)),install) +INSTALL=install +else +$(error Not allowed program command) +endif + +# +# Installing headers +# +INC_IN=$(shell find $(PWD)/inc -name '*.[h]' -type f) +INC_OUT=$(patsubst $(PWD)/inc/%,$(OUTDIR)/include/ocf/%,$(INC_IN)) +INC_RM=$(shell find $(OUTDIR)/include/ocf -name '*.[h]' -xtype l 2>/dev/null) + +inc: $(INC_OUT) $(INC_RM) + @$(MAKE) distcleandir + +$(INC_OUT): +ifeq ($(strip $(OUTDIR)),) + $(error No output specified for installing headers) +endif + @echo " INSTALL $@" + @mkdir -p $(dir $@) + @$(INSTALL) $(subst $(OUTDIR)/include/ocf,$(PWD)/inc,$@) $@ + +$(INC_RM): validate + $(if $(shell readlink $@ | grep $(PWD)/inc), \ + @echo " RM $@"; rm $@,) + +# +# Installing sources +# +SRC_IN=$(shell find $(PWD)/src -name '*.[c|h]' -type f) +SRC_OUT=$(patsubst $(PWD)/src/%,$(OUTDIR)/src/ocf/%,$(SRC_IN)) +SRC_RM=$(shell find $(OUTDIR)/src/ocf -name '*.[c|h]' -xtype l 2>/dev/null) + +src: $(SRC_OUT) $(SRC_RM) + @$(MAKE) distcleandir + +$(SRC_OUT): +ifeq ($(strip $(OUTDIR)),) + $(error No output specified for installing sources) +endif + @echo " INSTALL $@" + @mkdir -p $(dir $@) + @$(INSTALL) $(subst $(OUTDIR)/src/ocf,$(PWD)/src,$@) $@ + +$(SRC_RM): validate + $(if $(shell readlink $@ | grep $(PWD)/src), \ + @echo " RM $@"; rm $@,) + +# +# Distclean +# +dist_dir=$(foreach dir,$(shell find $(OUTDIR) -type d -empty), \ +$(if $(wildcard $(subst $(OUTDIR)/src/ocf,$(PWD)/src,$(dir))),$(dir),)) + +distclean: validate + @rm -f $(SRC_OUT) $(INC_OUT) + @$(MAKE) distcleandir + +distcleandir: + $(if $(strip $(dist_dir)), rm -r $(dist_dir),) + +# +# Printing help +# +help: + $(info Available targets:) + $(info inc O= [CMD=cp|install] Install include files into specified directory) + $(info src O= [CMD=cp|install] Install source files into specified directory) + $(info distclean O= Uninstall source and headers from specified directory) + +doc: validate + @cd doc && rm -rf html + @cd doc && doxygen doxygen.cfg + @mkdir -p $(OUTDIR)/doc + @cd doc && mv html $(OUTDIR)/doc/ocf + +.PHONY: inc src validate help distclean distcleandir doc \ + $(INC_RM) $(SRC_RM) $(DIST_DIR) diff --git a/doc/README.md b/doc/README.md new file mode 100644 index 0000000..0f708ca --- /dev/null +++ b/doc/README.md @@ -0,0 +1,294 @@ +# Open CAS Framework + +# Content: +- [Architecture overview](#architecture-overview) +- [Management interface](#library-management) +- [IO path](#reading-and-writing-data) + +# Architecture overview + +Intel(R) Cache Acceleration Software (CAS) consists of: +- Platform independent library called Open CAS Framework (OCF) +- Platform dependent adaptation layers enabling OCF to work in different +environments such as Linux kernel + +An example usage for OCF is Linux kernel (see picture below). +In this case OCF operates as block level cache for block devices. 
+For this usage model OCF comes with the following adaptation layers:
+- Library client (top adapter) - its main responsibility is creating
+a cache volume representing the primary storage device. Applications can
+read from and write to the cache volume block device as if it were a regular
+primary storage device.
+- Block device data object (bottom adapter) - responsible for issuing
+IO operations to the underlying block device.
+
+A system administrator can manage cache instances via the Intel CAS CLI management
+utility called "casadm".
+
+![OCF Linux deployment view](deployment-1.png)
+
+Another example of OCF usage is a user space block level cache for QEMU
+(see picture below). In this example the following adaptation layers may exist:
+- CAS virtIO-blk driver for QEMU (top adapter) - it exposes the
+primary storage device (another virtIO driver) to the guest OS via the OCF library
+- virtIO-blk data object (bottom adapter) - enables OCF to access
+data on the primary storage device or cache device via the original virtIO driver
+
+Please note that the actual adapters depend on the environment where OCF is
+meant to run. Different bottom adapters can be delivered for the cache device
+and the primary storage device. For example, the bottom adapter for the caching
+device may be implemented using kernel bypass techniques, providing low-latency
+access to the cache media.
+
+![OCF deployment in QEMU example](deployment-2.png)
+
+# Management interface
+The management interface delivered with Intel OCF enables a system administrator to:
+ - Configure the OCF caching library for the target environment, which includes
+installation of the required platform dependent adapters.
+ - Start/stop and manage existing cache instances.
+ - Perform observability functions (e.g. retrieving performance counters).
+
+For more details please see the examples below:
+
+## Library initialization example
+
+OCF can be used simultaneously by two independent libraries linked into the same
+executable by means of the concept of contexts. Each context has its own set of
+operations which allow it to handle the specific data types used by the data
+objects within this context.
+
+```c
+#include "ocf.h"
+
+/* Handle to library context */
+ocf_ctx_t ctx;
+
+/* Your context interface */
+const struct ocf_ctx_ops ctx_ops = {
+	/* Fill your interface functions */
+};
+
+/* Your unique data object type IDs */
+enum my_data_obj_type {
+	my_data_obj_type_1,
+	my_data_obj_type_2
+};
+
+/* Your data object interface declarations */
+const struct ocf_data_obj_properties my_data_obj_properties1 = {
+	.name = "My data object 1",
+	.ops = {
+		/* Fill your data object interface functions */
+	},
+};
+
+const struct ocf_data_obj_properties my_data_obj_properties2 = {
+	.name = "My data object 2",
+	.ops = {
+		/* Fill your data object interface functions */
+	},
+};
+
+int my_cache_init(void)
+{
+	int result;
+
+	result = ocf_ctx_init(&ctx, &ctx_ops);
+	if (result) {
+		/* Cannot initialize context of OCF library */
+		return result;
+	}
+	/* Initialization successful */
+
+	/* Now we can register data object types */
+	result = ocf_ctx_register_data_obj_type(ctx, my_data_obj_type_1,
+			&my_data_obj_properties1);
+	if (result) {
+		/* Cannot register data object interface */
+		goto err;
+	}
+
+	result = ocf_ctx_register_data_obj_type(ctx, my_data_obj_type_2,
+			&my_data_obj_properties2);
+	if (result) {
+		/* Cannot register data object interface */
+		goto err;
+	}
+
+	return 0;
+
+err:
+	/* In case of failure we destroy the context and propagate the error code */
+	ocf_ctx_exit(ctx);
+	return result;
+}
+```
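+
+OCF messages can be routed to your platform's logging facility by attaching a
+logger to the context. Below is a minimal sketch using @ref ocf_ctx_set_logger;
+the contents of struct ocf_logger are platform specific, so only a placeholder
+initializer is shown:
+
+```c
+#include "ocf.h"
+
+/* Your logger interface */
+const struct ocf_logger logger = {
+	/* Fill your logger callbacks */
+};
+
+int my_logger_init(ocf_ctx_t ctx)
+{
+	int result;
+
+	/* Register the logger in an already initialized context */
+	result = ocf_ctx_set_logger(ctx, &logger);
+	if (result) {
+		/* Cannot set logger of OCF library context */
+		return result;
+	}
+
+	return 0;
+}
+```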
+
+## Cache management
+OCF library API provides management functions (@ref ocf_mngt.h). This
+interface enables the user to manage cache instances. Examples:
+- Start cache
+```c
+int result;
+ocf_cache_t cache; /* Handle to your cache */
+struct ocf_mngt_cache_config cfg; /* Your cache configuration */
+
+/* Prepare your cache configuration */
+
+/* Configure cache mode */
+cfg.cache_mode = ocf_cache_mode_wt;
+
+/* Now tell how your cache will be initialized. Select warm or cold cache */
+cfg.init_mode = ocf_init_mode_init;
+
+cfg.uuid.data = "/path/to/your/cache/or/unique/id";
+
+/* Specify cache data object type */
+cfg.data_obj_type = my_data_obj_type_1;
+
+/* Other cache configuration */
+...
+
+/* Start cache */
+result = ocf_mngt_cache_start(ctx, &cache, &cfg);
+if (!result) {
+	/* Your cache was created successfully */
+}
+```
+
+- Add core (primary storage device) to cache
+```c
+int result;
+ocf_core_t core; /* Handle to your core */
+struct ocf_mngt_core_config cfg; /* Your core configuration */
+
+/* Prepare core configuration */
+
+/* Select core data object type */
+cfg.data_obj_type = my_data_obj_type_2;
+/* Set UUID or path of your core */
+cfg.uuid.data = "/path/to/your/core/or/unique/id";
+
+result = ocf_mngt_cache_add_core(cache, &core, &cfg);
+if (!result) {
+	/* Your core was added successfully */
+}
+```
+
+## Management interface considerations
+Each device (cache or core) is assigned an ID, either automatically by OCF or
+explicitly specified by the user. It is possible to retrieve the ID of a cache
+instance via @ref ocf_cache_get_id. To get the ID of a core instance please
+use @ref ocf_core_get_id.
+
+Cache management operations are thread safe - it is possible to perform
+cache management from many threads at a time. There is a possibility to "batch"
+several cache management operations and execute them under the cache management
+lock. To do this, the user needs to first obtain the cache management lock,
+perform the management operations and finally release the lock. For reference
+see the example below.
+
+```c
+int my_complex_work(ocf_cache_id_t cache_id,
+		ocf_core_id_t core_id)
+{
+	int result;
+	ocf_cache_t cache; /* Handle to your cache */
+	ocf_core_t core; /* Handle to your core */
+
+	/* Get cache handle */
+	result = ocf_mngt_cache_get(ctx, cache_id, &cache);
+	if (result)
+		return result;
+
+	/* Lock cache */
+	result = ocf_mngt_cache_lock(cache);
+	if (result) {
+		ocf_mngt_cache_put(cache);
+		return result;
+	}
+
+	/* Get core handle */
+	result = ocf_core_get(cache, core_id, &core);
+	if (result) {
+		result = -1;
+		goto END;
+	}
+
+	/* Cache is locked, you can perform your activities */
+
+	/* 1. Flush your core */
+	result = ocf_mngt_core_flush(cache, core_id, true);
+	if (result) {
+		goto END;
+	}
+
+	/* 2. Your other operations, including internal actions */
+
+	/* 3. Remove core from cache */
+	result = ocf_mngt_cache_remove_core(cache, core_id, true);
+
+END:
+	ocf_mngt_cache_unlock(cache); /* Remember to unlock the cache */
+	ocf_mngt_cache_put(cache); /* Release cache reference */
+
+	return result;
+}
+```
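+
+The same handles can be used for basic observability. Below is a minimal sketch
+(reporting via printf is only an example) which reads cache status using
+@ref ocf_cache_get_info and the getters declared in ocf_cache.h:
+
+```c
+#include <stdio.h>
+#include "ocf.h"
+
+int my_print_cache_status(ocf_cache_t cache)
+{
+	struct ocf_cache_info info;
+	int result;
+
+	/* Retrieve configuration and status of the cache instance */
+	result = ocf_cache_get_info(cache, &info);
+	if (result) {
+		/* Cannot get cache information */
+		return result;
+	}
+
+	/* Occupancy and dirty counters are expressed in cache lines */
+	printf("cache %s: %u/%u cache lines occupied, %u dirty, %u cores\n",
+			ocf_cache_get_name(cache), info.occupancy, info.size,
+			info.dirty, info.core_count);
+
+	return 0;
+}
+```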
+
+# IO path
+Please refer to the sequence diagram below for the detailed IO flow. A typical IO
+path includes:
+ - IO allocation - creating a new IO instance that will be submitted to OCF
+for processing
+ - IO configuration - specifying address and length, IO class, flags and
+the completion function
+ - IO submission - actual IO submission to OCF. OCF will perform a cache
+lookup and, based on its results, will return data from the cache or the primary
+storage device
+ - IO completion - signalled by calling the completion function specified
+in the IO configuration phase
+
+![An example of IO flow](io-path.png)
+
+## IO submission example
+```c
+#include "ocf.h"
+
+void read_end(struct ocf_io *io, int error)
+{
+	/* Your IO has finished. Check the result and inform upper
+	 * layers.
+	 */
+
+	/* Release IO */
+	ocf_io_put(io);
+}
+
+int read(ocf_core_t core, ctx_data_t *data, uint64_t addr, uint32_t length)
+{
+	/* Allocate IO */
+	struct ocf_io *io = ocf_new_io(core);
+
+	if (!io) {
+		/* Cannot allocate IO */
+		return -ENOMEM;
+	}
+
+	/* Configure IO: set address, length, direction, flags and IO class */
+	ocf_io_configure(io, addr, length, OCF_READ, 0, 0);
+
+	/* Set completion context and function */
+	ocf_io_set_cmpl(io, NULL, NULL, read_end);
+
+	/* Set data */
+	if (ocf_io_set_data(io, data, 0)) {
+		ocf_io_put(io);
+		return -EINVAL;
+	}
+
+	/* Send IO request to the cache */
+	return ocf_submit_io(io);
+}
+```
diff --git a/doc/doxygen.cfg b/doc/doxygen.cfg new file mode 100644 index 0000000..1e22da4 --- /dev/null +++ b/doc/doxygen.cfg @@ -0,0 +1,329 @@ +# Doxyfile 1.8.6 + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- +DOXYFILE_ENCODING = UTF-8 +PROJECT_NAME = "Open CAS Framework" +PROJECT_NUMBER = +PROJECT_BRIEF = OCF +PROJECT_LOGO = img/logo.png +OUTPUT_DIRECTORY = . +CREATE_SUBDIRS = NO +ALLOW_UNICODE_NAMES = NO +OUTPUT_LANGUAGE = English +BRIEF_MEMBER_DESC = YES +REPEAT_BRIEF = YES +ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the +ALWAYS_DETAILED_SEC = NO +INLINE_INHERITED_MEMB = NO +FULL_PATH_NAMES = NO +STRIP_FROM_PATH = +STRIP_FROM_INC_PATH = +SHORT_NAMES = NO +JAVADOC_AUTOBRIEF = NO +QT_AUTOBRIEF = NO +MULTILINE_CPP_IS_BRIEF = NO +INHERIT_DOCS = YES +SEPARATE_MEMBER_PAGES = NO +TAB_SIZE = 8 +ALIASES = +TCL_SUBST = +OPTIMIZE_OUTPUT_FOR_C = YES +OPTIMIZE_OUTPUT_JAVA = NO +OPTIMIZE_FOR_FORTRAN = NO +OPTIMIZE_OUTPUT_VHDL = NO +EXTENSION_MAPPING = +MARKDOWN_SUPPORT = YES +AUTOLINK_SUPPORT = YES +BUILTIN_STL_SUPPORT = NO +CPP_CLI_SUPPORT = NO +SIP_SUPPORT = NO +IDL_PROPERTY_SUPPORT = YES +DISTRIBUTE_GROUP_DOC = NO +GROUP_NESTED_COMPOUNDS = NO +SUBGROUPING = YES +INLINE_GROUPED_CLASSES = NO +INLINE_SIMPLE_STRUCTS = NO +TYPEDEF_HIDES_STRUCT = NO +LOOKUP_CACHE_SIZE = 0 +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- +EXTRACT_ALL = NO +EXTRACT_PRIVATE = NO +EXTRACT_PACKAGE = NO +EXTRACT_STATIC = NO +EXTRACT_LOCAL_CLASSES = YES +EXTRACT_LOCAL_METHODS = NO +EXTRACT_ANON_NSPACES = NO +HIDE_UNDOC_MEMBERS = NO +HIDE_UNDOC_CLASSES = NO +HIDE_FRIEND_COMPOUNDS = NO +HIDE_IN_BODY_DOCS = NO +INTERNAL_DOCS = NO +CASE_SENSE_NAMES = NO +HIDE_SCOPE_NAMES = YES +HIDE_COMPOUND_REFERENCE= NO +SHOW_INCLUDE_FILES = YES +SHOW_GROUPED_MEMB_INC = NO +FORCE_LOCAL_INCLUDES = NO +INLINE_INFO = YES +SORT_MEMBER_DOCS = YES +SORT_BRIEF_DOCS = NO +SORT_MEMBERS_CTORS_1ST = NO +SORT_GROUP_NAMES = NO +SORT_BY_SCOPE_NAME = NO +STRICT_PROTO_MATCHING = NO +GENERATE_TODOLIST = YES +GENERATE_TESTLIST = YES +GENERATE_BUGLIST = YES +GENERATE_DEPRECATEDLIST= YES
+ENABLED_SECTIONS = +MAX_INITIALIZER_LINES = 30 +SHOW_USED_FILES = YES +SHOW_FILES = YES +SHOW_NAMESPACES = YES +FILE_VERSION_FILTER = +LAYOUT_FILE = +CITE_BIB_FILES = +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- +QUIET = NO +WARNINGS = YES +WARN_IF_UNDOCUMENTED = YES +WARN_IF_DOC_ERROR = YES +WARN_NO_PARAMDOC = NO +WARN_AS_ERROR = NO +WARN_FORMAT = "$file:$line: $text" +WARN_LOGFILE = +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- +INPUT = ../inc README.md +INPUT_ENCODING = UTF-8 +FILE_PATTERNS = *.c \ + *.h \ + *.md +RECURSIVE = YES +EXCLUDE = +EXCLUDE_SYMLINKS = NO +EXCLUDE_PATTERNS = +EXCLUDE_SYMBOLS = +EXAMPLE_PATH = +EXAMPLE_PATTERNS = * +EXAMPLE_RECURSIVE = NO +IMAGE_PATH = ./img/ +INPUT_FILTER = +FILTER_PATTERNS = +FILTER_SOURCE_FILES = NO +FILTER_SOURCE_PATTERNS = +USE_MDFILE_AS_MAINPAGE = README.md +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- +SOURCE_BROWSER = NO +INLINE_SOURCES = NO +STRIP_CODE_COMMENTS = YES +REFERENCED_BY_RELATION = NO +REFERENCES_RELATION = NO +REFERENCES_LINK_SOURCE = YES +SOURCE_TOOLTIPS = YES +USE_HTAGS = NO +VERBATIM_HEADERS = YES +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- +ALPHABETICAL_INDEX = YES +COLS_IN_ALPHA_INDEX = 5 +IGNORE_PREFIX = +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- +GENERATE_HTML = YES +HTML_OUTPUT = html +HTML_FILE_EXTENSION = .html +#HTML_HEADER = header.html +#HTML_FOOTER = footer.html +HTML_STYLESHEET = +HTML_EXTRA_STYLESHEET = +HTML_EXTRA_FILES = +HTML_COLORSTYLE_HUE = 220 +HTML_COLORSTYLE_SAT = 100 +HTML_COLORSTYLE_GAMMA = 80 +HTML_TIMESTAMP = NO +HTML_DYNAMIC_SECTIONS = NO +HTML_INDEX_NUM_ENTRIES = 100 +GENERATE_DOCSET = NO +DOCSET_FEEDNAME = "Doxygen generated docs" +DOCSET_BUNDLE_ID = org.doxygen.Project +DOCSET_PUBLISHER_ID = org.doxygen.Publisher +DOCSET_PUBLISHER_NAME = Publisher +GENERATE_HTMLHELP = NO +CHM_FILE = +HHC_LOCATION = +GENERATE_CHI = NO +CHM_INDEX_ENCODING = +BINARY_TOC = NO +TOC_EXPAND = NO +GENERATE_QHP = NO +QCH_FILE = +QHP_NAMESPACE = org.doxygen.Project +QHP_VIRTUAL_FOLDER = doc +QHP_CUST_FILTER_NAME = +QHP_CUST_FILTER_ATTRS = +QHP_SECT_FILTER_ATTRS = +QHG_LOCATION = +GENERATE_ECLIPSEHELP = NO +ECLIPSE_DOC_ID = org.doxygen.Project +DISABLE_INDEX = NO +GENERATE_TREEVIEW = NO +ENUM_VALUES_PER_LINE = 4 +TREEVIEW_WIDTH = 250 +EXT_LINKS_IN_WINDOW = NO +FORMULA_FONTSIZE = 10 +FORMULA_TRANSPARENT = YES +USE_MATHJAX = NO +MATHJAX_FORMAT = HTML-CSS +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest +MATHJAX_EXTENSIONS = +MATHJAX_CODEFILE = +SEARCHENGINE = YES +SERVER_BASED_SEARCH = NO +EXTERNAL_SEARCH = NO +SEARCHENGINE_URL = +SEARCHDATA_FILE = searchdata.xml +EXTERNAL_SEARCH_ID = +EXTRA_SEARCH_MAPPINGS = +#--------------------------------------------------------------------------- +# 
Configuration options related to the LaTeX output +#--------------------------------------------------------------------------- +GENERATE_LATEX = NO +LATEX_OUTPUT = latex +LATEX_CMD_NAME = latex +MAKEINDEX_CMD_NAME = makeindex +COMPACT_LATEX = NO +PAPER_TYPE = a4 +EXTRA_PACKAGES = +LATEX_HEADER = +LATEX_FOOTER = +LATEX_EXTRA_STYLESHEET = +LATEX_EXTRA_FILES = +PDF_HYPERLINKS = YES +USE_PDFLATEX = YES +LATEX_BATCHMODE = NO +LATEX_HIDE_INDICES = NO +LATEX_SOURCE_CODE = NO +LATEX_BIB_STYLE = plain +LATEX_TIMESTAMP = NO +#--------------------------------------------------------------------------- +# Configuration options related to the RTF output +#--------------------------------------------------------------------------- +GENERATE_RTF = NO +RTF_OUTPUT = rtf +COMPACT_RTF = NO +RTF_HYPERLINKS = NO +RTF_STYLESHEET_FILE = +RTF_EXTENSIONS_FILE = +RTF_SOURCE_CODE = NO +#--------------------------------------------------------------------------- +# Configuration options related to the man page output +#--------------------------------------------------------------------------- +GENERATE_MAN = NO +MAN_OUTPUT = man +MAN_EXTENSION = .3 +MAN_SUBDIR = +MAN_LINKS = NO +#--------------------------------------------------------------------------- +# Configuration options related to the XML output +#--------------------------------------------------------------------------- +GENERATE_XML = NO +XML_OUTPUT = xml +XML_PROGRAMLISTING = YES +#--------------------------------------------------------------------------- +# Configuration options related to the DOCBOOK output +#--------------------------------------------------------------------------- +GENERATE_DOCBOOK = NO +DOCBOOK_OUTPUT = docbook +DOCBOOK_PROGRAMLISTING = NO +#--------------------------------------------------------------------------- +# Configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- +GENERATE_AUTOGEN_DEF = NO +#--------------------------------------------------------------------------- +# Configuration options related to the Perl module output +#--------------------------------------------------------------------------- +GENERATE_PERLMOD = NO +PERLMOD_LATEX = NO +PERLMOD_PRETTY = YES +PERLMOD_MAKEVAR_PREFIX = +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- +ENABLE_PREPROCESSING = YES +MACRO_EXPANSION = NO +EXPAND_ONLY_PREDEF = NO +SEARCH_INCLUDES = YES +INCLUDE_PATH = +INCLUDE_FILE_PATTERNS = +PREDEFINED = +EXPAND_AS_DEFINED = +SKIP_FUNCTION_MACROS = YES +#--------------------------------------------------------------------------- +# Configuration options related to external references +#--------------------------------------------------------------------------- +TAGFILES = +GENERATE_TAGFILE = +ALLEXTERNALS = NO +EXTERNAL_GROUPS = YES +EXTERNAL_PAGES = YES +PERL_PATH = /usr/bin/perl +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- +CLASS_DIAGRAMS = YES +MSCGEN_PATH = +DIA_PATH = +HIDE_UNDOC_RELATIONS = YES +HAVE_DOT = NO +DOT_NUM_THREADS = 0 +DOT_FONTNAME = Helvetica +DOT_FONTSIZE = 10 +DOT_FONTPATH = +CLASS_GRAPH = YES +COLLABORATION_GRAPH = YES +GROUP_GRAPHS = YES +UML_LOOK = NO +UML_LIMIT_NUM_FIELDS = 10 +TEMPLATE_RELATIONS = NO +INCLUDE_GRAPH = 
YES +INCLUDED_BY_GRAPH = YES +CALL_GRAPH = NO +CALLER_GRAPH = NO +GRAPHICAL_HIERARCHY = YES +DIRECTORY_GRAPH = YES +DOT_IMAGE_FORMAT = png +INTERACTIVE_SVG = NO +DOT_PATH = +DOTFILE_DIRS = +MSCFILE_DIRS = +DIAFILE_DIRS = +PLANTUML_JAR_PATH = +PLANTUML_INCLUDE_PATH = +DOT_GRAPH_MAX_NODES = 50 +MAX_DOT_GRAPH_DEPTH = 0 +DOT_TRANSPARENT = NO +DOT_MULTI_TARGETS = NO +GENERATE_LEGEND = YES +DOT_CLEANUP = YES diff --git a/doc/img/deployment-1.png b/doc/img/deployment-1.png new file mode 100644 index 0000000..ccf6769 Binary files /dev/null and b/doc/img/deployment-1.png differ diff --git a/doc/img/deployment-2.png b/doc/img/deployment-2.png new file mode 100644 index 0000000..f31df9e Binary files /dev/null and b/doc/img/deployment-2.png differ diff --git a/doc/img/io-path.png b/doc/img/io-path.png new file mode 100644 index 0000000..8c43fdc Binary files /dev/null and b/doc/img/io-path.png differ diff --git a/doc/img/logo.png b/doc/img/logo.png new file mode 100644 index 0000000..00fbf21 Binary files /dev/null and b/doc/img/logo.png differ diff --git a/inc/cleaning/acp.h b/inc/cleaning/acp.h new file mode 100644 index 0000000..9ca121a --- /dev/null +++ b/inc/cleaning/acp.h @@ -0,0 +1,49 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __OCF_CLEANING_ACP_H__ +#define __OCF_CLEANING_ACP_H__ + +/** + * @file + * @brief ACP cleaning policy API + */ + +enum ocf_cleaning_acp_parameters { + ocf_acp_wake_up_time, + ocf_acp_flush_max_buffers, +}; + +/** + * @name ACP cleaning policy parameters + * @{ + */ + +/** + * ACP cleaning policy time between flushing cycles (in ms) + */ + +/**< Wake up time minimum value */ +#define OCF_ACP_MIN_WAKE_UP 0 +/**< Wake up time maximum value */ +#define OCF_ACP_MAX_WAKE_UP 10000 +/**< Wake up time default value */ +#define OCF_ACP_DEFAULT_WAKE_UP 10 + +/** + * ACP cleaning thread number of dirty cache lines to be flushed in one cycle + */ + +/** Dirty cache lines to be flushed in one cycle minimum value */ +#define OCF_ACP_MIN_FLUSH_MAX_BUFFERS 1 +/** Dirty cache lines to be flushed in one cycle maximum value */ +#define OCF_ACP_MAX_FLUSH_MAX_BUFFERS 10000 +/** Dirty cache lines to be flushed in one cycle default value */ +#define OCF_ACP_DEFAULT_FLUSH_MAX_BUFFERS 128 + +/** + * @} + */ + +#endif /* __OCF_CLEANING_ACP_H__ */ diff --git a/inc/cleaning/alru.h b/inc/cleaning/alru.h new file mode 100644 index 0000000..17311ad --- /dev/null +++ b/inc/cleaning/alru.h @@ -0,0 +1,74 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __OCF_CLEANING_ALRU_H__ +#define __OCF_CLEANING_ALRU_H__ + +/** + * @file + * @brief ALRU cleaning policy API + */ + +enum ocf_cleaning_alru_parameters { + ocf_alru_wake_up_time, + ocf_alru_stale_buffer_time, + ocf_alru_flush_max_buffers, + ocf_alru_activity_threshold, +}; + +/** + * @name ALRU cleaning policy parameters + * @{ + */ + +/** + * ALRU cleaning thread wake up time + */ + +/** Wake up time minimum value */ +#define OCF_ALRU_MIN_WAKE_UP 1 +/** Wake up time maximum value */ +#define OCF_ALRU_MAX_WAKE_UP 3600 +/** Wake up time default value */ +#define OCF_ALRU_DEFAULT_WAKE_UP 20 + +/** + * ALRU cleaning thread staleness time + */ + +/** Staleness time minimum value */ +#define OCF_ALRU_MIN_STALENESS_TIME 1 +/** Staleness time maximum value */ +#define OCF_ALRU_MAX_STALENESS_TIME 3600 +/** Staleness time default value*/ +#define OCF_ALRU_DEFAULT_STALENESS_TIME 120 + +/** + * ALRU cleaning thread number of dirty cache 
lines to be flushed in one cycle + */ + +/** Dirty cache lines to be flushed in one cycle minimum value */ +#define OCF_ALRU_MIN_FLUSH_MAX_BUFFERS 1 +/** Dirty cache lines to be flushed in one cycle maximum value */ +#define OCF_ALRU_MAX_FLUSH_MAX_BUFFERS 10000 +/** Dirty cache lines to be flushed in one cycle default value */ +#define OCF_ALRU_DEFAULT_FLUSH_MAX_BUFFERS 100 + +/** + * ALRU cleaning thread cache idle time before flushing thread can start + */ + +/** Idle time before flushing thread can start minimum value */ +#define OCF_ALRU_MIN_ACTIVITY_THRESHOLD 500 +/** Idle time before flushing thread can start maximum value */ +#define OCF_ALRU_MAX_ACTIVITY_THRESHOLD 1000000 +/** Idle time before flushing thread can start default value */ +#define OCF_ALRU_DEFAULT_ACTIVITY_THRESHOLD 10000 + +/** + * @} + */ + + +#endif /* __OCF_CLEANING_ALRU_H__ */ diff --git a/inc/ocf.h b/inc/ocf.h new file mode 100644 index 0000000..6ace002 --- /dev/null +++ b/inc/ocf.h @@ -0,0 +1,37 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_H__ +#define __OCF_H__ + +/** + * @file + * @brief Main OCF header + * This file doesn't contain any functions or structures. + * It's simply collective include file to allow OCF user include + * everything at once. + */ + +#include "ocf_def.h" +#include "ocf_types.h" +#include "ocf_utilities.h" +#include "ocf_io.h" +#include "ocf_data_obj.h" +#include "ocf_cache.h" +#include "ocf_core.h" +#include "ocf_queue.h" +#include "ocf_cleaner.h" +#include "cleaning/alru.h" +#include "cleaning/acp.h" +#include "ocf_metadata.h" +#include "ocf_metadata_updater.h" +#include "ocf_io_class.h" +#include "ocf_stats.h" +#include "ocf_stats_builder.h" +#include "ocf_mngt.h" +#include "ocf_ctx.h" +#include "ocf_err.h" + +#endif /* __OCF_H__ */ diff --git a/inc/ocf_cache.h b/inc/ocf_cache.h new file mode 100644 index 0000000..f92d5a7 --- /dev/null +++ b/inc/ocf_cache.h @@ -0,0 +1,250 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + + +#ifndef __OCF_CACHE_H__ +#define __OCF_CACHE_H__ + +/** + * @file + * @brief OCF cache API + */ + +#include "ocf_types.h" +#include "ocf_data_obj.h" +#include "ocf_ctx.h" +#include "ocf_def.h" + +/** + * @brief Cache info: configuration, status + */ +struct ocf_cache_info { + bool attached; + /*!< True if caching cache is attached to cache */ + + uint8_t data_obj_type; + /*!< Cache data object type */ + + uint32_t size; + /*!< Actual cache size (in cache lines) */ + + /* Statistics of inactive cores */ + struct { + uint32_t occupancy; + /*!< Cache occupancy (in cache lines) */ + + uint32_t dirty; + /*!< Dirty blocks within cache (in cache lines) */ + } inactive; + + uint32_t occupancy; + /*!< Actual cache occupancy (in cache lines) */ + + uint32_t dirty; + /*!< Dirty blocks within cache (in cache lines) */ + + uint32_t dirty_initial; + /*!< Dirty blocks within cache that where there when switching + * out of WB mode + */ + + uint32_t dirty_for; + /*!< How long there are dirty cache lines (in seconds) */ + + ocf_cache_mode_t cache_mode; + /*!< Current cache mode */ + + /* Statistics of fallback Pass Through */ + struct { + int error_counter; + /*!< How many requests to cache failed because of IO error */ + + bool status; + /*!< Current cache mode is PT, + set as a result of reaching IO error threshold */ + } fallback_pt; + + uint8_t state; + /*!< Cache state (running/flushing/stopping etc...) 
*/ + + ocf_eviction_t eviction_policy; + /*!< Eviction policy selected */ + + ocf_cleaning_t cleaning_policy; + /*!< Cleaning policy selected (alru/nop) */ + + ocf_cache_line_size_t cache_line_size; + /*!< Cache line size in KiB */ + + uint32_t flushed; + /*!< Number of block flushed in ongoing flush operation */ + + uint32_t core_count; + /*!< Number of core devices associated with this cache */ + + uint64_t metadata_footprint; + /*!< Metadata memory footprint (in bytes) */ + + uint32_t metadata_end_offset; + /*!< LBA offset where metadata ends (in 4KiB blocks) */ +}; + +/** + * @brief Obtain data object from cache + * + * @param[in] cache Cache object + * + * @retval Data object, NULL if dettached. + */ +ocf_data_obj_t ocf_cache_get_data_object(ocf_cache_t cache); + +/** + * @brief Get ID of given cache object + * + * @param[in] cache Cache object + * + * @retval Cache ID + */ +ocf_cache_id_t ocf_cache_get_id(ocf_cache_t cache); + +/** + * @brief Get queue object associated with cache + * + * @param[in] cache Cache object + * @param[in] id Queue id + * @param[out] q Queue object + * + * @retval 0 Success + * @retval Non-zero Fail + */ +int ocf_cache_get_queue(ocf_cache_t cache, unsigned id, ocf_queue_t *q); + +/** + * @brief Set name of given cache object + * + * @param[in] cache Cache object + * @param[in] src Source of Cache name + * @param[in] src_size Size of src + * + * @retval 0 Success + * @retval Non-zero Fail + */ +int ocf_cache_set_name(ocf_cache_t cache, const char *src, size_t src_size); + +/** + * @brief Get name of given cache object + * + * @param[in] cache Cache object + * + * @retval Cache name + */ +const char *ocf_cache_get_name(ocf_cache_t cache); + +/** + * @brief Check is cache in incomplete state + * + * @param[in] cache Cache object + * + * @retval 1 Cache is in incomplete state + * @retval 0 Cache is in complete state + */ +bool ocf_cache_is_incomplete(ocf_cache_t cache); + +/** + * @brief Check if caching device is attached + * + * @param[in] cache Cache object + * + * @retval 1 Caching device is attached + * @retval 0 Caching device is detached + */ +bool ocf_cache_is_device_attached(ocf_cache_t cache); + +/** + * @brief Check if cache object is running + * + * @param[in] cache Cache object + * + * @retval 1 Caching device is being stopped + * @retval 0 Caching device is being stopped + */ +bool ocf_cache_is_running(ocf_cache_t cache); + +/** + * @brief Get cache mode of given cache object + * + * @param[in] cache Cache object + * + * @retval Cache mode + */ +ocf_cache_mode_t ocf_cache_get_mode(ocf_cache_t cache); + +/** + * @brief Get cache line size of given cache object + * + * @param[in] cache Cache object + * + * @retval Cache line size + */ +ocf_cache_line_size_t ocf_cache_get_line_size(ocf_cache_t cache); + +/** + * @brief Convert bytes to cache lines + * + * @param[in] cache Cache object + * @param[in] bytes Number of bytes + * + * @retval Cache lines count + */ +uint64_t ocf_cache_bytes_2_lines(ocf_cache_t cache, uint64_t bytes); + +/** + * @brief Get core count of given cache object + * + * @param[in] cache Cache object + * + * @retval Core count + */ +uint32_t ocf_cache_get_core_count(ocf_cache_t cache); + +/** + * @brief Get cache mode of given cache object + * + * @param[in] cache Cache object + * @param[out] info Cache info structure + * + * @retval 0 Success + * @retval Non-zero Fail + */ +int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info); + +/** + * @brief Get UUID of data object associated with cache + * + * @param[in] 
cache Cache object + * + * @retval Data object UUID, NULL if detached. + */ +const struct ocf_data_obj_uuid *ocf_cache_get_uuid(ocf_cache_t cache); + +/** + * @brief Get OCF context of given cache object + * + * @param[in] cache Cache object + * + * @retval OCF context + */ +ocf_ctx_t ocf_cache_get_ctx(ocf_cache_t cache); + +/** + * @brief Get data object type id of given cache object + * + * @param[in] cache Cache object + * + * @retval data object type id, -1 if device detached + */ +uint8_t ocf_cache_get_type_id(ocf_cache_t cache); + +#endif /* __OCF_CACHE_H__ */ diff --git a/inc/ocf_cfg.h b/inc/ocf_cfg.h new file mode 100644 index 0000000..266d6c9 --- /dev/null +++ b/inc/ocf_cfg.h @@ -0,0 +1,36 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + + +#ifndef __OCF_CFG_H__ +#define __OCF_CFG_H__ + +/** + * @file + * @brief OCF configuration file + */ + +/** + * Configure maximum numbers of cores in cache instance + */ +#ifndef OCF_CONFIG_MAX_CORES +#define OCF_CONFIG_MAX_CORES 4096 +#endif + +/** Maximum number of IO classes that can be configured */ +#ifndef OCF_CONFIG_MAX_IO_CLASSES +#define OCF_CONFIG_MAX_IO_CLASSES 33 +#endif + +#if OCF_CONFIG_MAX_IO_CLASSES > 256 +#error "Limit of maximum number of IO classes exceeded" +#endif + +/** Enabling debug statistics */ +#ifndef OCF_CONFIG_DEBUG_STATS +#define OCF_CONFIG_DEBUG_STATS 0 +#endif + +#endif /* __OCF_CFG_H__ */ diff --git a/inc/ocf_cleaner.h b/inc/ocf_cleaner.h new file mode 100644 index 0000000..85c0df7 --- /dev/null +++ b/inc/ocf_cleaner.h @@ -0,0 +1,51 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef OCF_CLEANER_H_ +#define OCF_CLEANER_H_ + +/** + * @file + * @brief OCF cleaner API for synchronization dirty data + * + */ + +/** + * @brief Run cleaner + * + * @param[in] c Cleaner instance to run + * @param[in] io_queue I/O queue to which cleaner requests should be submitted + * + * @retval Hint when to run cleaner next time. Value expressed in miliseconds. 
+ */ +uint32_t ocf_cleaner_run(ocf_cleaner_t c, uint32_t io_queue); + +/** + * @brief Set cleaner private data + * + * @param[in] c Cleaner handle + * @param[in] priv Private data + */ +void ocf_cleaner_set_priv(ocf_cleaner_t c, void *priv); + +/** + * @brief Get cleaner private data + * + * @param[in] c Cleaner handle + * + * @retval Cleaner private data + */ +void *ocf_cleaner_get_priv(ocf_cleaner_t c); + +/** + * @brief Get cache instance to which cleaner belongs + * + * @param[in] c Cleaner handle + * + * @retval Cache instance + */ +ocf_cache_t ocf_cleaner_get_cache(ocf_cleaner_t c); + +#endif diff --git a/inc/ocf_core.h b/inc/ocf_core.h new file mode 100644 index 0000000..4c1f08e --- /dev/null +++ b/inc/ocf_core.h @@ -0,0 +1,242 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +/** + * @file + * @brief OCF core API + */ + +#ifndef __OCF_CORE_H__ +#define __OCF_CORE_H__ + +#include "ocf_types.h" +#include "ocf_data_obj.h" +#include "ocf_io.h" +#include "ocf_mngt.h" + +/** + * @brief Obtain cache object from core + * + * @param[in] core Core object + * + * @retval Cache object + */ +ocf_cache_t ocf_core_get_cache(ocf_core_t core); + +/** + * @brief Obtain data object associated with core + * + * @param[in] core Core object + * + * @retval Data object + */ +ocf_data_obj_t ocf_core_get_data_object(ocf_core_t core); + +/** + * @brief Get UUID of data object associated with core + * + * @param[in] core Core object + * + * @retval Data object UUID + */ +static inline const struct ocf_data_obj_uuid *ocf_core_get_uuid(ocf_core_t core) +{ + return ocf_data_obj_get_uuid(ocf_core_get_data_object(core)); +} + +/** + * @brief Asociate new UUID value with given core + * + * @param[in] core Core object + * @param[in] uuid new core uuid + * + * @retval Data object UUID + */ +int ocf_core_set_uuid(ocf_core_t core, const struct ocf_data_obj_uuid *uuid); + +/** + * @brief Get sequential cutoff threshold of given core object + * + * @param[in] core Core object + * + * @retval Sequential cutoff threshold [B] + */ +uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core); + +/** + * @brief Get sequential cutoff policy of given core object + * + * @param[in] core Core object + * + * @retval Sequential cutoff policy + */ +ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core); + +/** + * @brief Get ID of given core object + * + * @param[in] core Core object + * + * @retval Core ID + */ +ocf_core_id_t ocf_core_get_id(ocf_core_t core); + +/** + * @brief Set name of given core object + * + * @param[in] core Core object + * @param[in] src Source of Core name + * @param[in] src_size Size of src + * + * @retval 0 Success + * @retval Non-zero Fail + */ +int ocf_core_set_name(ocf_core_t core, const char *src, size_t src_size); + +/** + * @brief Get name of given core object + * + * @param[in] core Core object + * + * @retval Core name + */ +const char *ocf_core_get_name(ocf_core_t core); + +/** + * @brief Get core state + * + * @param[in] core Core object + * + * @retval Core state + */ +ocf_core_state_t ocf_core_get_state(ocf_core_t core); + +/** + * @brief Obtain core object of given ID from cache + * + * @param[in] cache Cache object + * @param[in] id Core ID + * @param[out] core Core object + * + * @retval 0 Success + * @retval Non-zero Core getting failed + */ +int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core); + +/** + * @brief Set persistent user metadata for given core + * + * @param[in] core Core object + * 
@param[in] data User data buffer + * @param[in] size Size of user data buffer + * + * @retval 0 Success + * @retval Non-zero Core getting failed + */ +int ocf_core_set_user_metadata(ocf_core_t core, void *data, size_t size); + +/** + * @brief Get persistent user metadata from given core + * + * @param[in] core Core object + * @param[out] data User data buffer + * @param[in] size Size of user data buffer + * + * @retval 0 Success + * @retval Non-zero Core getting failed + */ +int ocf_core_get_user_metadata(ocf_core_t core, void *data, size_t size); + +/** + * @brief Allocate new ocf_io + * + * @param[in] core Core object + * + * @retval ocf_io object + */ +struct ocf_io *ocf_new_io(ocf_core_t core); + +/** + * @brief Submit ocf_io + * + * @param[in] io IO to be submitted + * @param[in] mode Cache mode to be enforced + * + * @retval 0 Success + * @retval Non-zero Fail + */ +int ocf_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode); + +/** + * @brief Submit ocf_io + * + * @param[in] io IO to be submitted + * + * @retval 0 Success + * @retval Non-zero Fail + */ +static inline int ocf_submit_io(struct ocf_io *io) +{ + return ocf_submit_io_mode(io, ocf_cache_mode_none); +} + +/** + * @brief Fast path for submitting IO. If possible, request is processed + * immediately without adding to internal request queue + * + * @param[in] io IO to be submitted + * + * @retval 0 IO has been submitted successfully + * @retval Non-zero Fast submit failed. Try to submit IO with ocf_submit_io() + */ +int ocf_submit_io_fast(struct ocf_io *io); + +/** + * @brief Submit ocf_io with flush command + * + * @param[in] io IO to be submitted + * + * @retval 0 Success + * @retval Non-zero Fail + */ +int ocf_submit_flush(struct ocf_io *io); + +/** + * @brief Submit ocf_io with discard command + * + * @param[in] io IO to be submitted + * + * @retval 0 Success + * @retval Non-zero Fail + */ +int ocf_submit_discard(struct ocf_io *io); + +/** + * @brief Core visitor function type which is called back when iterating over + * cores. 
+ * + * @param[in] core Core which is currently iterated (visited) + * @param[in] cntx Visitor context + * + * @retval 0 continue visiting cores + * @retval Non-zero stop iterating and return result + */ +typedef int (*ocf_core_visitor_t)(ocf_core_t core, void *cntx); + +/** + * @brief Run visitor function for each core of given cache + * + * @param[in] cache OCF cache instance + * @param[in] visitor Visitor function + * @param[in] cntx Visitor context + * @param[in] only_opened Visit only opened cores + * + * @retval 0 Success + * @retval Non-zero Fail + */ +int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx, + bool only_opened); + +#endif /* __OCF_CORE_H__ */ diff --git a/inc/ocf_ctx.h b/inc/ocf_ctx.h new file mode 100644 index 0000000..b48bff9 --- /dev/null +++ b/inc/ocf_ctx.h @@ -0,0 +1,356 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_CTX_H__ +#define __OCF_CTX_H__ + +/** + * @file + * @brief OCF library context API + */ + +#include "ocf_types.h" +#include "ocf_data_obj.h" +#include "ocf_logger.h" + +/** + * @brief Seeking start position in environment data buffer + */ +typedef enum { + ctx_data_seek_begin, + /*!< Seeking from the beginning of environment data buffer */ + ctx_data_seek_current, + /*!< Seeking from current position in environment data buffer */ +} ctx_data_seek_t; + +/** + * @brief OCF context specific operation + */ +struct ocf_ctx_ops { + /** + * @brief The name of the environment which provides platform + * interface for cache engine + */ + const char *name; + + /** + * @name Context data buffer operations + * @{ + */ + + /** + * @brief Allocate contest data buffer + * + * @param[in] pages The size of data buffer in pages + * + * @return Context data buffer + */ + ctx_data_t *(*data_alloc)(uint32_t pages); + + /** + * @brief Free context data buffer + * + * @param[in] data Contex data buffer which shall be freed + */ + void (*data_free)(ctx_data_t *data); + + /** + * @brief Lock context data buffer to disable swap-out + * + * @param[in] data Contex data buffer which shall be locked + * + * @retval 0 Memory locked successfully + * @retval Non-zero Memory locking failure + */ + int (*data_mlock)(ctx_data_t *data); + + /** + * @brief Unlock context data buffer + * + * @param[in] data Contex data buffer which shall be unlocked + */ + void (*data_munlock)(ctx_data_t *data); + + /** + * @brief Read from environment data buffer into raw data buffer + * + * @param[in,out] dst Destination raw memory buffer + * @param[in] src Source context data buffer + * @param[in] size Number of bytes to be read + * + * @return Number of read bytes + */ + uint32_t (*data_rd)(void *dst, ctx_data_t *src, uint32_t size); + + /** + * @brief Write raw data buffer into context data buffer + * + * @param[in,out] dst Destination context data buffer + * @param[in] src Source raw memory buffer + * @param[in] size Number of bytes to be written + * + * @return Number of written bytes + */ + uint32_t (*data_wr)(ctx_data_t *dst, const void *src, uint32_t size); + + /** + * @brief Zero context data buffer + * + * @param[in,out] dst Destination context data buffer to be zeroed + * @param[in] size Number of bytes to be zeroed + * + * @return Number of zeroed bytes + */ + uint32_t (*data_zero)(ctx_data_t *dst, uint32_t size); + + /** + * @brief Seek read/write head in context data buffer for specified + * offset + * + * @param[in,out] dst Destination context data buffer to be seek + * @param[in] seek Seek 
beginning offset + * @param[in] size Number of bytes to be seek + * + * @return Number of seek bytes + */ + uint32_t (*data_seek)(ctx_data_t *dst, + ctx_data_seek_t seek, uint32_t size); + + /** + * @brief Copy context data buffer content + * + * @param[in,out] dst Destination context data buffer + * @param[in] src Source context data buffer + * @param[in] to Starting offset in destination buffer + * @param[in] from Starting offset in source buffer + * @param[in] bytes Number of bytes to be copied + * + * @return Number of bytes copied + */ + uint64_t (*data_cpy)(ctx_data_t *dst, ctx_data_t *src, + uint64_t to, uint64_t from, uint64_t bytes); + + /** + * @brief Erase content of data buffer + * + * @param[in] dst Contex data buffer which shall be erased + */ + void (*data_secure_erase)(ctx_data_t *dst); + + /** + * @} + */ + + /** + * @name I/O queue operations + * @{ + */ + /** + * @brief Initialize I/O queue. + * + * This function should create worker, thread or any other queue + * processing related stuff specific to given environment. + * + * @param[in] q I/O queue to be initialized + * + * @retval 0 I/O queue has been initializaed successfully + * @retval Non-zero I/O queue initialization failure + */ + int (*queue_init)(ocf_queue_t q); + + /** + * @brief Kick I/O queue processing + * + * This function should inform worker, thread or any other queue + * processing mechanism, that there are new requests in queue to + * be processed. Processing requests inside current call is not allowed. + * + * @param[in] q I/O queue to be kicked + */ + void (*queue_kick)(ocf_queue_t q); + + /** + * @brief Kick I/O queue processing + * + * This function should inform worker, thread or any other queue + * processing mechanism, that there are new requests in queue to + * be processed. Kick function is allowed to process requests in current + * call + * + * @param[in] q I/O queue to be kicked + */ + void (*queue_kick_sync)(ocf_queue_t q); + + /** + * @brief Stop I/O queue + * + * @param[in] q I/O queue beeing stopped + */ + void (*queue_stop)(ocf_queue_t q); + + /** + * @} + */ + + /** + * @name Cleaner operations + * @{ + */ + /** + * @brief Initialize cleaner. + * + * This function should create worker, thread, timer or any other + * mechanism responsible for calling cleaner routine. + * + * @param[in] c Descriptor of cleaner to be initialized + * + * @retval 0 Cleaner has been initializaed successfully + * @retval Non-zero Cleaner initialization failure + */ + int (*cleaner_init)(ocf_cleaner_t c); + + /** + * @brief Stop cleaner + * + * @param[in] c Descriptor of cleaner beeing stopped + */ + void (*cleaner_stop)(ocf_cleaner_t c); + + /** + * @} + */ + + /** + * @name Metadata updater operations + * @{ + */ + /** + * @brief Initialize metadata updater. + * + * This function should create worker, thread, timer or any other + * mechanism responsible for calling metadata updater routine. + * + * @param[in] mu Handle to metadata updater to be initialized + * + * @retval 0 Metadata updater has been initializaed successfully + * @retval Non-zero I/O queue initialization failure + */ + int (*metadata_updater_init)(ocf_metadata_updater_t mu); + + /** + * @brief Kick metadata updater processing + * + * This function should inform worker, thread or any other mechanism, + * that there are new metadata requests to be processed. 
+ * + * @param[in] mu Metadata updater to be kicked + */ + void (*metadata_updater_kick)(ocf_metadata_updater_t mu); + + /** + * @brief Stop metadata updater + * + * @param[in] mu Metadata updater beeing stopped + */ + void (*metadata_updater_stop)(ocf_metadata_updater_t mu); + + /** + * @} + */ +}; + +/** + * @brief Register data object interface + * + * @note Type of data object operations is unique and cannot be repeated. + * + * @param[in] ctx OCF context + * @param[in] properties Reference to data object properties + * @param[in] type_id Type id of data object operations + * + * @retval 0 Data object operations registered successfully + * @retval Non-zero Data object registration failure + */ +int ocf_ctx_register_data_obj_type(ocf_ctx_t ctx, uint8_t type_id, + const struct ocf_data_obj_properties *properties); + +/** + * @brief Unregister data object interface + * + * @param[in] ctx OCF context + * @param[in] type_id Type id of data object operations + */ +void ocf_ctx_unregister_data_obj_type(ocf_ctx_t ctx, uint8_t type_id); + +/** + * @brief Get data object type operations by type id + * + * @param[in] ctx OCF context + * @param[in] type_id Type id of data object operations which were registered + * + * @return Data object type + * @retval NULL When data object operations were not registered + * for requested type + */ +ocf_data_obj_type_t ocf_ctx_get_data_obj_type(ocf_ctx_t ctx, uint8_t type_id); + +/** + * @brief Get data object type id by type + * + * @param[in] ctx OCF context + * @param[in] type Type of data object operations which were registered + * + * @return Data object type id + * @retval -1 When data object operations were not registered + * for requested type + */ +int ocf_ctx_get_data_obj_type_id(ocf_ctx_t ctx, ocf_data_obj_type_t type); + +/** + * @brief Create data object of given type + * + * @param[in] ctx handle to object designating ocf context + * @param[out] obj data object handle + * @param[in] uuid OCF data object UUID + * @param[in] type_id cache/core object type id + * + * @return Zero when success, othewise en error + */ + +int ocf_ctx_data_obj_create(ocf_ctx_t ctx, ocf_data_obj_t *obj, + struct ocf_data_obj_uuid *uuid, uint8_t type_id); + +/** + * @brief Set OCF context logger + * + * @param[in] ctx OCF context + * @param[in] logger Structure describing logger + * + * @return Zero when success, otherwise an error + */ +int ocf_ctx_set_logger(ocf_ctx_t ctx, const struct ocf_logger *logger); + +/** + * @brief Initialize OCF context + * + * @param[out] ctx OCF context + * @param[in] ops OCF context operations + * + * @return Zero when success, otherwise an error + */ +int ocf_ctx_init(ocf_ctx_t *ctx, const struct ocf_ctx_ops *ops); + +/** + * @brief De-Initialize OCF context + * + * @param[in] ctx OCF context + * + * @note Precondition is stopping all cache instances + * + * @return Zero when success, otherwise an error + */ +int ocf_ctx_exit(ocf_ctx_t ctx); + +#endif /* __OCF_CTX_H__ */ diff --git a/inc/ocf_data_obj.h b/inc/ocf_data_obj.h new file mode 100644 index 0000000..ba763d4 --- /dev/null +++ b/inc/ocf_data_obj.h @@ -0,0 +1,253 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_DATA_OBJ_H__ +#define __OCF_DATA_OBJ_H__ + +/** + * @file + * @brief OCF data object API + */ + +#include "ocf_types.h" + +struct ocf_io; + +/** + * @brief OCF data object UUID maximum allowed size + */ +#define OCF_DATA_OBJ_UUID_MAX_SIZE (4096UL - sizeof(uint32_t)) + +/** + * @brief OCF data object UUID + 
*/ +struct ocf_data_obj_uuid { + size_t size; + /*!< UUID data size */ + + const void *data; + /*!< UUID data content */ +}; + +/** + * @brief This structure describes data object capabilities + */ +struct ocf_data_obj_caps { + uint32_t atomic_writes : 1; + /*!< Data object supports atomic writes */ +}; + +/** + * @brief OCF data object interface declaration + */ +struct ocf_data_obj_ops { + /** + * @brief Allocate new IO for this data object + * + * @param[in] obj Data object for which IO is created + * @return IO On success + * @return NULL On failure + */ + struct ocf_io *(*new_io)(ocf_data_obj_t obj); + + /** + * @brief Submit IO on this data object + * + * @param[in] io IO to be submitted + */ + void (*submit_io)(struct ocf_io *io); + + /** + * @brief Submit IO with flush command + * + * @param[in] io IO to be submitted + */ + void (*submit_flush)(struct ocf_io *io); + + /** + * @brief Submit IO with metadata + * + * @param[in] io IO to be submitted + */ + void (*submit_metadata)(struct ocf_io *io); + + /** + * @brief Submit IO with discard command + * + * @param[in] io IO to be submitted + */ + void (*submit_discard)(struct ocf_io *io); + + /** + * @brief Submit operation to write zeroes to target address (including + * metadata extended LBAs in atomic mode) + * + * @param[in] io IO description (addr, size) + */ + void (*submit_write_zeroes)(struct ocf_io *io); + + /** + * @brief Open data object + * + * @note This function performs data object initialization and should + * be called before any other operation on data object + * + * @param[in] obj Data object + */ + int (*open)(ocf_data_obj_t obj); + + /** + * @brief Close data object + * + * @param[in] obj Data object + */ + void (*close)(ocf_data_obj_t obj); + + /** + * @brief Close data object + * + * @param[in] obj Data object + */ + unsigned int (*get_max_io_size)(ocf_data_obj_t obj); + + /** + * @brief Close data object + * + * @param[in] obj Data object + */ + uint64_t (*get_length)(ocf_data_obj_t obj); +}; + +/** + * @brief This structure describes data object properties + */ +struct ocf_data_obj_properties { + const char *name; + /*!< The name of data object operations */ + + uint32_t io_context_size; + /*!< Size of io context structure */ + + struct ocf_data_obj_caps caps; + /*!< Data object capabilities */ + + struct ocf_data_obj_ops ops; + /*!< Data object operations */ +}; + +/** + * @brief Get data object type + * + * @param[in] obj Data object + * + * @return Data object type + */ +ocf_data_obj_type_t ocf_data_obj_get_type(ocf_data_obj_t obj); + +/** + * @brief Get private context of data object + * + * @param[in] obj Data object + * + * @return Data object private context + */ +void *ocf_data_obj_get_priv(ocf_data_obj_t obj); + +/** + * @brief Set private context for data object + * + * @param[in] obj Data object + * @param[in] priv Data object private context to be set + */ +void ocf_data_obj_set_priv(ocf_data_obj_t obj, void *priv); + +/** + * @brief Get data object UUID + * + * @param[in] obj Data object + * + * @return UUID of data object + */ +const struct ocf_data_obj_uuid *ocf_data_obj_get_uuid(ocf_data_obj_t obj); + +/** + * @brief Get data object length + * + * @param[in] obj Data object + * + * @return Length of data object in bytes + */ +uint64_t ocf_data_obj_get_length(ocf_data_obj_t obj); + +/** + * @brief Get cache handle for given data object + * + * @param obj data object handle + * + * @return Handle to cache for which data object belongs to + */ +ocf_cache_t ocf_data_obj_get_cache(ocf_data_obj_t 
obj); + +/** + * @brief Initialize data object + * + * @param[in] obj data object handle + * @param[in] type cache/core object type + * @param[in] uuid OCF data object UUID + * @param[in] uuid_copy crate copy of uuid data + * + * @return Zero when success, othewise en error + */ +int ocf_data_obj_init(ocf_data_obj_t obj, ocf_data_obj_type_t type, + struct ocf_data_obj_uuid *uuid, bool uuid_copy); + +/** + * @brief Deinitialize data object + * + * @param[in] obj data object handle + */ +void ocf_data_obj_deinit(ocf_data_obj_t obj); + +/** + * @brief Allocate and initialize data object + * + * @param[out] obj pointer to data object handle + * @param[in] type cache/core object type + * @param[in] uuid OCF data object UUID + * + * @return Zero when success, othewise en error + */ +int ocf_data_obj_create(ocf_data_obj_t *obj, ocf_data_obj_type_t type, + struct ocf_data_obj_uuid *uuid); + +/** + * @brief Deinitialize and free data object + * + * @param[in] obj data object handle + */ +void ocf_data_obj_destroy(ocf_data_obj_t obj); + +/** + * @brief Allocate new io from data object allocator + * + * @param[in] obj data object handle + */ +struct ocf_io *ocf_data_obj_new_io(ocf_data_obj_t obj); + +/** + * @brief Delete io from data object allocator + * + * @param[in] io handle to previously allocated io + */ +void ocf_data_obj_del_io(struct ocf_io* io); + +/** + * @brief Return io context data + * + * @param[in] io ocf io handle + */ +void *ocf_data_obj_get_data_from_io(struct ocf_io* io); + +#endif /* __OCF_DATA_OBJ_H__ */ diff --git a/inc/ocf_def.h b/inc/ocf_def.h new file mode 100644 index 0000000..1346127 --- /dev/null +++ b/inc/ocf_def.h @@ -0,0 +1,325 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + + +#ifndef __OCF_DEF_H__ +#define __OCF_DEF_H__ + +#include "ocf_cfg.h" +/** + * @file + * @brief OCF definitions + */ + +/** + * @name OCF cache definitions + */ +/** + * Minimum value of a valid cache ID + */ +#define OCF_CACHE_ID_MIN 1 +/** + * Maximum value of a valid cache ID + */ +#define OCF_CACHE_ID_MAX 16384 +/** + * Invalid value of cache id + */ +#define OCF_CACHE_ID_INVALID 0 +/** + * Minimum cache size in bytes + */ +#define OCF_CACHE_SIZE_MIN (100 * MiB) +/** + * Size of cache name + */ +#define OCF_CACHE_NAME_SIZE 32 +/** + * Value to turn off fallback pass through + */ +#define OCF_CACHE_FALLBACK_PT_INACTIVE 0 +/** + * Minimum value of io error threshold + */ +#define OCF_CACHE_FALLBACK_PT_MIN_ERROR_THRESHOLD \ + OCF_CACHE_FALLBACK_PT_INACTIVE +/** + * Maximum value of io error threshold + */ +#define OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD 1000000 +/** + * @} + */ + +/** + * @name OCF cores definitions + */ +/** + * Maximum numbers of cores per cache instance + */ +#define OCF_CORE_MAX OCF_CONFIG_MAX_CORES +/** + * Minimum value of a valid core ID + */ +#define OCF_CORE_ID_MIN 0 +/** + * Maximum value of a valid core ID + */ +#define OCF_CORE_ID_MAX (OCF_CORE_MAX - 1) +/** + * Invalid value of core id + */ +#define OCF_CORE_ID_INVALID OCF_CORE_MAX +/** + * Size of core name + */ +#define OCF_CORE_NAME_SIZE 32 +/** + * Minimum value of valid core sequence number + */ +#define OCF_SEQ_NO_MIN 1 +/** + * Maximum value of a valid core sequence number + */ +#define OCF_SEQ_NO_MAX (65535UL) +/* + * Invalid value of core sequence number + */ +#define OCF_SEQ_NO_INVALID 0 +/** + * @} + */ + +/** + * @name Miscellaneous defines + * @{ + */ +#define KiB (1ULL << 10) +#define MiB (1ULL << 20) +#define GiB (1ULL << 30) + +#if 
OCF_CONFIG_DEBUG_STATS == 1 +/** Macro which indicates that extended debug statistics shall be on*/ +#define OCF_DEBUG_STATS +#endif +/** + * @} + */ + +/** + * This Enumerator describes OCF cache instance state + */ +typedef enum { + ocf_cache_state_running = 0, //!< ocf_cache_state_running + /*!< OCF is currently running */ + + ocf_cache_state_stopping = 1, //!< ocf_cache_state_stopping + /*!< OCF cache instance is stopping */ + + ocf_cache_state_initializing = 2, //!< ocf_cache_state_initializing + /*!< OCF cache instance during initialization */ + + ocf_cache_state_incomplete = 3, //!< ocf_cache_state_incomplete + /*!< OCF cache has at least one inactive core */ + + ocf_cache_state_max //!< ocf_cache_state_max + /*!< Stopper of cache state enumerator */ +} ocf_cache_state_t; + +/** + * This Enumerator describes OCF core instance state + */ +typedef enum { + ocf_core_state_active = 0, + /*!< Core is active */ + + ocf_core_state_inactive, + /*!< Core is inactive (not attached) */ + + ocf_core_state_max, + /*!< Stopper of core state enumerator */ +} ocf_core_state_t; + + +/** + * OCF supported cache modes + */ +typedef enum { + ocf_cache_mode_wt = 0, + /*!< Write-through cache mode */ + + ocf_cache_mode_wb, + /*!< Write-back cache mode */ + + ocf_cache_mode_wa, + /*!< Write-around cache mode */ + + ocf_cache_mode_pt, + /*!< Pass-through cache mode */ + + ocf_cache_mode_wi, + /*!< Write invalidate cache mode */ + + ocf_cache_mode_max, + /*!< Stopper of cache mode enumerator */ + + ocf_cache_mode_default = ocf_cache_mode_wt, + /*!< Default cache mode */ + + ocf_cache_mode_none = -1, + /*!< Current cache mode of given cache instance */ +} ocf_cache_mode_t; + +typedef enum { + ocf_seq_cutoff_policy_always = 0, + /*!< Sequential cutoff always on */ + + ocf_seq_cutoff_policy_full, + /*!< Sequential cutoff when occupancy is 100% */ + + ocf_seq_cutoff_policy_never, + /*!< Sequential cutoff disabled */ + + ocf_seq_cutoff_policy_max, + /*!< Stopper of sequential cutoff policy enumerator */ + + ocf_seq_cutoff_policy_default = ocf_seq_cutoff_policy_full, + /*!< Default sequential cutoff policy*/ +} ocf_seq_cutoff_policy; + +/** + * OCF supported eviction types + */ +typedef enum { + ocf_eviction_lru = 0, + /*!< Last recently used eviction policy */ + + ocf_eviction_max, + /*!< Stopper of enumerator */ + + ocf_eviction_default = ocf_eviction_lru, + /*!< Default eviction policy */ +} ocf_eviction_t; + +/** + * OCF supported Write-Back cleaning policies type + */ +typedef enum { + ocf_cleaning_nop = 0, + /*!< Cleaning won't happen in background. Only on eviction or + * during cache stop + */ + + ocf_cleaning_alru, + /*!< Approximately recently used. Cleaning thread in the + * background enabled which cleans dirty data during IO + * inactivity. + */ + + ocf_cleaning_acp, + /*!< Cleaning algorithm attempts to reduce core device seek + * distance. Cleaning thread runs concurrently with I/O. 
+ */ + + ocf_cleaning_max, + /*!< Stopper of enumerator */ + + ocf_cleaning_default = ocf_cleaning_alru, + /*!< Default cleaning policy type */ +} ocf_cleaning_t; + +/** + * OCF supported cache line sizes in bytes + */ +typedef enum { + ocf_cache_line_size_4 = 4 * KiB, + /*!< 4 kiB */ + + ocf_cache_line_size_8 = 8 * KiB, + /*!< 8 kiB */ + + ocf_cache_line_size_16 = 16 * KiB, + /*!< 16 kiB */ + + ocf_cache_line_size_32 = 32 * KiB, + /*!< 32 kiB */ + + ocf_cache_line_size_64 = 64 * KiB, + /*!< 64 kiB */ + + ocf_cache_line_size_default = ocf_cache_line_size_4, + /*!< Default cache line size */ + + ocf_cache_line_size_min = ocf_cache_line_size_4, + /*!< Minimum cache line size */ + + ocf_cache_line_size_max = ocf_cache_line_size_64, + /*!< Maximal cache line size */ + + ocf_cache_line_size_inf = ~0ULL, + /*!< Force enum to be 64-bit */ +} ocf_cache_line_size_t; + +/** + * Metadata layout + */ +typedef enum { + ocf_metadata_layout_striping = 0, + ocf_metadata_layout_seq = 1, + ocf_metadata_layout_max, + ocf_metadata_layout_default = ocf_metadata_layout_striping +} ocf_metadata_layout_t; + +/** + * @name OCF IO class definitions + */ +/** + * Maximum numbers of IO classes per cache instance + */ +#define OCF_IO_CLASS_MAX OCF_CONFIG_MAX_IO_CLASSES +/** + * Minimum value of a valid IO class ID + */ +#define OCF_IO_CLASS_ID_MIN 0 +/** + * Maximum value of a valid IO class ID + */ +#define OCF_IO_CLASS_ID_MAX (OCF_IO_CLASS_MAX - 1) +/** + * Invalid value of IO class id + */ +#define OCF_IO_CLASS_INVALID OCF_IO_CLASS_MAX + +/** Maximum size of the IO class name */ +#define OCF_IO_CLASS_NAME_MAX 33 + +/** IO class priority which indicates pinning */ +#define OCF_IO_CLASS_PRIO_PINNED -1 + +/** The highest IO class priority */ +#define OCF_IO_CLASS_PRIO_HIGHEST 0 + +/** The lowest IO class priority */ +#define OCF_IO_CLASS_PRIO_LOWEST 255 + +/** Default IO class priority */ +#define OCF_IO_CLASS_PRIO_DEFAULT OCF_IO_CLASS_PRIO_LOWEST +/** + * @} + */ + +/** + * @name I/O operations + * @{ + */ +#define OCF_READ 0 +#define OCF_WRITE 1 +/** + * @} + */ + +#define MAX_TRIM_RQ_SIZE (1 * MiB) + +#endif /* __OCF_DEF_H__ */ diff --git a/inc/ocf_err.h b/inc/ocf_err.h new file mode 100644 index 0000000..b5ba9db --- /dev/null +++ b/inc/ocf_err.h @@ -0,0 +1,97 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_ERR_H__ +#define __OCF_ERR_H__ + +/** + * @file + * @brief OCF error codes definitions + */ + +/** + * @brief OCF error enumerator + */ +typedef enum { + /** Invalid input parameter value */ + OCF_ERR_INVAL = 1000000, + + /** Invalid data object type */ + OCF_ERR_INVAL_DATA_OBJ_TYPE, + + /** Operation interrupted */ + OCF_ERR_INTR, + + /** Unknown error occurred */ + OCF_ERR_UNKNOWN, + + /*!< To many caches */ + OCF_ERR_TOO_MANY_CACHES, + + /** Out of memory */ + OCF_ERR_NO_MEM, + + /** Not enough RAM to start cache */ + OCF_ERR_NO_FREE_RAM, + + /** Start cache failure */ + OCF_ERR_START_CACHE_FAIL, + + /** Cache is busy */ + OCF_ERR_CACHE_IN_USE, + + /** Cache ID does not exist */ + OCF_ERR_CACHE_NOT_EXIST, + + /** Cache ID already exists */ + OCF_ERR_CACHE_EXIST, + + /** Too many core devices in cache */ + OCF_ERR_TOO_MANY_CORES, + + /** Core device not available */ + OCF_ERR_CORE_NOT_AVAIL, + + /** Cannot open device exclusively*/ + OCF_ERR_NOT_OPEN_EXC, + + /** Cache device not available */ + OCF_ERR_CACHE_NOT_AVAIL, + + /** IO Class does not exist */ + OCF_ERR_IO_CLASS_NOT_EXIST, + + /** Error while writing to cache device */ + 
OCF_ERR_WRITE_CACHE, + + /** Error while writing to core device */ + OCF_ERR_WRITE_CORE, + + /*!< Dirty shutdown */ + OCF_ERR_DIRTY_SHUTDOWN, + + /** Cache contains dirty data */ + OCF_ERR_DIRTY_EXISTS, + + /** Flushing of core interrupted */ + OCF_ERR_FLUSHING_INTERRUPTED, + + /** Adding core to core pool failed */ + OCF_ERR_CANNOT_ADD_CORE_TO_POOL, + + /** Cache is in incomplete state */ + OCF_ERR_CACHE_IN_INCOMPLETE_STATE, + + /** Core device is in inactive state */ + OCF_ERR_CORE_IN_INACTIVE_STATE, + + /** Invalid cache mode */ + OCF_ERR_INVALID_CACHE_MODE, + + /** Invalid cache line size */ + OCF_ERR_INVALID_CACHE_LINE_SIZE, +} ocf_error_t; + +#endif /* __OCF_ERR_H__ */ diff --git a/inc/ocf_io.h b/inc/ocf_io.h new file mode 100644 index 0000000..3ccbb58 --- /dev/null +++ b/inc/ocf_io.h @@ -0,0 +1,336 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + + +#ifndef __OCF_IO_H__ +#define __OCF_IO_H__ + +#include "ocf_types.h" + +/** + * @file + * @brief OCF IO definitions + */ + +struct ocf_io; + +/** + * @brief OCF IO legacy completion + * + * @note This type of completion is for legacy completion type + * + * @param[in] private_data Private data for completion function + * @param[in] error Completion status code + */ +typedef void (*ocf_end_t)(void *private_data, int error); + +/** + * @brief OCF IO start + * + * @note OCF IO start notification callback + * + * @param[in] io OCF IO being started + */ +typedef void (*ocf_start_io_t)(struct ocf_io *io); + +/** + * @brief OCF IO handle + * + * @note OCF IO handle callback + * + * @param[in] io OCF IO to handle + */ +typedef void (*ocf_handle_io_t)(struct ocf_io *io, void *opaque); + +/** + * @brief OCF IO completion + * + * @note Completion function for OCF IO + * + * @param[in] io OCF IO being completed + * @param[in] error Completion status code + */ +typedef void (*ocf_end_io_t)(struct ocf_io *io, int error); + +/** + * @brief OCF IO main structure + */ +struct ocf_io { + /** + * @brief OCF IO destination data object + */ + ocf_data_obj_t obj; + + /** + * @brief Operations set for this OCF IO + */ + const struct ocf_io_ops *ops; + + /** + * @brief OCF IO destination address + */ + uint64_t addr; + + /** + * @brief OCF IO flags + */ + uint64_t flags; + + /** + * @brief OCF IO size in bytes + */ + uint32_t bytes; + + /** + * @brief OCF IO destination class + */ + uint32_t class; + + /** + * @brief OCF IO direction + */ + uint32_t dir; + + /** + * @brief Queue id + */ + uint32_t io_queue; + + /** + * @brief OCF IO start function + */ + ocf_start_io_t start; + + /** + * @brief OCF IO handle function + */ + ocf_handle_io_t handle; + + /** + * @brief OCF IO completion function + */ + ocf_end_io_t end; + + /** + * @brief OCF IO private 1 + */ + void *priv1; + + /** + * @brief OCF IO private 2 + */ + void *priv2; +}; + +/** + * @brief OCF IO operations set structure + */ +struct ocf_io_ops { + /** + * @brief Set up data vector in OCF IO + * + * @param[in] io OCF IO to set up + * @param[in] data Source context data + * @param[in] offset Data offset in source context data + * + * @retval 0 Data set up successfully + * @retval Non-zero Data set up failure + */ + int (*set_data)(struct ocf_io *io, ctx_data_t *data, + uint32_t offset); + + /** + * @brief Get context data from OCF IO + * + * @param[in] io OCF IO to get data + * + * @return Data vector from IO + */ + ctx_data_t *(*get_data)(struct ocf_io *io); + + /** + * @brief Increase reference counter in OCF IO + * + * @param[in] io OCF IO + 
*/ + void (*get)(struct ocf_io *io); + + /** + * @brief Decrease reference counter in OCF IO + * + * @note If IO don't have any reference - deallocate it + * + * @param[in] io OCF IO + */ + void (*put)(struct ocf_io *io); +}; + +/** + * @brief Configure OCF IO + * + * @param[in] io OCF IO + * @param[in] addr OCF IO destination address + * @param[in] bytes OCF IO size in bytes + * @param[in] dir OCF IO direction + * @param[in] class OCF IO destination class + * @param[in] flags OCF IO flags + */ +static inline void ocf_io_configure(struct ocf_io *io, uint64_t addr, + uint32_t bytes, uint32_t dir, uint32_t class, uint64_t flags) +{ + io->addr = addr; + io->bytes = bytes; + io->class = class; + io->flags = flags; + io->dir = dir; +} + +/** + * @brief Increase reference counter in OCF IO + * + * @note Wrapper for get IO operation + * + * @param[in] io OCF IO + */ +static inline void ocf_io_get(struct ocf_io *io) +{ + io->ops->get(io); +} + +/** + * @brief Decrease reference counter in OCF IO + * + * @note If IO don't have any reference - deallocate it + * + * @param[in] io OCF IO + */ +static inline void ocf_io_put(struct ocf_io *io) +{ + io->ops->put(io); +} + +/** + * @brief Set OCF IO completion function + * + * @param[in] io OCF IO + * @param[in] context Context for completion function + * @param[in] fn Completion function + */ +static inline void ocf_io_set_cmpl(struct ocf_io *io, void *context, + void *context2, ocf_end_io_t fn) +{ + io->priv1 = context; + io->priv2 = context2; + io->end = fn; +} + +/** + * @brief Set OCF IO start function + * + * @param[in] io OCF IO + * @param[in] fn Start callback function + */ +static inline void ocf_io_set_start(struct ocf_io *io, ocf_start_io_t fn) +{ + io->start = fn; +} + +/** + * @brief Set OCF IO handle function + * + * @param[in] io OCF IO + * @param[in] fn Handle callback function + */ +static inline void ocf_io_set_handle(struct ocf_io *io, ocf_handle_io_t fn) +{ + io->handle = fn; +} + +/** + * @brief Call default completion function + * + * @note It is helper function for legacy completion functions + * + * @param[in] io OCF IO + * @param[in] error Completion status code + */ +static inline void ocf_io_end_default(struct ocf_io *io, int error) +{ + ocf_end_t end = io->priv2; + + end(io->priv1, error); + + ocf_io_put(io); +} + +/** + * @brief Set OCF IO default completion function + * + * @note This type of completion is for legacy completion type + * + * @param[in] io OCF IO + * @param[in] context Context for completion function + * @param[in] fn Completion function + */ +static inline void ocf_io_set_default_cmpl(struct ocf_io *io, void *context, + ocf_end_t fn) +{ + io->priv1 = context; + io->priv2 = fn; + io->end = ocf_io_end_default; +} + +/** + * @brief Set up data vector in OCF IO + * + * @note Wrapper for set up data vector function + * + * @param[in] io OCF IO to set up + * @param[in] data Source data vector + * @param[in] offset Data offset in source data vector + * + * @retval 0 Data set up successfully + * @retval Non-zero Data set up failure + */ +static inline int ocf_io_set_data(struct ocf_io *io, ctx_data_t *data, + uint32_t offset) +{ + return io->ops->set_data(io, data, offset); +} + +/** + * @brief Get data vector from OCF IO + * + * @note Wrapper for get data vector function + * + * @param[in] io OCF IO to get data + * + * @return Data vector from IO + */ +static inline ctx_data_t *ocf_io_get_data(struct ocf_io *io) +{ + return io->ops->get_data(io); +} + +/** + * @brief Set queue id to which IO should be submitted + * + 
* @param[in] io OCF IO to set up + * @param[in] queue IO queue id + */ +static inline void ocf_io_set_queue(struct ocf_io *io, uint32_t queue) +{ + io->io_queue = queue; +} + +/** + * @brief Handle IO in cache engine + * + * @param[in] io OCF IO to be handled + * @param[in] opaque OCF opaque + */ +void ocf_io_handle(struct ocf_io *io, void *opaque); + +#endif /* __OCF_IO_H__ */ diff --git a/inc/ocf_io_class.h b/inc/ocf_io_class.h new file mode 100644 index 0000000..e3e7b56 --- /dev/null +++ b/inc/ocf_io_class.h @@ -0,0 +1,109 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +/** + * @file + * @brief IO class API + * + * File contains structures and methods for handling IO Class + * differentiation features + */ + +#ifndef __OCF_IO_CLASS_H__ +#define __OCF_IO_CLASS_H__ + +/** + * @brief OCF IO class information + */ +struct ocf_io_class_info { + char name[OCF_IO_CLASS_NAME_MAX]; + /*!< The name of the IO class */ + + ocf_cache_mode_t cache_mode; + /*!< Cache mode of the IO class */ + + int16_t priority; + /*!< IO class priority */ + + uint32_t curr_size; + /*!< Current size of the IO class - number of cache lines which + * were assigned into this IO class + */ + + uint32_t min_size; + /*!< Minimum number of cache lines that were guaranteed + * for specified IO class. If current size reach minimum size + * that no more eviction takes place + */ + + uint32_t max_size; + /*!< Maximum number of cache lines that might be assigned into + * this IO class. If current size reach maximum size no more + * allocation for this IO class takes place + */ + + uint8_t eviction_policy_type; + /*!< The type of eviction policy for given IO class */ + + ocf_cleaning_t cleaning_policy_type; + /*!< The type of cleaning policy for given IO class */ +}; + +/** + * @brief retrieve io class info + * + * function meant to retrieve information pertaining to particular IO class, + * specifically to fill ocf_io_class_info structure based on input parameters. + * + * @param[in] cache cache id, to which specified request pertains. + * @param[in] io_class id of an io class which shall be retreived. + * @param[out] info io class info structure to be filled as a + * result of this function call. + * + * @return function returns 0 upon successful completion; appropriate error + * code is returned otherwise + */ +int ocf_io_class_get_info(ocf_cache_t cache, uint32_t io_class, + struct ocf_io_class_info *info); + +/** + * @brief helper function for ocf_io_class_visit + * + * This function is called back from ocf_io_class_visit for each valid + * configured io class; henceforth all parameters are input parameters, + * no exceptions. It is usable to enumerate all the io classes. + * + * @param[in] cache cache id of cache for which data is being retrieved + * @param[in] io_class_id id of an io class for which callback herein + * is invoked. + * @param[in] cntx a context pointer passed herein from within + * ocf_io_class_visit down to this callback. + * + * @return 0 upon success; Nonzero upon failure (when nonzero is returned, + * this callback won't be invoked for any more io classes) + */ +typedef int (*ocf_io_class_visitor_t)(ocf_cache_t cache, + uint32_t io_class_id, void *cntx); + +/** + * @brief enumerate all of the available IO classes. + * + * This function allows enumeration and retrieval of all io class id's that + * are valid for given cache id via visiting all those with callback function + * that is supplied by caller. 
+ * + * @param[in] cache cache id to which given call pertains + * @param[in] visitor a callback function that will be issued for each and every + * IO class that is configured and valid within given cache instance + * @param[in] cntx a context variable - structure that shall be passed to a + * callback function for every call + * + * @return 0 upon successful completion of the function; otherwise nonzero result + * shall be returned + */ +int ocf_io_class_visit(ocf_cache_t cache, ocf_io_class_visitor_t visitor, + void *cntx); + +#endif /* __OCF_IO_CLASS_H__ */ diff --git a/inc/ocf_logger.h b/inc/ocf_logger.h new file mode 100644 index 0000000..7ce9ca7 --- /dev/null +++ b/inc/ocf_logger.h @@ -0,0 +1,41 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_LOGGER_H__ +#define __OCF_LOGGER_H__ + +/** + * @file + * @brief Logger API + */ + +#include + +/** + * @brief Verbosity levels of context log + */ +typedef enum { + log_emerg, + log_alert, + log_crit, + log_err, + log_warn, + log_notice, + log_info, + log_debug, +} ocf_logger_lvl_t; + +struct ocf_logger { + int (*open)(const struct ocf_logger *logger); + void (*close)(const struct ocf_logger *logger); + int (*printf)(const struct ocf_logger *logger, ocf_logger_lvl_t lvl, + const char *fmt, va_list args); + int (*printf_rl)(const char *func_name); + int (*dump_stack)(const struct ocf_logger *logger); + + void *priv; +}; + +#endif /* __OCF_LOGGER_H__ */ diff --git a/inc/ocf_metadata.h b/inc/ocf_metadata.h new file mode 100644 index 0000000..a9d57d4 --- /dev/null +++ b/inc/ocf_metadata.h @@ -0,0 +1,99 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_METADATA_H__ +#define __OCF_METADATA_H__ + +/** + * @file + * @brief OCF metadata helper function + * + * Those functions can be used by data object implementation. + */ + +/** + * @brief Atomic metadata for extended sector + * + * @warning The size of this structure has to be equal 8 bytes + */ +struct ocf_atomic_metadata { + /** Core line of core (in cache line size unit) which are cached */ + uint64_t core_line : 46; + + /** Core sequence number to which this line belongs to*/ + uint32_t core_seq_no : 16; + + /** Set bit indicates that given sector is valid (is cached) */ + uint32_t valid : 1; + + /** Set bit indicates that sector i dirty */ + uint32_t dirty : 1; +} __attribute__((packed)); + +#define OCF_ATOMIC_METADATA_SIZE sizeof(struct ocf_atomic_metadata) + +/** + * @brief Get metadata entry (cache mapping) for specified sector of cache + * device + * + * Metadata has sector granularity. 
It might be used by data object which + * supports atomic writes - (write of data and metadata in one buffer) + * + * @param[in] cache OCF cache instance + * @param[in] addr Sector address in bytes + * @param[out] entry Metadata entry + * + * @retval 0 Metadata retrieved successfully + * @retval Non-zero Error + */ +int ocf_metadata_get_atomic_entry(ocf_cache_t cache, uint64_t addr, + struct ocf_atomic_metadata *entry); + +/** + * @brief Probe cache device + * + * @param[in] ctx handle to object designating ocf context + * @param[in] cache_obj Cache data object + * @param[out] clean_shutdown Cache was graceful stopped + * @param[out] cache_dirty Cache is dirty + * + * @retval 0 Probe successfully performed + * @retval -ENODATA Cache has not been detected + * @retval Non-zero ERROR + */ +int ocf_metadata_probe(ocf_ctx_t ctx, ocf_data_obj_t cache_obj, + bool *clean_shutdown, bool *cache_dirty); + +/** + * @brief Check if sectors in cache line before given address are invalid + * + * It might be used by data object which supports + * atomic writes - (write of data and metadata in one buffer) + * + * @param[in] cache OCF cache instance + * @param[in] addr Sector address in bytes + * + * @retval 0 Not all sectors before given address are invalid + * @retval Non-zero Number of sectors before given address + */ +int ocf_metadata_check_invalid_before(ocf_cache_t cache, uint64_t addr); + +/** + * @brief Check if sectors in cache line after given end address are invalid + * + * It might be used by data object which supports + * atomic writes - (write of data and metadata in one buffer) + * + * @param[in] cache OCF cache instance + * @param[in] addr Sector address in bytes + * @param[in] bytes IO size in bytes + * + * @retval 0 Not all sectors after given end address are invalid + * @retval Non-zero Number of sectors after given end address + */ +int ocf_metadata_check_invalid_after(ocf_cache_t cache, uint64_t addr, + uint32_t bytes); + +#endif /* __OCF_METADATA_H__ */ diff --git a/inc/ocf_metadata_updater.h b/inc/ocf_metadata_updater.h new file mode 100644 index 0000000..7a5084f --- /dev/null +++ b/inc/ocf_metadata_updater.h @@ -0,0 +1,50 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_METADATA_UPDATER_H__ +#define __OCF_METADATA_UPDATER_H__ + +/** + * @file + * @brief OCF metadata updater API + * + */ + +/** + * @brief Run metadata updater + * + * @param[in] mu Metadata updater instance to run + * + * @retval Hint if there is need to rerun without waiting. 
+ */ +uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu); + +/** + * @brief Set metadata updater private data + * + * @param[in] c Metadata updater handle + * @param[in] priv Private data + */ +void ocf_metadata_updater_set_priv(ocf_metadata_updater_t mu, void *priv); + +/** + * @brief Get metadata updater private data + * + * @param[in] c Metadata updater handle + * + * @retval Metadata updater private data + */ +void *ocf_metadata_updater_get_priv(ocf_metadata_updater_t mu); + +/** + * @brief Get cache instance to which metadata updater belongs + * + * @param[in] c Metadata updater handle + * + * @retval Cache instance + */ +ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu); + +#endif /* __OCF_METADATA_UPDATER_H__ */ diff --git a/inc/ocf_mngt.h b/inc/ocf_mngt.h new file mode 100644 index 0000000..739aae4 --- /dev/null +++ b/inc/ocf_mngt.h @@ -0,0 +1,813 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_MNGT_H__ +#define __OCF_MNGT_H__ + +#include "ocf_types.h" +#include "ocf_cache.h" +#include "ocf_core.h" + +/** + * @file + * @brief OCF management operations definitions + */ + +/** + * @brief Core start configuration + */ +struct ocf_mngt_core_config { + /** + * @brief OCF core data object UUID + */ + struct ocf_data_obj_uuid uuid; + + /** + * @brief OCF core data object type + */ + uint8_t data_obj_type; + + /** + * @brief OCF core ID number + */ + ocf_core_id_t core_id; + + /** + * @brief OCF core name. In case of being NULL, core id is stringified + * to core name + */ + const char *name; + + /** + * @brief OCF core name size + */ + size_t name_size; + + /** + * @brief OCF cache ID number + */ + ocf_cache_id_t cache_id; + + /** + * @brief Add core to pool if cache isn't present or add core to + * earlier loaded cache + */ + bool try_add; + + uint32_t seq_cutoff_threshold; + /*!< Sequential cutoff threshold (in bytes) */ + + struct { + void *data; + size_t size; + } user_metadata; +}; + +/** + * @brief Get number of OCF caches + * + * @param[in] ctx OCF context + * + * @retval Number of caches in given OCF instance + */ +uint32_t ocf_mngt_cache_get_count(ocf_ctx_t ctx); + +/* Cache instances getters */ + +/** + * @brief Get OCF cache + * + * @note This function on success also increasing reference counter in given + * cache + * + * @param[in] ctx OCF context + * @param[in] id OCF cache ID + * @param[out] cache OCF cache handle + * + * @retval 0 Get cache successfully + * @retval -OCF_ERR_INV_CACHE_ID Cache ID out of range + * @retval -OCF_ERR_CACHE_NOT_EXIST Cache with given ID is not exist + */ +int ocf_mngt_cache_get(ocf_ctx_t ctx, ocf_cache_id_t id, ocf_cache_t *cache); + +/** + * @brief Decrease reference counter in cache + * + * @note If cache don't have any reference - deallocate it + * + * @param[in] cache Handle to cache + */ +void ocf_mngt_cache_put(ocf_cache_t cache); + +/** + * @brief Lock cache for management oparations (write lock, exclusive) + * + * @param[in] cache Handle to cache + * + * @retval 0 Cache successfully locked + * @retval -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already + * stopping + * @retval -OCF_ERR_CACHE_IN_USE Can not lock cache - cache is in use + * @retval -OCF_ERR_INTR Wait operation interrupted + */ +int ocf_mngt_cache_lock(ocf_cache_t cache); + +/** + * @brief Lock cache for read - assures cache config does not change while + * lock is being held, while allowing other users to acquire + * read lock in parallel. 
+ * + * @param[in] cache Handle to cache + * + * @retval 0 Cache successfully locked + * @retval -OCF_ERR_CACHE_NOT_EXIST Can not lock cache - cache is already + * stopping + * @retval -OCF_ERR_CACHE_IN_USE Can not lock cache - cache is in use + * @retval -OCF_ERR_INTR Wait operation interrupted + */ +int ocf_mngt_cache_read_lock(ocf_cache_t cache); + +/** + * @brief Write-unlock cache + * + * @param[in] cache Handle to cache + */ +void ocf_mngt_cache_unlock(ocf_cache_t cache); + +/** + * @brief Read-unlock cache + * + * @param[in] cache Handle to cache + */ +void ocf_mngt_cache_read_unlock(ocf_cache_t cache); + +/** + * @brief Cache visitor function + * + * @param[in] cache Handle to cache + * @param[in] cntx Visitor function context + * + * @retval 0 Success + * @retval Non-zero Error + */ +typedef int (*ocf_mngt_cache_visitor_t)(ocf_cache_t cache, void *cntx); + +/** + * @brief Loop for each cache + * + * @note Visitor function is called for each cache + * + * @param[in] ctx OCF context + * @param[in] visitor OCF cache visitor function + * @param[in] cntx Context for cache visitor function + * + * @retval 0 Success + * @retval Non-zero Error + */ +int ocf_mngt_cache_visit(ocf_ctx_t ctx, ocf_mngt_cache_visitor_t visitor, + void *cntx); + +/** + * @brief Loop for each cache reverse + * + * @note Visitor function is called for each cache + * + * @param[in] ctx OCF context + * @param[in] visitor OCF cache visitor function + * @param[in] cntx Context for cache visitor function + * + * @retval 0 Success + * @retval Non-zero Error + */ +int ocf_mngt_cache_visit_reverse(ocf_ctx_t ctx, ocf_mngt_cache_visitor_t visitor, + void *cntx); + +/** + * @brief Cache probe status + */ +struct ocf_mngt_cache_probe_status { + /** + * @brief Gracefully shutdown for cache detected + */ + bool clean_shutdown; + + /** + * @brief Cache is dirty and requires flushing + */ + bool cache_dirty; +}; + +/** + * @brief Cache start configuration + */ +struct ocf_mngt_cache_config { + /** + * @brief Cache ID. In case of setting this field to invalid cache + * id first available cache ID will be set + */ + ocf_cache_id_t id; + + /** + * @brief Cache name. In case of being NULL, cache id is stringified to + * cache name + */ + const char *name; + + /** + * @brief Size of cache name + */ + size_t name_size; + + /** + * @brief Cache mode + */ + ocf_cache_mode_t cache_mode; + + /** + * @brief Eviction policy type + */ + ocf_eviction_t eviction_policy; + + /** + * @brief Cache line size + */ + ocf_cache_line_size_t cache_line_size; + + /** + * @brief Metadata layout (stripping/sequential) + */ + ocf_metadata_layout_t metadata_layout; + + bool metadata_volatile; + + /** + * @brief Backfill configuration + */ + struct { + uint32_t max_queue_size; + uint32_t queue_unblock_size; + } backfill; + + /** + * @brief Number of I/O queues to be created + */ + uint32_t io_queues; + + /** + * @brief Start cache and keep it locked + * + * @note In this case caller is able to perform additional activities + * and then shall unlock cache + */ + bool locked; + + /** + * @brief Use pass-through mode for I/O requests unaligned to 4KiB + */ + bool pt_unaligned_io; + + /** + * @brief If set, try to submit all I/O in fast path. 
+ */ + bool use_submit_io_fast; +}; + +/** + * @brief Cache attach configuration + */ +struct ocf_mngt_cache_device_config { + /** + * @brief Cache data object UUID + */ + struct ocf_data_obj_uuid uuid; + + /** + * @brief Cache data object type + */ + uint8_t data_obj_type; + + /** + * @brief Cache line size + */ + ocf_cache_line_size_t cache_line_size; + + /** + * @brief Ignore warnings and start cache + * + * @note It will force starting cache despite the: + * - overwrite dirty shutdown of previous cache + * - ignore cache with dirty shutdown and reinitialize cache + */ + bool force; + + /** + * @brief Minimum free RAM required to start cache. Set during + * cache start procedure + */ + uint64_t min_free_ram; + + /** + * @brief If set, cache features (like discard) are tested + * before starting cache + */ + bool perform_test; + + /** + * @brief If set, cache device will be discarded on cache start + */ + bool discard_on_start; +}; + +/** + * @brief Start cache instance + * + * @param[in] ctx OCF context + * @param[out] cache Cache handle + * @param[in] cfg Starting cache configuration + * + * @retval 0 Cache started successfully + * @retval Non-zero Error occurred and starting cache failed + */ +int ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache, + struct ocf_mngt_cache_config *cfg); + +/** + * @brief Stop cache instance + * + * @param[in] cache Cache handle + * + * @retval 0 Cache successfully stopped + * @retval Non-zero Error occurred during stopping cache + */ +int ocf_mngt_cache_stop(ocf_cache_t cache); + +/** + * @brief Stop cache instance without acquiring cache lock - caller is + * required to hold cache write lock when calling this + * + * @param[in] cache Cache handle + * + * @retval 0 Cache successfully stopped + * @retval Non-zero Error occurred during stopping cache + */ +int ocf_mngt_cache_stop_nolock(ocf_cache_t cache); + +/** + * @brief Attach caching device to cache instance + * + * @param[in] cache Cache handle + * @param[in] device_cfg Caching device configuration + * + * @retval 0 Cache successfully attached + * @retval Non-zero Error occurred during attaching cache + */ +int ocf_mngt_cache_attach(ocf_cache_t cache, + struct ocf_mngt_cache_device_config *device_cfg); + +/** + * @brief Attach caching device to cache instance without acquiring cache lock + * - caller is required to hold cache write lock when calling this + * + * @param[in] cache Cache handle + * @param[in] device_cfg Caching device configuration + * + * @retval 0 Cache successfully attached + * @retval Non-zero Error occurred during attaching cache + */ +int ocf_mngt_cache_attach_nolock(ocf_cache_t cache, + struct ocf_mngt_cache_device_config *device_cfg); + +/** + * @brief Detach caching device from cache instance + * + * @param[in] cache Cache handle + * + * @retval 0 Cache successfully detached + * @retval Non-zero Error occurred during detaching cache + */ +int ocf_mngt_cache_detach(ocf_cache_t cache); + +/** + * @brief Load cache instance + * + * @param[in] ctx OCF context + * @param[out] cache Cache handle + * @param[in] cfg Cache configuration + * @param[in] device_cfg Caching device configuration + * + * @retval 0 Cache successfully loaded + * @retval Non-zero Error occurred during loading cache + */ +int ocf_mngt_cache_load(ocf_ctx_t ctx, ocf_cache_t *cache, + struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg); + +/* Adding and removing cores */ + +/** + * @brief Add core to cache instance + * + * @param[in] cache Cache handle + * @param[in] core Core object handle + * @param[in] cfg 
Core configuration + * + * @retval 0 Core successfully added to cache + * @retval Non-zero Error occurred and adding core failed + */ +int ocf_mngt_cache_add_core(ocf_cache_t cache, ocf_core_t *core, + struct ocf_mngt_core_config *cfg); + +/** + * @brief Add core to cache instance without acquiring cache lock - caller is + required to hold cache write lock when calling this + * + * @param[in] cache Cache handle + * @param[in] core Core object handle + * @param[in] cfg Core configuration + * + * @retval 0 Core successfully added to cache + * @retval Non-zero Error occurred and adding core failed + */ +int ocf_mngt_cache_add_core_nolock(ocf_cache_t cache, ocf_core_t *core, + struct ocf_mngt_core_config *cfg); + +/** + * @brief Remove core from cache instance + * + * @param[in] cache Cache handle + * @param[in] core_id Core ID + * @param[in] detach only detach core without removing it from cache metadata + * + * @retval 0 Core successfully removed from cache + * @retval Non-zero Error occurred and removing core failed + */ +int ocf_mngt_cache_remove_core(ocf_cache_t cache, ocf_core_id_t core_id, + bool detach); + +/** + * @brief Remove core from cache instance without acquiring cache lock - caller + * is required to hold cache write lock when calling this + * + * @param[in] cache Cache handle + * @param[in] core_id Core ID + * @param[in] detach only detach core without removing it from cache metadata + * + * @retval 0 Core successfully removed from cache + * @retval Non-zero Error occurred and removing core failed + */ +int ocf_mngt_cache_remove_core_nolock(ocf_cache_t cache, ocf_core_id_t core_id, + bool detach); + +/* Flush operations */ + +/** + * @brief Flush data from given cache + * + * @param[in] cache Cache handle + * @param[in] interruption Allow for interruption + * + * @retval 0 Successfully flushed given cache + * @retval Non-zero Error occurred and flushing cache failed + */ +int ocf_mngt_cache_flush(ocf_cache_t cache, bool interruption); + +/** + * @brief Flush data from given cache without acquiring cache lock - caller is + * required to hold cache write OR read lock when calling this + * + * @param[in] cache Cache handle + * @param[in] interruption Allow for interruption + * + * @retval 0 Successfully flushed given cache + * @retval Non-zero Error occurred and flushing cache failed + */ +int ocf_mngt_cache_flush_nolock(ocf_cache_t cache, bool interruption); + +/** + * @brief Flush data to given core + * + * @param[in] cache Cache handle + * @param[in] id Core ID + * @param[in] interruption Allow for interruption + * + * @retval 0 Successfully flushed data to given core + * @retval Non-zero Error occurred and flushing data to core failed + */ +int ocf_mngt_core_flush(ocf_cache_t cache, ocf_core_id_t id, bool interruption); + +/** + * @brief Flush data to given core without acquiring cache lock - caller is + * required to hold cache write OR read lock when calling this + * + * @param[in] cache Cache handle + * @param[in] id Core ID + * @param[in] interruption Allow for interruption + * + * @retval 0 Successfully flushed data to given core + * @retval Non-zero Error occurred and flushing data to core failed + */ +int ocf_mngt_core_flush_nolock(ocf_cache_t cache, ocf_core_id_t id, + bool interruption); +/** + * @brief Interrupt existing flushing of cache or core + * + * @param[in] cache Cache instance + * + * @retval 0 Operation success + * @retval Non-zero Operation failure + */ +int ocf_mngt_cache_flush_interrupt(ocf_cache_t cache); + +/** + * @brief Purge 
data to given core + * + * @param[in] cache Cache handle + * @param[in] id Core ID + * @param[in] interruption Allow for interruption + * + * @retval 0 Successfully purged data to given core + * @retval Non-zero Error occurred and purging data to core failed + */ +int ocf_mngt_core_purge(ocf_cache_t cache, ocf_core_id_t id, bool interruption); +/** + * @brief Purge data from given cache + * + * @param[in] cache Cache handle + * @param[in] interruption Allow for interruption + * + * @retval 0 Successfully purged given cache + * @retval Non-zero Error occurred and purging cache failed + */ +int ocf_mngt_cache_purge(ocf_cache_t cache, bool interruption); + +/** + * @brief Set cleaning policy in given cache + * + * @param[in] cache Cache handle + * @param[in] type Cleaning policy type + * + * @retval 0 Policy has been set successfully + * @retval Non-zero Error occurred and policy has not been set + */ +int ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache, ocf_cleaning_t type); + +/** + * @brief Get current cleaning policy from given cache + * + * @param[in] cache Cache handle + * @param[out] type Variable to store current cleaning policy type + * + * @retval 0 Policy has been retrieved successfully + * @retval Non-zero Error occurred and policy has not been retrieved + */ +int ocf_mngt_cache_cleaning_get_policy(ocf_cache_t cache, ocf_cleaning_t *type); + +/** + * @brief Set cleaning parameter in given cache + * + * @param[in] cache Cache handle + * @param[in] param_id Cleaning policy parameter id + * @param[in] param_value Cleaning policy parameter value + * + * @retval 0 Parameter has been set successfully + * @retval Non-zero Error occurred and parameter has not been set + */ +int ocf_mngt_cache_cleaning_set_param(ocf_cache_t cache, ocf_cleaning_t type, + uint32_t param_id, uint32_t param_value); + +/** + * @brief Get cleaning parameter from given cache + * + * @param[in] cache Cache handle + * @param[in] param_id Cleaning policy parameter id + * @param[out] param_value Variable to store parameter value + * + * @retval 0 Parameter has been retrieved successfully + * @retval Non-zero Error occurred and parameter has not been retrieved + */ +int ocf_mngt_cache_cleaning_get_param(ocf_cache_t cache, ocf_cleaning_t type, + uint32_t param_id, uint32_t *param_value); + +/** + * @brief IO class configuration + */ +struct ocf_mngt_io_class_config { + /** + * @brief IO class ID + */ + uint32_t class_id; + + /** + * @brief IO class name + */ + const char *name; + + /** + * @brief IO class eviction priority + */ + int16_t prio; + + /** + * @brief IO class cache mode + */ + ocf_cache_mode_t cache_mode; + + /** + * @brief IO class minimum size + */ + uint32_t min_size; + + /** + * @brief IO class maximum size + */ + uint32_t max_size; +}; + +/** + * @brief Configure IO class in given cache + * + * @param[in] cache Cache handle + * @param[in] cfg IO class configuration + * + * @retval 0 Configuration has been set successfully + * @retval Non-zero Error occurred and configuration has not been set + */ +int ocf_mngt_io_class_configure(ocf_cache_t cache, + const struct ocf_mngt_io_class_config *cfg); + +/** + * @brief Set core sequential cutoff threshold + * + * @param[in] cache Cache handle + * @param[in] core_id Core ID + * @param[in] thresh threshold in bytes for sequential cutoff + * + * @retval 0 Sequential cutoff threshold has been set successfully + * @retval Non-zero Error occurred and threshold hasn't been updated + */ +int ocf_mngt_set_seq_cutoff_threshold(ocf_cache_t cache, ocf_core_id_t core_id, + uint32_t thresh); + +/** 
+ * @brief Set core sequential cutoff policy + * + * @param[in] cache Cache handle + * @param[in] core_id Core ID + * @param[in] policy sequential cutoff policy + * + * @retval 0 Sequential cutoff policy has been set successfully + * @retval Non-zero Error occurred and policy hasn't been updated + */ +int ocf_mngt_set_seq_cutoff_policy(ocf_cache_t cache, ocf_core_id_t core_id, + ocf_seq_cutoff_policy policy); + +/** + * @brief Get core sequential cutoff threshold + * + * @param[in] cache Cache handle + * @param[in] core_id Core ID + * @param[out] thresh threshold in bytes for sequential cutoff + * + * @retval 0 Sequential cutoff threshold has been retrieved successfully + * @retval Non-zero Error occurred + */ +int ocf_mngt_get_seq_cutoff_threshold(ocf_cache_t cache, ocf_core_id_t core_id, + uint32_t *thresh); + +/** + * @brief Get core sequential cutoff policy + * + * @param[in] cache Cache handle + * @param[in] core_id Core ID + * @param[out] policy sequential cutoff policy + * + * @retval 0 Sequential cutoff policy has been retrieved successfully + * @retval Non-zero Error occurred + */ +int ocf_mngt_get_seq_cutoff_policy(ocf_cache_t cache, ocf_core_id_t core_id, + ocf_seq_cutoff_policy *policy); + +/** + * @brief Set cache mode in given cache + * + * @param[in] cache Cache handle + * @param[in] mode Cache mode to set + * @param[in] flush Perform flushing before switching cache mode + * + * @retval 0 Cache mode has been set successfully + * @retval Non-zero Error occurred and cache mode has not been set + */ +int ocf_mngt_cache_set_mode(ocf_cache_t cache, ocf_cache_mode_t mode, + uint8_t flush); + +/** + * @brief Set cache fallback Pass Through error threshold + * + * @param[in] cache Cache handle + * @param[in] threshold Value to be set as threshold + * + * @retval 0 Fallback-PT threshold has been set successfully + * @retval Non-zero Error occurred + */ +int ocf_mngt_cache_set_fallback_pt_error_threshold(ocf_cache_t cache, + uint32_t threshold); + +/** + * @brief Get cache fallback Pass Through error threshold + * + * @param[in] cache Cache handle + * @param[out] threshold Fallback-PT threshold + * + * @retval 0 Fallback-PT threshold has been retrieved successfully + * @retval Non-zero Error occurred + */ +int ocf_mngt_cache_get_fallback_pt_error_threshold(ocf_cache_t cache, + uint32_t *threshold); + +/** + * @brief Reset cache fallback Pass Through error counter + * + * @param[in] cache Cache handle + * + * @retval 0 Error counter has been reset successfully + */ +int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache); + +/** + * @brief Initialize core pool + * + * @param[in] ctx OCF context + */ +void ocf_mngt_core_pool_init(ocf_ctx_t ctx); + +/** + * @brief Get core pool count + * + * @param[in] ctx OCF context + * + * @retval Number of cores in core pool + */ +int ocf_mngt_core_pool_get_count(ocf_ctx_t ctx); + +/** + * @brief Add core to pool + * + * @param[in] ctx OCF context + * @param[in] uuid Core data object UUID + * @param[in] type OCF core data object type + * + * @retval 0 Core added to pool successfully + * @retval Non-zero Error occurred and adding core to pool failed + */ +int ocf_mngt_core_pool_add(ocf_ctx_t ctx, ocf_uuid_t uuid, uint8_t type); + +/** + * @brief Look up core data object in pool + * + * @param[in] ctx OCF context + * @param[in] uuid Core data object UUID + * @param[in] type OCF core data object type + * + * @retval Handle to object with same UUID + * @retval NULL Object with given UUID not found + */ +ocf_data_obj_t ocf_mngt_core_pool_lookup(ocf_ctx_t ctx, ocf_uuid_t uuid, + 
ocf_data_obj_type_t type); +/** + * @brief Iterate over all objects in pool and call visitor callback + * + * @param[in] ctx OCF context + * @param[in] visitor Visitor callback + * @param[in] visitor_ctx Context for visitor callback + * + * @retval 0 Success + * @retval Non-zero Error + */ +int ocf_mngt_core_pool_visit(ocf_ctx_t ctx, + int (*visitor)(ocf_uuid_t, void *), void *visitor_ctx); + +/** + * @brief Remove core from pool + * + * @param[in] ctx OCF context + * @param[in] obj Core data object + */ +void ocf_mngt_core_pool_remove(ocf_ctx_t ctx, ocf_data_obj_t obj); + +/** + * @brief Close and remove core from pool + * + * @param[in] ctx OCF context + * @param[in] obj Core data object + */ +void ocf_mngt_core_pool_close_and_remove(ocf_ctx_t ctx, ocf_data_obj_t obj); + +/** + * @brief Deinit core pool + * + * @param[in] ctx OCF context + */ +void ocf_mngt_core_pool_deinit(ocf_ctx_t ctx); + +#endif /* __OCF_MNGT_H__ */ diff --git a/inc/ocf_queue.h b/inc/ocf_queue.h new file mode 100644 index 0000000..2875ad1 --- /dev/null +++ b/inc/ocf_queue.h @@ -0,0 +1,65 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef OCF_QUEUE_H_ +#define OCF_QUEUE_H_ + +/** + * @file + * @brief OCF queues API + */ + +/** + * @brief Run queue processing + * + * @param[in] q Queue to run + */ +void ocf_queue_run(ocf_queue_t q); + +/** + * @brief Set queue private data + * + * @param[in] q I/O queue + * @param[in] priv Private data + */ +void ocf_queue_set_priv(ocf_queue_t q, void *priv); + +/** + * @brief Get queue private data + * + * @param[in] q I/O queue + * + * @retval I/O queue private data + */ +void *ocf_queue_get_priv(ocf_queue_t q); + +/** + * @brief Get number of pending requests in I/O queue + * + * @param[in] q I/O queue + * + * @retval Number of pending requests in I/O queue + */ +uint32_t ocf_queue_pending_io(ocf_queue_t q); + +/** + * @brief Get cache instance to which I/O queue belongs + * + * @param[in] q I/O queue + * + * @retval Cache instance + */ +ocf_cache_t ocf_queue_get_cache(ocf_queue_t q); + +/** + * @brief Get I/O queue id + * + * @param[in] q I/O queue + * + * @retval I/O queue id + */ +uint32_t ocf_queue_get_id(ocf_queue_t q); + +#endif diff --git a/inc/ocf_stats.h b/inc/ocf_stats.h new file mode 100644 index 0000000..bf43066 --- /dev/null +++ b/inc/ocf_stats.h @@ -0,0 +1,207 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +/** + * @file + * @brief OCF API for getting and resetting statistics + * + * This file contains routines pertaining to retrieval and + * manipulation of OCF IO statistics. + */ + +#ifndef __OCF_STATS_H__ +#define __OCF_STATS_H__ + +struct ocf_io; + +/** + * @brief OCF requests statistics like hit, miss, etc... 
+ * + * @note To calculate number of hits request do: + * total - (partial_miss + full_miss) + */ +struct ocf_stats_req { + /** Number of partial misses */ + uint64_t partial_miss; + + /** Number of full misses */ + uint64_t full_miss; + + /** Total of requests */ + uint64_t total; + + /** Pass-through requests */ + uint64_t pass_through; +}; + +/** + * @brief OCF error statistics + */ +struct ocf_stats_error { + /** Read errors */ + uint32_t read; + + /** Write errors */ + uint32_t write; +}; + +/** + * @brief OCF block statistics in bytes + */ +struct ocf_stats_block { + /** Number of blocks read */ + uint64_t read; + + /** Number of blocks written */ + uint64_t write; +}; + +/** + * Statistics appropriate for given IO class + */ +struct ocf_stats_io_class { + /** Read requests statistics */ + struct ocf_stats_req read_reqs; + + /** Writes requests statistics */ + struct ocf_stats_req write_reqs; + + /** Block requests statistics */ + struct ocf_stats_block blocks; + + /** Number of cache lines available for given partition */ + uint64_t free_clines; + + /** Number of cache lines within lru list */ + uint64_t occupancy_clines; + + /** Number of dirty cache lines assigned to specific partition */ + uint64_t dirty_clines; +}; + +#define IO_PACKET_NO 12 +#define IO_ALIGN_NO 4 + +/** + * @brief Core debug statistics + */ +struct ocf_stats_core_debug { + /** I/O sizes being read (grouped by packets) */ + uint64_t read_size[IO_PACKET_NO]; + + /** I/O sizes being written (grouped by packets) */ + uint64_t write_size[IO_PACKET_NO]; + + /** I/O alignment for reads */ + uint64_t read_align[IO_ALIGN_NO]; + + /** I/O alignment for writes */ + uint64_t write_align[IO_ALIGN_NO]; +}; + +/** + * @brief OCF core statistics + */ +struct ocf_stats_core { + /** Core size in cache line size unit */ + uint64_t core_size; + + /** Core size in bytes unit */ + uint64_t core_size_bytes; + + /** Number of cache lines allocated in the cache for this core */ + uint32_t cache_occupancy; + + /** Number of dirty cache lines allocated in the cache for this core */ + uint32_t dirty; + + /** Number of block flushed in ongoing flush operation */ + uint32_t flushed; + + /** How long core is dirty in seconds unit */ + uint32_t dirty_for; + + /** Read requests statistics */ + struct ocf_stats_req read_reqs; + + /** Write requests statistics */ + struct ocf_stats_req write_reqs; + + /** Block requests for cache data object statistics */ + struct ocf_stats_block cache_obj; + + /** Block requests for core data object statistics */ + struct ocf_stats_block core_obj; + + /** Block requests submitted by user to this core */ + struct ocf_stats_block core; + + /** Cache data object error statistics */ + struct ocf_stats_error cache_errors; + + /** Core data object error statistics */ + struct ocf_stats_error core_errors; + + /** Debug statistics */ + struct ocf_stats_core_debug debug_stat; + + /** Sequential cutoff threshold (in bytes) */ + uint32_t seq_cutoff_threshold; + + /** Sequential cutoff policy */ + ocf_seq_cutoff_policy seq_cutoff_policy; +}; + +/** + * @brief Initialize or reset statistics. + * + * Initialize or reset counters used for statistics. + * + * @param[in] cache OCF cache device handle + * @param[in] core_id Id of core for which statistics should be initialized. + */ +int ocf_stats_initialize(ocf_cache_t cache, ocf_core_id_t core_id); + +/** + * @brief ocf_io_class_get_stats retrieve cache statistics + * + * Retrieve buffer of cache statistics for given cache instance. 
+ * + * @param[in] core core ID to which request pertains + * @param[in] io_class IO class, stats of which are requested + * @param[out] stats statistics structure that shall be filled as + * a result of this function invocation. + * + * @result zero upon successful completion; error code otherwise + */ +int ocf_io_class_get_stats(ocf_core_t core, uint32_t io_class, + struct ocf_stats_io_class *stats); + +/** + * @brief retrieve core stats + * + * Retrieve ocf per core stats (for all IO classes together) + * + * @param[in] core core ID to which request pertains + * @param[out] stats statistics structure that shall be filled as + * a result of this function invocation. + * + * @result zero upon successful completion; error code otherwise + */ +int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats); + +/** + * @brief update stats given IO request + * + * Function meant to update stats for IO request. + * + * @note This function shall be invoked for eac IO request processed + * + * @param[in] core to which request pertains + * @param[in] io request for which stats are being updated + */ +void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io); + +#endif /* __OCF_STATS_H__ */ diff --git a/inc/ocf_stats_builder.h b/inc/ocf_stats_builder.h new file mode 100644 index 0000000..6756431 --- /dev/null +++ b/inc/ocf_stats_builder.h @@ -0,0 +1,190 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +/** + * @file + * @brief OCF API for collecting statistics + * + * This file contains routines pertaining to retrieval and + * manipulation of OCF IO statistics. + */ + +#ifndef __OCF_STATS_BUILDER_H__ +#define __OCF_STATS_BUILDER_H__ + +/** + * Entire row of statistcs + */ +struct ocf_stat { + /** Value */ + uint64_t value; + /** percent x10 */ + uint64_t percent; +}; + +/** + * @brief Usage statistics in 4 KiB unit + * + * An example of presenting statistics: + *
+ * ╔══════════════════╤══════════╤═══════╤═════════════╗
+ * ║ Usage statistics │  Count   │   %   │   Units     ║
+ * ╠══════════════════╪══════════╪═══════╪═════════════╣
+ * ║ Occupancy        │       20 │  50.0 │ 4KiB blocks ║
+ * ║ Free             │       20 │  50.0 │ 4KiB blocks ║
+ * ║ Clean            │       15 │  75.0 │ 4KiB blocks ║
+ * ║ Dirty            │        5 │  25.0 │ 4KiB blocks ║
+ * ╚══════════════════╧══════════╧═══════╧═════════════╝
+ * 
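+ *
+ * As an illustrative sketch only (the `cache` handle and <stdio.h> are
+ * assumed and not provided by this header), these counters can be filled
+ * with ocf_stats_collect_cache(), declared later in this file. Each
+ * ocf_stat::percent field is stored as percent x10:
+ *
+ * @code
+ * struct ocf_stats_usage usage;
+ * struct ocf_stats_requests reqs;
+ * struct ocf_stats_blocks blocks;
+ * struct ocf_stats_errors errors;
+ *
+ * if (ocf_stats_collect_cache(cache, &usage, &reqs, &blocks, &errors) == 0) {
+ *	/* percent is stored as percent x10, hence the /10 and %10 below */
+ *	printf("Occupancy: %llu blocks (%llu.%llu%%)\n",
+ *			(unsigned long long)usage.occupancy.value,
+ *			(unsigned long long)(usage.occupancy.percent / 10),
+ *			(unsigned long long)(usage.occupancy.percent % 10));
+ * }
+ * @endcode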
+ */ +struct ocf_stats_usage { + struct ocf_stat occupancy; + struct ocf_stat free; + struct ocf_stat clean; + struct ocf_stat dirty; +}; + +/** + * @brief Request statistics + * + * An example of presenting statistics: + *
+ * ╔══════════════════════╤═══════╤═══════╤══════════╗
+ * ║ Request statistics   │ Count │   %   │ Units    ║
+ * ╠══════════════════════╪═══════╪═══════╪══════════╣
+ * ║ Read hits            │    10 │   4.5 │ Requests ║
+ * ║ Read partial misses  │     1 │   0.5 │ Requests ║
+ * ║ Read full misses     │   211 │  95.0 │ Requests ║
+ * ║ Read total           │   222 │ 100.0 │ Requests ║
+ * ╟──────────────────────┼───────┼───────┼──────────╢
+ * ║ Write hits           │     0 │   0.0 │ Requests ║
+ * ║ Write partial misses │     0 │   0.0 │ Requests ║
+ * ║ Write full misses    │     0 │   0.0 │ Requests ║
+ * ║ Write total          │     0 │   0.0 │ Requests ║
+ * ╟──────────────────────┼───────┼───────┼──────────╢
+ * ║ Pass-Through reads   │     0 │   0.0 │ Requests ║
+ * ║ Pass-Through writes  │     0 │   0.0 │ Requests ║
+ * ║ Serviced requests    │   222 │ 100.0 │ Requests ║
+ * ╟──────────────────────┼───────┼───────┼──────────╢
+ * ║ Total requests       │   222 │ 100.0 │ Requests ║
+ * ╚══════════════════════╧═══════╧═══════╧══════════╝
+ * 
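+ *
+ * A similar illustrative sketch for a single core (assuming a valid `core`
+ * handle; ocf_stats_collect_core() is declared later in this file) that
+ * reads the request counters, where percent is again stored as percent x10:
+ *
+ * @code
+ * struct ocf_stats_usage usage;
+ * struct ocf_stats_requests reqs;
+ * struct ocf_stats_blocks blocks;
+ * struct ocf_stats_errors errors;
+ *
+ * if (ocf_stats_collect_core(core, &usage, &reqs, &blocks, &errors) == 0) {
+ *	printf("Read hits: %llu of %llu requests (%llu.%llu%%)\n",
+ *			(unsigned long long)reqs.rd_hits.value,
+ *			(unsigned long long)reqs.rd_total.value,
+ *			(unsigned long long)(reqs.rd_hits.percent / 10),
+ *			(unsigned long long)(reqs.rd_hits.percent % 10));
+ * }
+ * @endcode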
+ */ +struct ocf_stats_requests { + struct ocf_stat rd_hits; + struct ocf_stat rd_partial_misses; + struct ocf_stat rd_full_misses; + struct ocf_stat rd_total; + struct ocf_stat wr_hits; + struct ocf_stat wr_partial_misses; + struct ocf_stat wr_full_misses; + struct ocf_stat wr_total; + struct ocf_stat rd_pt; + struct ocf_stat wr_pt; + struct ocf_stat serviced; + struct ocf_stat total; +}; + +/** + * @brief Block statistics + * + * An example of presenting statistics: + *
+ * ╔════════════════════════════════════╤═══════╤═══════╤═════════════╗
+ * ║ Block statistics                   │ Count │   %   │   Units     ║
+ * ╠════════════════════════════════════╪═══════╪═══════╪═════════════╣
+ * ║ Reads from core data object(s)     │   426 │ 100.0 │ 4KiB blocks ║
+ * ║ Writes to core data object(s)      │     0 │   0.0 │ 4KiB blocks ║
+ * ║ Total to/from core data object (s) │   426 │ 100.0 │ 4KiB blocks ║
+ * ╟────────────────────────────────────┼───────┼───────┼─────────────╢
+ * ║ Reads from cache data object       │    13 │   3.0 │ 4KiB blocks ║
+ * ║ Writes to cache data object        │   426 │  97.0 │ 4KiB blocks ║
+ * ║ Total to/from cache data object    │   439 │ 100.0 │ 4KiB blocks ║
+ * ╟────────────────────────────────────┼───────┼───────┼─────────────╢
+ * ║ Reads from core(s)                 │   439 │ 100.0 │ 4KiB blocks ║
+ * ║ Writes to core(s)                  │     0 │   0.0 │ 4KiB blocks ║
+ * ║ Total to/from core(s)              │   439 │ 100.0 │ 4KiB blocks ║
+ * ╚════════════════════════════════════╧═══════╧═══════╧═════════════╝
+ * 
+ */ +struct ocf_stats_blocks { + struct ocf_stat core_obj_rd; + struct ocf_stat core_obj_wr; + struct ocf_stat core_obj_total; + struct ocf_stat cache_obj_rd; + struct ocf_stat cache_obj_wr; + struct ocf_stat cache_obj_total; + struct ocf_stat volume_rd; + struct ocf_stat volume_wr; + struct ocf_stat volume_total; +}; + +/** + * @brief Errors statistics + * + * An example of presenting statistics: + *
+ * ╔════════════════════╤═══════╤═════╤══════════╗
+ * ║ Error statistics   │ Count │  %  │ Units    ║
+ * ╠════════════════════╪═══════╪═════╪══════════╣
+ * ║ Cache read errors  │     0 │ 0.0 │ Requests ║
+ * ║ Cache write errors │     0 │ 0.0 │ Requests ║
+ * ║ Cache total errors │     0 │ 0.0 │ Requests ║
+ * ╟────────────────────┼───────┼─────┼──────────╢
+ * ║ Core read errors   │     0 │ 0.0 │ Requests ║
+ * ║ Core write errors  │     0 │ 0.0 │ Requests ║
+ * ║ Core total errors  │     0 │ 0.0 │ Requests ║
+ * ╟────────────────────┼───────┼─────┼──────────╢
+ * ║ Total errors       │     0 │ 0.0 │ Requests ║
+ * ╚════════════════════╧═══════╧═════╧══════════╝
+ * 
+ */ +struct ocf_stats_errors { + struct ocf_stat core_obj_rd; + struct ocf_stat core_obj_wr; + struct ocf_stat core_obj_total; + struct ocf_stat cache_obj_rd; + struct ocf_stat cache_obj_wr; + struct ocf_stat cache_obj_total; + struct ocf_stat total; +}; + +/** + * @param Collect statistics for given cache + * + * @param cache Cache instance for each statistics will be collected + * @param usage Usage statistics + * @param rq Request statistics + * @param blocks Blocks statistics + * @param errors Errors statistics + * + * @retval 0 Success + * @retval Non-zero Error + */ +int ocf_stats_collect_cache(ocf_cache_t cache, + struct ocf_stats_usage *usage, + struct ocf_stats_requests *rq, + struct ocf_stats_blocks *blocks, + struct ocf_stats_errors *errors); + +/** + * @param Collect statistics for given core + * + * @param cache Core for each statistics will be collected + * @param usage Usage statistics + * @param rq Request statistics + * @param blocks Blocks statistics + * @param errors Errors statistics + * + * @retval 0 Success + * @retval Non-zero Error + */ +int ocf_stats_collect_core(ocf_core_t core, + struct ocf_stats_usage *usage, + struct ocf_stats_requests *rq, + struct ocf_stats_blocks *blocks, + struct ocf_stats_errors *errors); + +#endif /* __OCF_STATS_BUILDER_H__ */ diff --git a/inc/ocf_types.h b/inc/ocf_types.h new file mode 100644 index 0000000..551bafb --- /dev/null +++ b/inc/ocf_types.h @@ -0,0 +1,95 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +/** + * @file + * @brief OCF types + */ +#ifndef __OCF_TYPES_H_ +#define __OCF_TYPES_H_ + +#include "ocf_env_headers.h" + +/** + * @brief cache id type (by default designated as 16 bit unsigned integer) + */ +typedef uint16_t ocf_cache_id_t; + +/** + * @brief cache line type (by default designated as 32 bit unsigned integer) + */ +typedef uint32_t ocf_cache_line_t; + +/** + * @brief core id type (by default designated as 16 bit unsigned integer) + */ +typedef uint16_t ocf_core_id_t; + +/** + * @brief core sequence number type (by default designated as 16 bit unsigned integer) + */ +typedef uint16_t ocf_seq_no_t; + +/** + * @brief partition id type (by default designated as 16 bit unsigned integer) + */ +typedef uint16_t ocf_part_id_t; + +/** + * @brief handle to object designating ocf context + */ +typedef struct ocf_ctx *ocf_ctx_t; + +struct ocf_cache; +/** + * @brief handle to object designating ocf cache device + */ +typedef struct ocf_cache *ocf_cache_t; + +struct ocf_core; +/** + * @brief handle to object designating ocf core object + */ +typedef struct ocf_core *ocf_core_t; + +struct ocf_data_obj; +/** + * @brief handle to object designating ocf data object + */ +typedef struct ocf_data_obj *ocf_data_obj_t; + + +struct ocf_data_obj_type; +/** + * @brief handle to data object type + */ +typedef const struct ocf_data_obj_type *ocf_data_obj_type_t; + +/** + * @brief handle to data object uuid + */ +typedef struct ocf_data_obj_uuid *ocf_uuid_t; + +/** + * @brief handle to object designating ocf context object + */ +typedef void ctx_data_t; + +/** + * @brief handle to I/O queue + */ +typedef struct ocf_queue *ocf_queue_t; + +/** + * @brief handle to cleaner + */ +typedef struct ocf_cleaner *ocf_cleaner_t; + +/** + * @brief handle to metadata_updater + */ +typedef struct ocf_metadata_updater *ocf_metadata_updater_t; + +#endif diff --git a/inc/ocf_utilities.h b/inc/ocf_utilities.h new file mode 100644 index 0000000..a4feb71 --- /dev/null +++ b/inc/ocf_utilities.h @@ -0,0 
+1,74 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + + +#ifndef __OCF_UTILITIES_H__ +#define __OCF_UTILITIES_H__ + +/** + * @file + * @brief OCF memory pool reference + */ + +struct ocf_mpool; + +/** + * @brief Create OCF memory pool + * + * @param cache OCF cache instance + * @param size Size of particular item + * @param hdr_size Header size before array of items + * @param flags Allocation flags + * @param mpool_max Maximal allocator size (power of two) + * @param fmt_name Format name of allocator + * @param ... Format parameters + * + * @return OCF memory pool reference + */ +struct ocf_mpool *ocf_mpool_create(struct ocf_cache *cache, + uint32_t hdr_size, uint32_t size, int flags, int mpool_max, + const char *name_perfix); + +/** + * @brief Destroy existing memory pool + * + * @param mpool memory pool + */ +void ocf_mpool_destroy(struct ocf_mpool *mpool); + +/** + * @brief Allocate new items of memory pool + * + * @note Allocation based on ATOMIC memory pool and this function can be called + * when IRQ disable + * + * @param mpool OCF memory pool reference + * @param count Count of elements to be allocated + * + * @return Pointer to the new items + */ +void *ocf_mpool_new(struct ocf_mpool *mpool, uint32_t count); + +/** + * @brief Allocate new items of memory pool with specified allocation flag + * + * @param mpool OCF memory pool reference + * @param count Count of elements to be allocated + * @param flags Kernel allocation falgs + * + * @return Pointer to the new items + */ +void *ocf_mpool_new_f(struct ocf_mpool *mpool, uint32_t count, int flags); + +/** + * @brief Free existing items of memory pool + * + * @param mpool OCF memory pool reference + * @param items Items to be freed + * @param count - Count of elements to be free + */ +void ocf_mpool_del(struct ocf_mpool *mpool, void *items, uint32_t count); + +#endif /* __OCF_UTILITIES_H__ */ diff --git a/src/cleaning/acp.c b/src/cleaning/acp.c new file mode 100644 index 0000000..0bc3f32 --- /dev/null +++ b/src/cleaning/acp.c @@ -0,0 +1,735 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "cleaning.h" +#include "../metadata/metadata.h" +#include "../utils/utils_cleaner.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_rq.h" +#include "../cleaning/acp.h" +#include "../engine/engine_common.h" +#include "../concurrency/ocf_cache_concurrency.h" +#include "cleaning_priv.h" + +#define OCF_ACP_DEBUG 0 + +#if 1 == OCF_ACP_DEBUG + +#define OCF_DEBUG_PREFIX "[Clean] %s():%d " + +#define OCF_DEBUG_LOG(cache, format, ...) \ + ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \ + format"\n", __func__, __LINE__, ##__VA_ARGS__) + +#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "") + +#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg) + +#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \ + ##__VA_ARGS__) + +#define ACP_DEBUG_INIT(acp) acp->checksum = 0 +#define ACP_DEBUG_BEGIN(acp, cache_line) acp->checksum ^= cache_line +#define ACP_DEBUG_END(acp, cache_line) acp->checksum ^= cache_line +#define ACP_DEBUG_CHECK(acp) ENV_BUG_ON(acp->checksum) +#else +#define OCF_DEBUG_PREFIX +#define OCF_DEBUG_LOG(cache, format, ...) +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_MSG(cache, msg) +#define OCF_DEBUG_PARAM(cache, format, ...) 
+#endif /* __OCF_UTILITIES_H__ */
diff --git a/src/cleaning/acp.c b/src/cleaning/acp.c
new file mode 100644
index 0000000..0bc3f32
--- /dev/null
+++ b/src/cleaning/acp.c
@@ -0,0 +1,735 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#include "ocf/ocf.h"
+#include "../ocf_cache_priv.h"
+#include "cleaning.h"
+#include "../metadata/metadata.h"
+#include "../utils/utils_cleaner.h"
+#include "../utils/utils_cache_line.h"
+#include "../utils/utils_rq.h"
+#include "../cleaning/acp.h"
+#include "../engine/engine_common.h"
+#include "../concurrency/ocf_cache_concurrency.h"
+#include "cleaning_priv.h"
+
+#define OCF_ACP_DEBUG 0
+
+#if 1 == OCF_ACP_DEBUG
+
+#define OCF_DEBUG_PREFIX "[Clean] %s():%d "
+
+#define OCF_DEBUG_LOG(cache, format, ...) \
+	ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \
+			format"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "")
+
+#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg)
+
+#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \
+		##__VA_ARGS__)
+
+#define ACP_DEBUG_INIT(acp) acp->checksum = 0
+#define ACP_DEBUG_BEGIN(acp, cache_line) acp->checksum ^= cache_line
+#define ACP_DEBUG_END(acp, cache_line) acp->checksum ^= cache_line
+#define ACP_DEBUG_CHECK(acp) ENV_BUG_ON(acp->checksum)
+#else
+#define OCF_DEBUG_PREFIX
+#define OCF_DEBUG_LOG(cache, format, ...)
+#define OCF_DEBUG_TRACE(cache)
+#define OCF_DEBUG_MSG(cache, msg)
+#define OCF_DEBUG_PARAM(cache, format, ...)
+#define ACP_DEBUG_INIT(acp)
+#define ACP_DEBUG_BEGIN(acp, cache_line)
+#define ACP_DEBUG_END(acp, cache_line)
+#define ACP_DEBUG_CHECK(acp)
+#endif
+
+#define ACP_CHUNK_SIZE (100 * MiB)
+
+/* minimal backoff before re-cleaning a chunk after error (in seconds) */
+#define ACP_CHUNK_CLEANING_BACKOFF_TIME 5
+
+/* time to sleep when nothing to clean in ms */
+#define ACP_BACKOFF_TIME_MS 1000
+
+#define ACP_MAX_BUCKETS 11
+
+/* Upper thresholds for buckets in percent dirty pages. First bucket should have
+ * threshold=0 - it isn't cleaned and we don't want dirty chunks staying dirty
+ * forever. Last bucket also should stay at 100 for obvious reasons */
+static const uint16_t ACP_BUCKET_DEFAULTS[ACP_MAX_BUCKETS] = { 0, 10, 20, 30, 40,
+		50, 60, 70, 80, 90, 100 };
+
+struct acp_flush_context {
+	/* number of cache lines in flush */
+	uint64_t size;
+	/* chunk for error handling */
+	struct acp_chunk_info *chunk;
+	/* cache lines to flush */
+	struct flush_data data[OCF_ACP_MAX_FLUSH_MAX_BUFFERS];
+	/* flush error code */
+	int error;
+};
+
+struct acp_state {
+	/* currently cleaned chunk */
+	struct acp_chunk_info *chunk;
+
+	/* cache line iterator within current chunk */
+	unsigned iter;
+
+	/* true if there are cache lines to process
+	 * in current chunk */
+	bool in_progress;
+};
+
+struct acp_chunk_info {
+	struct list_head list;
+	uint64_t chunk_id;
+	uint64_t next_cleaning_timestamp;
+	ocf_core_id_t core_id;
+	uint16_t num_dirty;
+	uint8_t bucket_id;
+};
+
+struct acp_bucket {
+	struct list_head chunk_list;
+	uint16_t threshold; /* threshold in clines */
+};
+
+struct acp_context {
+	env_rwsem chunks_lock;
+
+	/* number of chunks per core */
+	uint64_t num_chunks[OCF_CORE_MAX];
+
+	/* per core array of all chunks */
+	struct acp_chunk_info *chunk_info[OCF_CORE_MAX];
+
+	struct acp_bucket bucket_info[ACP_MAX_BUCKETS];
+
+	/* total number of chunks in cache */
+	uint64_t chunks_total;
+
+	/* structure to keep track of I/O in progress */
+	struct acp_flush_context flush;
+
+	/* cleaning state persistent over subsequent calls to
+	   perform_cleaning */
+	struct acp_state state;
+
+#if 1 == OCF_ACP_DEBUG
+	/* debug only */
+	uint64_t checksum;
+#endif
+};
+
+struct acp_core_line_info
+{
+	ocf_cache_line_t cache_line;
+	ocf_core_id_t core_id;
+	uint64_t core_line;
+};
+
+#define ACP_LOCK_CHUNKS_RD() env_rwsem_down_read(&acp->chunks_lock)
+
+#define ACP_UNLOCK_CHUNKS_RD() env_rwsem_up_read(&acp->chunks_lock)
+
+#define ACP_LOCK_CHUNKS_WR() env_rwsem_down_write(&acp->chunks_lock)
+
+#define ACP_UNLOCK_CHUNKS_WR() env_rwsem_up_write(&acp->chunks_lock)
+
+static struct acp_context *_acp_get_ctx_from_cache(struct ocf_cache *cache)
+{
+	return cache->cleaning_policy_context;
+}
+
+static struct acp_cleaning_policy_meta* _acp_meta_get(
+		struct ocf_cache *cache, uint32_t cache_line,
+		struct cleaning_policy_meta *policy_meta)
+{
+	ocf_metadata_get_cleaning_policy(cache, cache_line, policy_meta);
+	return &policy_meta->meta.acp;
+}
+
+static void _acp_meta_set(struct ocf_cache *cache, uint32_t cache_line,
+		struct cleaning_policy_meta *policy_meta)
+{
+	ocf_metadata_set_cleaning_policy(cache, cache_line, policy_meta);
+}
+
+static struct acp_core_line_info _acp_core_line_info(struct ocf_cache *cache,
+		ocf_cache_line_t cache_line)
+{
+	struct acp_core_line_info acp_core_line_info = {.cache_line = cache_line, };
+	ocf_metadata_get_core_info(cache, cache_line, &acp_core_line_info.core_id,
+			&acp_core_line_info.core_line);
+	return acp_core_line_info;
+}
+
+static struct acp_chunk_info *_acp_get_chunk(struct ocf_cache *cache,
+		
uint32_t cache_line) +{ + struct acp_context *acp = _acp_get_ctx_from_cache(cache); + struct acp_core_line_info core_line = + _acp_core_line_info(cache, cache_line); + uint64_t chunk_id; + + chunk_id = core_line.core_line * ocf_line_size(cache) / ACP_CHUNK_SIZE; + + return &acp->chunk_info[core_line.core_id][chunk_id]; +} + +#define for_each_core(cache, iter) \ + for (iter = 0; iter < OCF_CORE_MAX; iter++) \ + if (cache->core_conf_meta[iter].added) + +static void _acp_remove_cores(struct ocf_cache *cache) +{ + int i; + + for_each_core(cache, i) + cleaning_policy_acp_remove_core(cache, i); +} + +static int _acp_load_cores(struct ocf_cache *cache) +{ + int i; + int err = 0; + + for_each_core(cache, i) { + OCF_DEBUG_PARAM(cache, "loading core %i\n", i); + err = cleaning_policy_acp_add_core(cache, i); + if (err) + break; + } + + if (err) + _acp_remove_cores(cache); + + return err; +} + +void cleaning_policy_acp_init_cache_block(struct ocf_cache *cache, + uint32_t cache_line) +{ + struct cleaning_policy_meta policy_meta; + struct acp_cleaning_policy_meta *acp_meta; + + /* TODO: acp meta is going to be removed soon */ + acp_meta = _acp_meta_get(cache, cache_line, &policy_meta); + acp_meta->dirty = 0; + _acp_meta_set(cache, cache_line, &policy_meta); +} + +void cleaning_policy_acp_deinitialize(struct ocf_cache *cache) +{ + _acp_remove_cores(cache); + + env_vfree(cache->cleaning_policy_context); + cache->cleaning_policy_context = NULL; +} + +static void _acp_rebuild(struct ocf_cache *cache) +{ + ocf_cache_line_t cline; + ocf_core_id_t cline_core_id; + uint32_t step = 0; + + for (cline = 0; cline < cache->device->collision_table_entries; cline++) { + ocf_metadata_get_core_and_part_id(cache, cline, &cline_core_id, + NULL); + + OCF_COND_RESCHED_DEFAULT(step); + + if (cline_core_id == OCF_CORE_MAX) + continue; + + cleaning_policy_acp_init_cache_block(cache, cline); + + if (!metadata_test_dirty(cache, cline)) + continue; + + cleaning_policy_acp_set_hot_cache_line(cache, cline); + } + + ocf_cache_log(cache, log_info, "Finished rebuilding ACP metadata\n"); +} + +void cleaning_policy_acp_setup(struct ocf_cache *cache) +{ + struct acp_cleaning_policy_config *config; + + config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data; + + config->thread_wakeup_time = OCF_ACP_DEFAULT_WAKE_UP; + config->flush_max_buffers = OCF_ACP_DEFAULT_FLUSH_MAX_BUFFERS; +} + +int cleaning_policy_acp_initialize(struct ocf_cache *cache, + int init_metadata) +{ + struct acp_context *acp; + int err, i; + + /* bug if max chunk number would overflow dirty_no array type */ +#if defined (BUILD_BUG_ON) + BUILD_BUG_ON(ACP_CHUNK_SIZE / ocf_cache_line_size_min >= + 1U << (sizeof(acp->chunk_info[0][0].num_dirty) * 8)); +#else + ENV_BUG_ON(ACP_CHUNK_SIZE / ocf_cache_line_size_min >= + 1U << (sizeof(acp->chunk_info[0][0].num_dirty) * 8)); +#endif + + ENV_BUG_ON(cache->cleaning_policy_context); + + cache->cleaning_policy_context = env_vzalloc(sizeof(struct acp_context)); + if (!cache->cleaning_policy_context) { + ocf_cache_log(cache, log_err, "acp context allocation error\n"); + return -OCF_ERR_NO_MEM; + } + acp = cache->cleaning_policy_context; + + env_rwsem_init(&acp->chunks_lock); + + for (i = 0; i < ACP_MAX_BUCKETS; i++) { + INIT_LIST_HEAD(&acp->bucket_info[i].chunk_list); + acp->bucket_info[i].threshold = + ((ACP_CHUNK_SIZE/ocf_line_size(cache)) * + ACP_BUCKET_DEFAULTS[i]) / 100; + } + + if (cache->conf_meta->core_obj_count > 0) { + err = _acp_load_cores(cache); + if (err) { + cleaning_policy_acp_deinitialize(cache); + return 
err; + } + } + + _acp_rebuild(cache); + + return 0; +} + +int cleaning_policy_acp_set_cleaning_param(ocf_cache_t cache, + uint32_t param_id, uint32_t param_value) +{ + struct acp_cleaning_policy_config *config; + + config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data; + + switch (param_id) { + case ocf_acp_wake_up_time: + OCF_CLEANING_CHECK_PARAM(cache, param_value, + OCF_ACP_MIN_WAKE_UP, + OCF_ACP_MAX_WAKE_UP, + "thread_wakeup_time"); + config->thread_wakeup_time = param_value; + ocf_cache_log(cache, log_info, "Write-back flush thread " + "wake-up time: %d\n", config->thread_wakeup_time); + break; + case ocf_acp_flush_max_buffers: + OCF_CLEANING_CHECK_PARAM(cache, param_value, + OCF_ACP_MIN_FLUSH_MAX_BUFFERS, + OCF_ACP_MAX_FLUSH_MAX_BUFFERS, + "flush_max_buffers"); + config->flush_max_buffers = param_value; + ocf_cache_log(cache, log_info, "Write-back flush thread max " + "buffers flushed per iteration: %d\n", + config->flush_max_buffers); + break; + default: + return -OCF_ERR_INVAL; + } + + return 0; +} + +int cleaning_policy_acp_get_cleaning_param(ocf_cache_t cache, + uint32_t param_id, uint32_t *param_value) +{ + struct acp_cleaning_policy_config *config; + + config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data; + + switch (param_id) { + case ocf_acp_flush_max_buffers: + *param_value = config->flush_max_buffers; + break; + case ocf_acp_wake_up_time: + *param_value = config->thread_wakeup_time; + break; + default: + return -OCF_ERR_INVAL; + } + + return 0; +} + + +/* attempt to lock cache line if it's dirty */ +static ocf_cache_line_t _acp_trylock_dirty(struct ocf_cache *cache, + uint32_t core_id, uint64_t core_line) +{ + struct ocf_map_info info; + bool locked = false; + + OCF_METADATA_LOCK_RD(); + + ocf_engine_lookup_map_entry(cache, &info, core_id, + core_line); + + if (info.status == LOOKUP_HIT && + metadata_test_dirty(cache, info.coll_idx) && + ocf_cache_line_try_lock_rd(cache, info.coll_idx)) { + locked = true; + } + + OCF_METADATA_UNLOCK_RD(); + + return locked ? 
info.coll_idx : cache->device->collision_table_entries; +} + +static void _acp_handle_flush_error(struct ocf_cache *cache, + struct acp_context *acp) +{ + struct acp_flush_context *flush = &acp->flush; + + flush->chunk->next_cleaning_timestamp = env_get_tick_count() + + env_secs_to_ticks(ACP_CHUNK_CLEANING_BACKOFF_TIME); + + if (ocf_cache_log_rl(cache)) { + ocf_core_log(&cache->core_obj[flush->chunk->core_id], + log_err, "Cleaning error (%d) in range" + " <%llu; %llu) backing off for %u seconds\n", + flush->error, + flush->chunk->chunk_id * ACP_CHUNK_SIZE, + (flush->chunk->chunk_id * ACP_CHUNK_SIZE) + + ACP_CHUNK_SIZE, + ACP_CHUNK_CLEANING_BACKOFF_TIME); + } +} + +/* called after flush request completed */ +static void _acp_flush_end( + struct ocf_cache *cache, + struct acp_context *acp) +{ + struct acp_flush_context *flush = &acp->flush; + int i; + + for (i = 0; i < flush->size; i++) { + ocf_cache_line_unlock_rd(cache, flush->data[i].cache_line); + ACP_DEBUG_END(acp, flush->data[i].cache_line); + } + + if (flush->error) + _acp_handle_flush_error(cache, acp); +} + +/* flush data */ +static void _acp_flush(struct ocf_cache *cache, struct acp_context *acp, + uint32_t io_queue, struct acp_flush_context *flush) +{ + struct ocf_cleaner_attribs attribs = { + .cache_line_lock = false, + .metadata_locked = false, + .do_sort = false, + .io_queue = io_queue, + }; + + flush->error = ocf_cleaner_do_flush_data(cache, flush->data, + flush->size, &attribs); + + _acp_flush_end(cache, acp); +} + +static inline bool _acp_can_clean_chunk(struct ocf_cache *cache, + struct acp_chunk_info *chunk) +{ + /* Check if core device is opened and if timeout after cleaning error + * expired or wasn't set in the first place */ + return (cache->core_obj[chunk->core_id].opened && + (chunk->next_cleaning_timestamp > env_get_tick_count() || + !chunk->next_cleaning_timestamp)); +} + +static struct acp_chunk_info *_acp_get_cleaning_candidate( + struct ocf_cache *cache) +{ + int i; + struct acp_chunk_info *cur; + struct acp_context *acp = cache->cleaning_policy_context; + + ACP_LOCK_CHUNKS_RD(); + + /* go through all buckets in descending order, excluding bucket 0 which + * is supposed to contain all clean chunks */ + for (i = ACP_MAX_BUCKETS - 1; i > 0; i--) { + list_for_each_entry(cur, &acp->bucket_info[i].chunk_list, list) { + if (_acp_can_clean_chunk(cache, cur)) { + ACP_UNLOCK_CHUNKS_RD(); + return cur; + } + } + } + + ACP_UNLOCK_CHUNKS_RD(); + return NULL; +} + +#define CHUNK_FINISHED -1 + +/* clean at most 'flush_max_buffers' cache lines from given chunk, starting + * at given cache line */ +static int _acp_clean(struct ocf_cache *cache, uint32_t io_queue, + struct acp_chunk_info *chunk, unsigned start, + uint32_t flush_max_buffers) +{ + struct acp_context *acp = _acp_get_ctx_from_cache(cache); + size_t lines_per_chunk = ACP_CHUNK_SIZE / + ocf_line_size(cache); + uint64_t first_core_line = chunk->chunk_id * lines_per_chunk; + unsigned i; + + OCF_DEBUG_PARAM(cache, "lines per chunk %llu chunk %llu " + "first_core_line %llu\n", + (uint64_t)lines_per_chunk, + chunk->chunk_id, + first_core_line); + + ACP_DEBUG_INIT(acp); + + acp->flush.size = 0; + acp->flush.chunk = chunk; + for (i = start; i < lines_per_chunk && acp->flush.size < flush_max_buffers ; i++) { + uint64_t core_line = first_core_line + i; + ocf_cache_line_t cache_line; + + cache_line = _acp_trylock_dirty(cache, chunk->core_id, core_line); + if (cache_line == cache->device->collision_table_entries) + continue; + + acp->flush.data[acp->flush.size].core_id = 
chunk->core_id;
+		acp->flush.data[acp->flush.size].core_line = core_line;
+		acp->flush.data[acp->flush.size].cache_line = cache_line;
+		acp->flush.size++;
+		ACP_DEBUG_BEGIN(acp, cache_line);
+	}
+
+	if (acp->flush.size > 0) {
+		_acp_flush(cache, acp, io_queue, &acp->flush);
+	}
+
+	ACP_DEBUG_CHECK(acp);
+
+	return (i == lines_per_chunk) ? CHUNK_FINISHED : i;
+}
+
+#define NOTHING_TO_CLEAN 0
+#define MORE_TO_CLEAN 1
+
+/* Clean at most 'flush_max_buffers' cache lines from current or newly
+ * selected chunk */
+static int _acp_clean_iteration(struct ocf_cache *cache, uint32_t io_queue,
+		uint32_t flush_max_buffers)
+{
+	struct acp_context *acp = _acp_get_ctx_from_cache(cache);
+	struct acp_state *state = &acp->state;
+
+	if (!state->in_progress) {
+		/* get next chunk to clean */
+		state->chunk = _acp_get_cleaning_candidate(cache);
+
+		if (!state->chunk) {
+			/* nothing to clean */
+			return NOTHING_TO_CLEAN;
+		}
+
+		/* new cleaning cycle - reset state */
+		state->iter = 0;
+		state->in_progress = true;
+	}
+
+	state->iter = _acp_clean(cache, io_queue, state->chunk, state->iter,
+			flush_max_buffers);
+
+	if (state->iter == CHUNK_FINISHED) {
+		/* reached end of chunk - reset state */
+		state->in_progress = false;
+	}
+
+
+	return MORE_TO_CLEAN;
+}
+
+int cleaning_policy_acp_perform_cleaning(struct ocf_cache *cache,
+		uint32_t io_queue)
+{
+	struct acp_cleaning_policy_config *config;
+	int ret;
+
+	config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_acp].data;
+
+	if (NOTHING_TO_CLEAN == _acp_clean_iteration(cache, io_queue,
+			config->flush_max_buffers)) {
+		ret = ACP_BACKOFF_TIME_MS;
+	} else {
+		ret = config->thread_wakeup_time;
+	}
+
+	return ret;
+}
+
+static void _acp_update_bucket(struct acp_context *acp,
+		struct acp_chunk_info *chunk)
+{
+	struct acp_bucket *bucket = &acp->bucket_info[chunk->bucket_id];
+
+	if (chunk->num_dirty > bucket->threshold) {
+		ENV_BUG_ON(chunk->bucket_id == ACP_MAX_BUCKETS - 1);
+
+		chunk->bucket_id++;
+		/* buckets are stored in array, move up one bucket.
+		 * No overflow here. 
ENV_BUG_ON made sure of no incrementation on + * last bucket */ + bucket++; + + list_move_tail(&chunk->list, &bucket->chunk_list); + } else if (chunk->bucket_id && + chunk->num_dirty <= (bucket - 1)->threshold) { + chunk->bucket_id--; + /* move down one bucket, we made sure we won't underflow */ + bucket--; + + list_move(&chunk->list, &bucket->chunk_list); + } +} + +void cleaning_policy_acp_set_hot_cache_line(struct ocf_cache *cache, + uint32_t cache_line) +{ + struct acp_context *acp = _acp_get_ctx_from_cache(cache); + struct cleaning_policy_meta policy_meta; + struct acp_cleaning_policy_meta *acp_meta; + struct acp_chunk_info *chunk; + + ACP_LOCK_CHUNKS_WR(); + + acp_meta = _acp_meta_get(cache, cache_line, &policy_meta); + chunk = _acp_get_chunk(cache, cache_line); + + if (!acp_meta->dirty) { + acp_meta->dirty = 1; + _acp_meta_set(cache, cache_line, &policy_meta); + chunk->num_dirty++; + } + + _acp_update_bucket(acp, chunk); + + ACP_UNLOCK_CHUNKS_WR(); +} + +void cleaning_policy_acp_purge_block(struct ocf_cache *cache, + uint32_t cache_line) +{ + struct acp_context *acp = _acp_get_ctx_from_cache(cache); + struct cleaning_policy_meta policy_meta; + struct acp_cleaning_policy_meta *acp_meta; + struct acp_chunk_info *chunk; + + acp_meta = _acp_meta_get(cache, cache_line, &policy_meta); + chunk = _acp_get_chunk(cache, cache_line); + + if (acp_meta->dirty) { + acp_meta->dirty = 0; + _acp_meta_set(cache, cache_line, &policy_meta); + chunk->num_dirty--; + } + + _acp_update_bucket(acp, chunk); +} + +int cleaning_policy_acp_purge_range(struct ocf_cache *cache, + int core_id, uint64_t start_byte, uint64_t end_byte) +{ + return ocf_metadata_actor(cache, PARTITION_INVALID, + core_id, start_byte, end_byte, + cleaning_policy_acp_purge_block); +} + +void cleaning_policy_acp_remove_core(ocf_cache_t cache, + ocf_core_id_t core_id) +{ + struct acp_context *acp = _acp_get_ctx_from_cache(cache); + uint64_t i; + + ENV_BUG_ON(acp->chunks_total < acp->num_chunks[core_id]); + + if (acp->state.in_progress && acp->state.chunk->core_id == core_id) { + acp->state.in_progress = false; + acp->state.iter = 0; + acp->state.chunk = NULL; + } + + ACP_LOCK_CHUNKS_WR(); + + for (i = 0; i < acp->num_chunks[core_id]; i++) + list_del(&acp->chunk_info[core_id][i].list); + + acp->chunks_total -= acp->num_chunks[core_id]; + acp->num_chunks[core_id] = 0; + + env_vfree(acp->chunk_info[core_id]); + acp->chunk_info[core_id] = NULL; + + ACP_UNLOCK_CHUNKS_WR(); +} + +int cleaning_policy_acp_add_core(ocf_cache_t cache, + ocf_core_id_t core_id) +{ + uint64_t core_size = cache->core_conf_meta[core_id].length; + uint64_t num_chunks = DIV_ROUND_UP(core_size, ACP_CHUNK_SIZE); + struct acp_context *acp = _acp_get_ctx_from_cache(cache); + int i; + + OCF_DEBUG_PARAM(cache, "%s core_id %llu num_chunks %llu\n", + __func__, (uint64_t)core_id, (uint64_t) num_chunks); + + ACP_LOCK_CHUNKS_WR(); + + ENV_BUG_ON(acp->chunk_info[core_id]); + + acp->chunk_info[core_id] = + env_vzalloc(num_chunks * sizeof(acp->chunk_info[0][0])); + + if (!acp->chunk_info[core_id]) { + ACP_UNLOCK_CHUNKS_WR(); + OCF_DEBUG_PARAM(cache, "failed to allocate acp tables\n"); + return -ENOMEM; + } + + OCF_DEBUG_PARAM(cache, "successfully allocated acp tables\n"); + + /* increment counters */ + acp->num_chunks[core_id] = num_chunks; + acp->chunks_total += num_chunks; + + for (i = 0; i < acp->num_chunks[core_id]; i++) { + /* fill in chunk metadata and add to the clean bucket */ + acp->chunk_info[core_id][i].core_id = core_id; + acp->chunk_info[core_id][i].chunk_id = i; + 
list_add(&acp->chunk_info[core_id][i].list, + &acp->bucket_info[0].chunk_list); + } + + ACP_UNLOCK_CHUNKS_WR(); + + return 0; +} diff --git a/src/cleaning/acp.h b/src/cleaning/acp.h new file mode 100644 index 0000000..f2b6c9d --- /dev/null +++ b/src/cleaning/acp.h @@ -0,0 +1,45 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __LAYER_CLEANING_POLICY_AGGRESSIVE_H__ + +#define __LAYER_CLEANING_POLICY_AGGRESSIVE_H__ + +#include "cleaning.h" + +void cleaning_policy_acp_setup(struct ocf_cache *cache); + +int cleaning_policy_acp_initialize(struct ocf_cache *cache, + int init_metadata); + +void cleaning_policy_acp_deinitialize(struct ocf_cache *cache); + +int cleaning_policy_acp_perform_cleaning(struct ocf_cache *cache, + uint32_t io_queue); + +void cleaning_policy_acp_init_cache_block(struct ocf_cache *cache, + uint32_t cache_line); + +void cleaning_policy_acp_set_hot_cache_line(struct ocf_cache *cache, + uint32_t cache_line); + +void cleaning_policy_acp_purge_block(struct ocf_cache *cache, + uint32_t cache_line); + +int cleaning_policy_acp_purge_range(struct ocf_cache *cache, + int core_id, uint64_t start_byte, uint64_t end_byte); + +int cleaning_policy_acp_set_cleaning_param(struct ocf_cache *cache, + uint32_t param_id, uint32_t param_value); + +int cleaning_policy_acp_get_cleaning_param(struct ocf_cache *cache, + uint32_t param_id, uint32_t *param_value); + +int cleaning_policy_acp_add_core(ocf_cache_t cache, ocf_core_id_t core_id); + +void cleaning_policy_acp_remove_core(ocf_cache_t cache, + ocf_core_id_t core_id); + +#endif + diff --git a/src/cleaning/acp_structs.h b/src/cleaning/acp_structs.h new file mode 100644 index 0000000..1bd9406 --- /dev/null +++ b/src/cleaning/acp_structs.h @@ -0,0 +1,23 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __CLEANING_AGGRESSIVE_STRUCTS_H__ +#define __CLEANING_AGGRESSIVE_STRUCTS_H__ + +#include "../utils/utils_cleaner.h" + +/* TODO: remove acp metadata */ +struct acp_cleaning_policy_meta { + uint8_t dirty : 1; +}; + +/* cleaning policy per partition metadata */ +struct acp_cleaning_policy_config { + uint32_t thread_wakeup_time; /* in milliseconds*/ + uint32_t flush_max_buffers; /* in lines */ +}; + +#endif + + diff --git a/src/cleaning/alru.c b/src/cleaning/alru.c new file mode 100644 index 0000000..454f8a9 --- /dev/null +++ b/src/cleaning/alru.c @@ -0,0 +1,802 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "cleaning.h" +#include "alru.h" +#include "../metadata/metadata.h" +#include "../utils/utils_cleaner.h" +#include "../utils/utils_part.h" +#include "../utils/utils_allocator.h" +#include "../concurrency/ocf_cache_concurrency.h" +#include "../ocf_def_priv.h" +#include "cleaning_priv.h" + +#define is_alru_head(x) (x == collision_table_entries) +#define is_alru_tail(x) (x == collision_table_entries) + +#define OCF_CLEANING_DEBUG 0 + +#if 1 == OCF_CLEANING_DEBUG + +#define OCF_DEBUG_PREFIX "[Clean] %s():%d " + +#define OCF_DEBUG_LOG(cache, format, ...) \ + ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \ + format"\n", __func__, __LINE__, ##__VA_ARGS__) + +#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "") + +#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg) + +#define OCF_DEBUG_PARAM(cache, format, ...) 
OCF_DEBUG_LOG(cache, "- "format, \ + ##__VA_ARGS__) + +#else +#define OCF_DEBUG_PREFIX +#define OCF_DEBUG_LOG(cache, format, ...) +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_MSG(cache, msg) +#define OCF_DEBUG_PARAM(cache, format, ...) +#endif + +struct flush_merge_struct { + ocf_cache_line_t cache_line; + ocf_core_id_t core_id; + uint64_t core_sector; +}; + +/* -- Start of ALRU functions -- */ + + +/* Sets the given collision_index as the new _head_ of the ALRU list. */ +static inline void update_alru_head(struct ocf_cache *cache, + int partition_id, unsigned int collision_index) +{ + struct ocf_user_part *part = &cache->user_parts[partition_id]; + + part->runtime->cleaning.policy.alru.lru_head = collision_index; +} + +/* Sets the given collision_index as the new _tail_ of the ALRU list. */ +static inline void update_alru_tail(struct ocf_cache *cache, + int partition_id, unsigned int collision_index) +{ + struct ocf_user_part *part = &cache->user_parts[partition_id]; + + part->runtime->cleaning.policy.alru.lru_tail = collision_index; +} + +/* Sets the given collision_index as the new _head_ and _tail_ + * of the ALRU list. + */ +static inline void update_alru_head_tail(struct ocf_cache *cache, + int partition_id, unsigned int collision_index) +{ + update_alru_head(cache, partition_id, collision_index); + update_alru_tail(cache, partition_id, collision_index); +} + + +/* Adds the given collision_index to the _head_ of the ALRU list */ +static void add_alru_head(struct ocf_cache *cache, int partition_id, + unsigned int collision_index) +{ + unsigned int curr_head_index; + unsigned int collision_table_entries = cache->device->collision_table_entries; + struct ocf_user_part *part = &cache->user_parts[partition_id]; + struct cleaning_policy_meta policy; + + ENV_BUG_ON(!(collision_index < collision_table_entries)); + + ENV_BUG_ON(env_atomic_read( + &part->runtime->cleaning.policy.alru.size) < 0); + + ENV_WARN_ON(!metadata_test_dirty(cache, collision_index)); + ENV_WARN_ON(!metadata_test_valid_any(cache, collision_index)); + + /* First node to be added/ */ + if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) { + update_alru_head_tail(cache, partition_id, collision_index); + + ocf_metadata_get_cleaning_policy(cache, collision_index, + &policy); + policy.meta.alru.lru_next = collision_table_entries; + policy.meta.alru.lru_prev = collision_table_entries; + policy.meta.alru.timestamp = env_ticks_to_secs( + env_get_tick_count()); + ocf_metadata_set_cleaning_policy(cache, collision_index, + &policy); + } else { + /* Not the first node to be added. 
*/ + + curr_head_index = part->runtime->cleaning.policy.alru.lru_head; + + ENV_BUG_ON(!(curr_head_index < collision_table_entries)); + + ocf_metadata_get_cleaning_policy(cache, collision_index, + &policy); + policy.meta.alru.lru_next = curr_head_index; + policy.meta.alru.lru_prev = collision_table_entries; + policy.meta.alru.timestamp = env_ticks_to_secs( + env_get_tick_count()); + ocf_metadata_set_cleaning_policy(cache, collision_index, + &policy); + + ocf_metadata_get_cleaning_policy(cache, curr_head_index, + &policy); + policy.meta.alru.lru_prev = collision_index; + ocf_metadata_set_cleaning_policy(cache, curr_head_index, + &policy); + + update_alru_head(cache, partition_id, collision_index); + } + + env_atomic_inc(&part->runtime->cleaning.policy.alru.size); +} + +/* Deletes the node with the given collision_index from the ALRU list */ +static void remove_alru_list(struct ocf_cache *cache, int partition_id, + unsigned int collision_index) +{ + uint32_t prev_lru_node, next_lru_node; + uint32_t collision_table_entries = cache->device->collision_table_entries; + struct ocf_user_part *part = &cache->user_parts[partition_id]; + struct alru_cleaning_policy *cleaning_policy = + &part->runtime->cleaning.policy.alru; + struct cleaning_policy_meta policy; + + ENV_BUG_ON(!(collision_index < collision_table_entries)); + + if (env_atomic_read(&part->runtime->cleaning.policy.alru.size) == 0) { + ocf_cache_log(cache, log_err, "ERROR: Attempt to remove item " + "from empty ALRU Cleaning Policy queue!\n"); + ENV_BUG(); + } + + ocf_metadata_get_cleaning_policy(cache, collision_index, &policy); + + /* Set prev and next (even if non existent) */ + next_lru_node = policy.meta.alru.lru_next; + prev_lru_node = policy.meta.alru.lru_prev; + + /* Check if entry is not part of the ALRU list */ + if ((next_lru_node == collision_table_entries) && + (prev_lru_node == collision_table_entries) && + (cleaning_policy->lru_head != collision_index) && + (cleaning_policy->lru_tail != collision_index)) { + return; + } + + /* Case 0: If we are head AND tail, there is only one node. So unlink + * node and set that there is no node left in the list. 
+ */ + if (cleaning_policy->lru_head == collision_index && + cleaning_policy->lru_tail == collision_index) { + policy.meta.alru.lru_next = collision_table_entries; + policy.meta.alru.lru_prev = collision_table_entries; + + ocf_metadata_set_cleaning_policy(cache, collision_index, + &policy); + + update_alru_head_tail(cache, partition_id, + collision_table_entries); + } + + /* Case 1: else if this collision_index is ALRU head, but not tail, + * update head and return + */ + else if ((cleaning_policy->lru_tail != collision_index) && + (cleaning_policy->lru_head == collision_index)) { + struct cleaning_policy_meta next_policy; + + ENV_BUG_ON(!(next_lru_node < collision_table_entries)); + + ocf_metadata_get_cleaning_policy(cache, next_lru_node, + &next_policy); + + update_alru_head(cache, partition_id, next_lru_node); + + policy.meta.alru.lru_next = collision_table_entries; + next_policy.meta.alru.lru_prev = collision_table_entries; + + ocf_metadata_set_cleaning_policy(cache, collision_index, + &policy); + ocf_metadata_set_cleaning_policy(cache, next_lru_node, + &next_policy); + } + + /* Case 2: else if this collision_index is ALRU tail, but not head, + * update tail and return + */ + else if ((cleaning_policy->lru_head != collision_index) && + (cleaning_policy->lru_tail == collision_index)) { + struct cleaning_policy_meta prev_policy; + + ENV_BUG_ON(!(prev_lru_node < collision_table_entries)); + + ocf_metadata_get_cleaning_policy(cache, prev_lru_node, + &prev_policy); + + update_alru_tail(cache, partition_id, prev_lru_node); + + policy.meta.alru.lru_prev = collision_table_entries; + prev_policy.meta.alru.lru_next = collision_table_entries; + + ocf_metadata_set_cleaning_policy(cache, collision_index, + &policy); + ocf_metadata_set_cleaning_policy(cache, prev_lru_node, + &prev_policy); + } + + /* Case 3: else this collision_index is a middle node. There is no + * change to the head and the tail pointers. 
+ */ + else { + struct cleaning_policy_meta next_policy; + struct cleaning_policy_meta prev_policy; + + ENV_BUG_ON(!(next_lru_node < collision_table_entries)); + ENV_BUG_ON(!(prev_lru_node < collision_table_entries)); + + ocf_metadata_get_cleaning_policy(cache, prev_lru_node, + &prev_policy); + ocf_metadata_get_cleaning_policy(cache, next_lru_node, + &next_policy); + + /* Update prev and next nodes */ + prev_policy.meta.alru.lru_next = policy.meta.alru.lru_next; + next_policy.meta.alru.lru_prev = policy.meta.alru.lru_prev; + + /* Update the given node */ + policy.meta.alru.lru_next = collision_table_entries; + policy.meta.alru.lru_prev = collision_table_entries; + + ocf_metadata_set_cleaning_policy(cache, collision_index, + &policy); + ocf_metadata_set_cleaning_policy(cache, prev_lru_node, + &prev_policy); + ocf_metadata_set_cleaning_policy(cache, next_lru_node, + &next_policy); + } + + env_atomic_dec(&part->runtime->cleaning.policy.alru.size); +} + +static bool is_on_alru_list(struct ocf_cache *cache, int partition_id, + unsigned int collision_index) +{ + uint32_t prev_lru_node, next_lru_node; + uint32_t collision_table_entries = cache->device->collision_table_entries; + struct ocf_user_part *part = &cache->user_parts[partition_id]; + struct alru_cleaning_policy *cleaning_policy = + &part->runtime->cleaning.policy.alru; + struct cleaning_policy_meta policy; + + ENV_BUG_ON(!(collision_index < collision_table_entries)); + + ocf_metadata_get_cleaning_policy(cache, collision_index, &policy); + + next_lru_node = policy.meta.alru.lru_next; + prev_lru_node = policy.meta.alru.lru_prev; + + return cleaning_policy->lru_tail == collision_index || + cleaning_policy->lru_head == collision_index || + next_lru_node != collision_table_entries || + prev_lru_node != collision_table_entries; +} + + +/* -- End of ALRU functions -- */ + +void cleaning_policy_alru_init_cache_block(struct ocf_cache *cache, + uint32_t cache_line) +{ + struct cleaning_policy_meta policy; + + ocf_metadata_get_cleaning_policy(cache, cache_line, &policy); + + policy.meta.alru.timestamp = 0; + policy.meta.alru.lru_prev = cache->device->collision_table_entries; + policy.meta.alru.lru_next = cache->device->collision_table_entries; + + ocf_metadata_set_cleaning_policy(cache, cache_line, &policy); +} + +void cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache, + uint32_t cache_line) +{ + ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, + cache_line); + + remove_alru_list(cache, part_id, cache_line); +} + +static void __cleaning_policy_alru_purge_cache_block_any( + struct ocf_cache *cache, uint32_t cache_line) +{ + ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, + cache_line); + + if (is_on_alru_list(cache, part_id, cache_line)) + remove_alru_list(cache, part_id, cache_line); +} + +int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id, + uint64_t start_byte, uint64_t end_byte) { + struct ocf_user_part *part; + ocf_part_id_t part_id; + int ret = 0; + + for_each_part(cache, part, part_id) { + if (env_atomic_read(&part->runtime->cleaning. 
+ policy.alru.size) == 0) + continue; + + ret |= ocf_metadata_actor(cache, part_id, + core_id, start_byte, end_byte, + __cleaning_policy_alru_purge_cache_block_any); + } + + return ret; +} + +void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache, + uint32_t cache_line) +{ + ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, + cache_line); + struct ocf_user_part *part = &cache->user_parts[part_id]; + + uint32_t prev_lru_node, next_lru_node; + uint32_t collision_table_entries = cache->device->collision_table_entries; + struct cleaning_policy_meta policy; + + ENV_WARN_ON(!metadata_test_dirty(cache, cache_line)); + ENV_WARN_ON(!metadata_test_valid_any(cache, cache_line)); + + ocf_metadata_get_cleaning_policy(cache, cache_line, &policy); + next_lru_node = policy.meta.alru.lru_next; + prev_lru_node = policy.meta.alru.lru_prev; + + if ((next_lru_node != collision_table_entries) || + (prev_lru_node != collision_table_entries) || + ((part->runtime->cleaning.policy. + alru.lru_head == cache_line) && + (part->runtime->cleaning.policy. + alru.lru_tail == cache_line))) + remove_alru_list(cache, part_id, cache_line); + + add_alru_head(cache, part_id, cache_line); +} + +static void _alru_rebuild(struct ocf_cache *cache) +{ + struct ocf_user_part *part; + ocf_part_id_t part_id; + ocf_core_id_t core_id; + ocf_cache_line_t cline; + uint32_t step = 0; + + for_each_part(cache, part, part_id) { + /* ALRU initialization */ + env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0); + part->runtime->cleaning.policy.alru.lru_head = + cache->device->collision_table_entries; + part->runtime->cleaning.policy.alru.lru_tail = + cache->device->collision_table_entries; + cache->device->runtime_meta->cleaning_thread_access = 0; + } + + for (cline = 0; cline < cache->device->collision_table_entries; cline++) { + ocf_metadata_get_core_and_part_id(cache, cline, &core_id, + NULL); + + OCF_COND_RESCHED_DEFAULT(step); + + if (core_id == OCF_CORE_MAX) + continue; + + cleaning_policy_alru_init_cache_block(cache, cline); + + if (!metadata_test_dirty(cache, cline)) + continue; + + cleaning_policy_alru_set_hot_cache_line(cache, cline); + } +} + +static int cleaning_policy_alru_initialize_part(struct ocf_cache *cache, + struct ocf_user_part *part, int init_metadata) +{ + + if (init_metadata) { + /* ALRU initialization */ + env_atomic_set(&part->runtime->cleaning.policy.alru.size, 0); + part->runtime->cleaning.policy.alru.lru_head = + cache->device->collision_table_entries; + part->runtime->cleaning.policy.alru.lru_tail = + cache->device->collision_table_entries; + } + + cache->device->runtime_meta->cleaning_thread_access = 0; + + return 0; +} + +void cleaning_policy_alru_setup(struct ocf_cache *cache) +{ + struct alru_cleaning_policy_config *config; + + config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; + + config->thread_wakeup_time = OCF_ALRU_DEFAULT_WAKE_UP; + config->stale_buffer_time = OCF_ALRU_DEFAULT_STALENESS_TIME; + config->flush_max_buffers = OCF_ALRU_DEFAULT_FLUSH_MAX_BUFFERS; + config->activity_threshold = OCF_ALRU_DEFAULT_ACTIVITY_THRESHOLD; +} + +int cleaning_policy_alru_initialize(struct ocf_cache *cache, int init_metadata) +{ + struct ocf_user_part *part; + ocf_part_id_t part_id; + + for_each_part(cache, part, part_id) { + cleaning_policy_alru_initialize_part(cache, + part, init_metadata); + } + + if (init_metadata) + _alru_rebuild(cache); + + return 0; +} + +int cleaning_policy_alru_set_cleaning_param(ocf_cache_t cache, + uint32_t param_id, uint32_t param_value) +{ + 
struct alru_cleaning_policy_config *config; + + config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; + + switch (param_id) { + case ocf_alru_wake_up_time: + OCF_CLEANING_CHECK_PARAM(cache, param_value, + OCF_ALRU_MIN_WAKE_UP, + OCF_ALRU_MAX_WAKE_UP, + "thread_wakeup_time"); + config->thread_wakeup_time = param_value; + ocf_cache_log(cache, log_info, "Write-back flush thread " + "wake-up time: %d\n", config->thread_wakeup_time); + break; + case ocf_alru_stale_buffer_time: + OCF_CLEANING_CHECK_PARAM(cache, param_value, + OCF_ALRU_MIN_STALENESS_TIME, + OCF_ALRU_MAX_STALENESS_TIME, + "stale_buffer_time"); + config->stale_buffer_time = param_value; + ocf_cache_log(cache, log_info, "Write-back flush thread " + "staleness time: %d\n", config->stale_buffer_time); + break; + case ocf_alru_flush_max_buffers: + OCF_CLEANING_CHECK_PARAM(cache, param_value, + OCF_ALRU_MIN_FLUSH_MAX_BUFFERS, + OCF_ALRU_MAX_FLUSH_MAX_BUFFERS, + "flush_max_buffers"); + config->flush_max_buffers = param_value; + ocf_cache_log(cache, log_info, "Write-back flush thread max " + "buffers flushed per iteration: %d\n", + config->flush_max_buffers); + break; + case ocf_alru_activity_threshold: + OCF_CLEANING_CHECK_PARAM(cache, param_value, + OCF_ALRU_MIN_ACTIVITY_THRESHOLD, + OCF_ALRU_MAX_ACTIVITY_THRESHOLD, + "activity_threshold"); + config->activity_threshold = param_value; + ocf_cache_log(cache, log_info, "Write-back flush thread " + "activity time threshold: %d\n", + config->activity_threshold); + break; + default: + return -OCF_ERR_INVAL; + } + + return 0; +} + +int cleaning_policy_alru_get_cleaning_param(ocf_cache_t cache, + uint32_t param_id, uint32_t *param_value) +{ + struct alru_cleaning_policy_config *config; + + config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; + + switch (param_id) { + case ocf_alru_wake_up_time: + *param_value = config->thread_wakeup_time; + break; + case ocf_alru_stale_buffer_time: + *param_value = config->stale_buffer_time; + break; + case ocf_alru_flush_max_buffers: + *param_value = config->flush_max_buffers; + break; + case ocf_alru_activity_threshold: + *param_value = config->activity_threshold; + break; + default: + return -OCF_ERR_INVAL; + } + + return 0; +} + +static inline uint32_t compute_timestamp( + const struct alru_cleaning_policy_config *config) +{ + unsigned long time; + + time = env_get_tick_count(); + time -= env_secs_to_ticks(config->stale_buffer_time); + time = env_ticks_to_secs(time); + + return (uint32_t) time; +} + +static int check_for_io_activity(struct ocf_cache *cache, + struct alru_cleaning_policy_config *config) +{ + unsigned int now, last; + + now = env_ticks_to_msecs(env_get_tick_count()); + last = env_atomic_read(&cache->last_access_ms); + + if ((now - last) < config->activity_threshold) + return 1; + return 0; +} + +static int cmp_ocf_user_parts(const void *p1, const void *p2) { + const struct ocf_user_part *t1 = *(const struct ocf_user_part**)p1; + const struct ocf_user_part *t2 = *(const struct ocf_user_part**)p2; + + if (t1->config->priority > t2->config->priority) + return 1; + else if (t1->config->priority < t2->config->priority) + return -1; + + return 0; +} + +static void swp_ocf_user_part(void *part1, void *part2, int size) { + void *tmp = *(void **)part1; + + *(void **)part1 = *(void **) part2; + *(void **)part2 = tmp; +} + +static void get_parts_sorted(struct ocf_user_part **parts, + struct ocf_cache *cache) { + int i; + + for (i = 0; i < OCF_IO_CLASS_MAX; i++) + parts[i] = &cache->user_parts[i]; + + env_sort(parts, 
OCF_IO_CLASS_MAX, sizeof(struct ocf_user_part*), + cmp_ocf_user_parts, swp_ocf_user_part); +} + +static int clean_later(ocf_cache_t cache, uint32_t *delta) +{ + struct alru_cleaning_policy_config *config; + + config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; + + *delta = env_ticks_to_secs(env_get_tick_count()) - + cache->device->runtime_meta->cleaning_thread_access; + if (*delta <= config->thread_wakeup_time) + return true; + + return false; +} + +static void get_block_to_flush(struct flush_data* dst, + ocf_cache_line_t cache_line, struct ocf_cache* cache) +{ + ocf_core_id_t core_id; + uint64_t core_line; + + ocf_metadata_get_core_info(cache, cache_line, + &core_id, &core_line); + + dst->cache_line = cache_line; + dst->core_id = core_id; + dst->core_line = core_line; +} + +static int more_blocks_to_flush(struct ocf_cache *cache, + ocf_cache_line_t cache_line, uint32_t last_access) +{ + struct cleaning_policy_meta policy; + + if (cache_line >= cache->device->collision_table_entries) + return false; + + ocf_metadata_get_cleaning_policy(cache, cache_line, &policy); + + if (policy.meta.alru.timestamp >= last_access) + return false; + + return true; +} + +static int block_is_busy(struct ocf_cache *cache, + ocf_cache_line_t cache_line) +{ + ocf_core_id_t core_id; + uint64_t core_line; + + ocf_metadata_get_core_info(cache, cache_line, + &core_id, &core_line); + + if (!cache->core_obj[core_id].opened) + return true; + + if (ocf_cache_line_is_used(cache, cache_line)) + return true; + + return false; +} + +static int get_data_to_flush(struct flush_data *dst, uint32_t clines_no, + struct ocf_cache *cache, struct ocf_user_part *part) +{ + struct alru_cleaning_policy_config *config; + struct cleaning_policy_meta policy; + ocf_cache_line_t cache_line; + int to_flush = 0; + uint32_t last_access; + + config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; + + cache_line = part->runtime->cleaning.policy.alru.lru_tail; + + last_access = compute_timestamp(config); + + OCF_DEBUG_PARAM(cache, "Last access=%u, timestamp=%u rel=%d", + last_access, policy.meta.alru.timestamp, + policy.meta.alru.timestamp < last_access); + + while (to_flush < clines_no && + more_blocks_to_flush(cache, cache_line, last_access)) { + if (!block_is_busy(cache, cache_line)) { + get_block_to_flush(&dst[to_flush], cache_line, cache); + to_flush++; + } + + ocf_metadata_get_cleaning_policy(cache, cache_line, &policy); + cache_line = policy.meta.alru.lru_prev; + } + + OCF_DEBUG_PARAM(cache, "Collected items_to_clean=%u", to_flush); + + return to_flush; +} + +static int perform_flushing(int clines_no, struct ocf_cache *cache, uint32_t io_queue, + struct flush_data *flush_data, struct ocf_user_part *part) +{ + int to_clean = get_data_to_flush(flush_data, clines_no, cache, part); + + if (to_clean > 0) { + struct ocf_cleaner_attribs attribs = { + .cache_line_lock = true, + .metadata_locked = true, + .do_sort = true, + .io_queue = io_queue + }; + + ocf_cleaner_do_flush_data(cache, flush_data, + to_clean, &attribs); + } else { + /* Update timestamp only if there are no items to be cleaned */ + cache->device->runtime_meta->cleaning_thread_access = + env_ticks_to_secs(env_get_tick_count()); + } + + return to_clean; +} + +static int is_cleanup_possible(ocf_cache_t cache) +{ + struct alru_cleaning_policy_config *config; + uint32_t delta; + + config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; + + if (check_for_io_activity(cache, config)) { + OCF_DEBUG_PARAM(cache, "IO activity detected"); + return 
false; + } + + if (clean_later(cache, &delta)) { + OCF_DEBUG_PARAM(cache, + "Cleaning policy configured to clean later " + "delta=%u wake_up=%u", delta, + config->thread_wakeup_time); + return false; + } + + //Cleaning policy configured to not clean anything + if (config->flush_max_buffers == 0) + return false; + + return true; +} + +static int cleanup(struct ocf_cache *cache, uint32_t clines_no, + struct ocf_user_part *part, uint32_t io_queue) +{ + struct flush_data *flush_data; + size_t flush_data_limit; + int flushed_blocks = 0; + + if (!is_cleanup_possible(cache)) + return flushed_blocks; + + if (OCF_METADATA_LOCK_WR_TRY()) + return flushed_blocks; + + OCF_REALLOC_INIT(&flush_data, &flush_data_limit); + OCF_REALLOC(&flush_data, sizeof(flush_data[0]), clines_no, + &flush_data_limit); + + if (!flush_data) { + OCF_METADATA_UNLOCK_WR(); + ocf_cache_log(cache, log_warn, "No memory to allocate flush " + "data for ALRU cleaning policy"); + return flushed_blocks; + } + + flushed_blocks = perform_flushing(clines_no, cache, io_queue, + flush_data, part); + + OCF_METADATA_UNLOCK_WR(); + + OCF_REALLOC_DEINIT(&flush_data, &flush_data_limit); + + return flushed_blocks; +} + +int cleaning_alru_perform_cleaning(ocf_cache_t cache, uint32_t io_queue) +{ + struct ocf_user_part *parts[OCF_IO_CLASS_MAX]; + int part_id = OCF_IO_CLASS_MAX - 1; + struct alru_cleaning_policy_config *config; + uint32_t clines_no; + + config = (void *)&cache->conf_meta->cleaning[ocf_cleaning_alru].data; + + get_parts_sorted(parts, cache); + + clines_no = config->flush_max_buffers; + + while (part_id >= 0) { + clines_no -= cleanup(cache, clines_no, + parts[part_id], io_queue); + + if (clines_no > 0) + part_id--; + else + break; + } + + if (clines_no > 0) + return config->thread_wakeup_time * 1000; + + return 0; +} diff --git a/src/cleaning/alru.h b/src/cleaning/alru.h new file mode 100644 index 0000000..fc48ac4 --- /dev/null +++ b/src/cleaning/alru.h @@ -0,0 +1,30 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __LAYER_CLEANING_POLICY_ALRU_H__ + +#define __LAYER_CLEANING_POLICY_ALRU_H__ + +#include "cleaning.h" +#include "alru_structs.h" + +void cleaning_policy_alru_setup(struct ocf_cache *cache); +int cleaning_policy_alru_initialize(struct ocf_cache *cache, + int init_metadata); +void cleaning_policy_alru_init_cache_block(struct ocf_cache *cache, + uint32_t cache_line); +void cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache, + uint32_t cache_line); +int cleaning_policy_alru_purge_range(struct ocf_cache *cache, int core_id, + uint64_t start_byte, uint64_t end_byte); +void cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache, + uint32_t cache_line); +int cleaning_policy_alru_set_cleaning_param(struct ocf_cache *cache, + uint32_t param_id, uint32_t param_value); +int cleaning_policy_alru_get_cleaning_param(struct ocf_cache *cache, + uint32_t param_id, uint32_t *param_value); +int cleaning_alru_perform_cleaning(struct ocf_cache *cache, uint32_t io_queue); + +#endif + diff --git a/src/cleaning/alru_structs.h b/src/cleaning/alru_structs.h new file mode 100644 index 0000000..c4783fd --- /dev/null +++ b/src/cleaning/alru_structs.h @@ -0,0 +1,32 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __CLEANING_ALRU_STRUCTS_H__ +#define __CLEANING_ALRU_STRUCTS_H__ + +#include "ocf/ocf.h" +#include "ocf_env.h" + +struct alru_cleaning_policy_meta { + /* Lru pointers 2*4=8 bytes */ + uint32_t 
timestamp; + uint32_t lru_prev; + uint32_t lru_next; +} __attribute__((packed)); + +struct alru_cleaning_policy_config { + uint32_t thread_wakeup_time; /* in seconds */ + uint32_t stale_buffer_time; /* in seconds */ + uint32_t flush_max_buffers; /* in lines */ + uint32_t activity_threshold; /* in milliseconds */ +}; + +struct alru_cleaning_policy { + env_atomic size; + uint32_t lru_head; + uint32_t lru_tail; +}; + + +#endif diff --git a/src/cleaning/cleaning.c b/src/cleaning/cleaning.c new file mode 100644 index 0000000..37c9f70 --- /dev/null +++ b/src/cleaning/cleaning.c @@ -0,0 +1,137 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "cleaning.h" +#include "alru.h" +#include "acp.h" +#include "../ocf_priv.h" +#include "../ocf_cache_priv.h" +#include "../ocf_ctx_priv.h" +#include "../mngt/ocf_mngt_common.h" +#include "../metadata/metadata.h" + +#define SLEEP_TIME_MS (1000) + +struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max] = { + [ocf_cleaning_nop] = { + .name = "nop", + }, + [ocf_cleaning_alru] = { + .setup = cleaning_policy_alru_setup, + .init_cache_block = cleaning_policy_alru_init_cache_block, + .purge_cache_block = cleaning_policy_alru_purge_cache_block, + .purge_range = cleaning_policy_alru_purge_range, + .set_hot_cache_line = cleaning_policy_alru_set_hot_cache_line, + .initialize = cleaning_policy_alru_initialize, + .set_cleaning_param = cleaning_policy_alru_set_cleaning_param, + .get_cleaning_param = cleaning_policy_alru_get_cleaning_param, + .perform_cleaning = cleaning_alru_perform_cleaning, + .name = "alru", + }, + [ocf_cleaning_acp] = { + .setup = cleaning_policy_acp_setup, + .init_cache_block = cleaning_policy_acp_init_cache_block, + .purge_cache_block = cleaning_policy_acp_purge_block, + .purge_range = cleaning_policy_acp_purge_range, + .set_hot_cache_line = cleaning_policy_acp_set_hot_cache_line, + .initialize = cleaning_policy_acp_initialize, + .deinitialize = cleaning_policy_acp_deinitialize, + .set_cleaning_param = cleaning_policy_acp_set_cleaning_param, + .get_cleaning_param = cleaning_policy_acp_get_cleaning_param, + .add_core = cleaning_policy_acp_add_core, + .remove_core = cleaning_policy_acp_remove_core, + .perform_cleaning = cleaning_policy_acp_perform_cleaning, + .name = "acp", + }, +}; + +int ocf_start_cleaner(struct ocf_cache *cache) +{ + return ctx_cleaner_init(cache->owner, &cache->cleaner); +} + +void ocf_stop_cleaner(struct ocf_cache *cache) +{ + ctx_cleaner_stop(cache->owner, &cache->cleaner); +} + +void ocf_cleaner_set_priv(ocf_cleaner_t c, void *priv) +{ + OCF_CHECK_NULL(c); + c->priv = priv; +} + +void *ocf_cleaner_get_priv(ocf_cleaner_t c) +{ + OCF_CHECK_NULL(c); + return c->priv; +} + +ocf_cache_t ocf_cleaner_get_cache(ocf_cleaner_t c) +{ + OCF_CHECK_NULL(c); + return container_of(c, struct ocf_cache, cleaner); +} + +static int _ocf_cleaner_run_check_dirty_inactive(struct ocf_cache *cache) +{ + int i; + + if (!env_bit_test(ocf_cache_state_incomplete, &cache->cache_state)) + return 0; + + for (i = 0; i < OCF_CORE_MAX; ++i) { + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + + if (cache->core_obj[i].opened && env_atomic_read(&(cache-> + core_runtime_meta[i].dirty_clines))) { + return 0; + } + } + + return 1; +} + +uint32_t ocf_cleaner_run(ocf_cleaner_t c, uint32_t io_queue) +{ + struct ocf_cache *cache; + ocf_cleaning_t clean_type; + int sleep = SLEEP_TIME_MS; + + cache = ocf_cleaner_get_cache(c); + + /* Do not involve cleaning when cache is not 
running + * (error, etc.). + */ + if (!env_bit_test(ocf_cache_state_running, &cache->cache_state) || + ocf_mngt_is_cache_locked(cache)) { + return SLEEP_TIME_MS; + } + + /* Sleep in case there is management operation in progress. */ + if (env_rwsem_down_write_trylock(&cache->lock) == 0) + return SLEEP_TIME_MS; + + if (_ocf_cleaner_run_check_dirty_inactive(cache)) { + env_rwsem_up_write(&cache->lock); + return SLEEP_TIME_MS; + } + + clean_type = cache->conf_meta->cleaning_policy_type; + + ENV_BUG_ON(clean_type >= ocf_cleaning_max); + + /* Call cleaning. */ + if (cleaning_policy_ops[clean_type].perform_cleaning) { + sleep = cleaning_policy_ops[clean_type]. + perform_cleaning(cache, io_queue); + } + + env_rwsem_up_write(&cache->lock); + + return sleep; +} + diff --git a/src/cleaning/cleaning.h b/src/cleaning/cleaning.h new file mode 100644 index 0000000..dc2039b --- /dev/null +++ b/src/cleaning/cleaning.h @@ -0,0 +1,75 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __LAYER_CLEANING_POLICY_H__ +#define __LAYER_CLEANING_POLICY_H__ + +#include "alru_structs.h" +#include "nop_structs.h" +#include "acp_structs.h" + +#define CLEANING_POLICY_CONFIG_BYTES 256 +#define CLEANING_POLICY_TYPE_MAX 4 + +struct ocf_request; + +struct cleaning_policy_config { + uint8_t data[CLEANING_POLICY_CONFIG_BYTES]; + struct acp_cleaning_policy_config acp; +}; + +struct cleaning_policy { + union { + struct nop_cleaning_policy nop; + struct alru_cleaning_policy alru; + } policy; +}; + +/* Cleaning policy metadata per cache line */ +struct cleaning_policy_meta { + union { + struct nop_cleaning_policy_meta nop; + struct alru_cleaning_policy_meta alru; + struct acp_cleaning_policy_meta acp; + } meta; +}; + +struct cleaning_policy_ops { + void (*setup)(struct ocf_cache *cache); + int (*initialize)(struct ocf_cache *cache, int init_metadata); + void (*deinitialize)(struct ocf_cache *cache); + int (*add_core)(struct ocf_cache *cache, ocf_core_id_t core_id); + void (*remove_core)(struct ocf_cache *cache, ocf_core_id_t core_id); + void (*init_cache_block)(struct ocf_cache *cache, uint32_t cache_line); + void (*purge_cache_block)(struct ocf_cache *cache, + uint32_t cache_line); + int (*purge_range)(struct ocf_cache *cache, int core_id, + uint64_t start_byte, uint64_t end_byte); + void (*set_hot_cache_line)(struct ocf_cache *cache, + uint32_t cache_line); + int (*set_cleaning_param)(struct ocf_cache *cache, + uint32_t param_id, uint32_t param_value); + int (*get_cleaning_param)(struct ocf_cache *cache, + uint32_t param_id, uint32_t *param_value); + /** + * @brief Performs cleaning. 
+ * @return requested time (in ms) of next call + */ + int (*perform_cleaning)(struct ocf_cache *cache, + uint32_t io_queue); + const char *name; +}; + +extern struct cleaning_policy_ops cleaning_policy_ops[ocf_cleaning_max]; + +struct ocf_cleaner { + void *priv; +}; + +int ocf_start_cleaner(struct ocf_cache *cache); + +void ocf_stop_cleaner(struct ocf_cache *cache); + +#endif diff --git a/src/cleaning/cleaning_priv.h b/src/cleaning/cleaning_priv.h new file mode 100644 index 0000000..028cbd1 --- /dev/null +++ b/src/cleaning/cleaning_priv.h @@ -0,0 +1,19 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +static inline void cleaning_policy_param_error(ocf_cache_t cache, + const char *param_name, uint32_t min, uint32_t max) +{ + ocf_cache_log(cache, log_err, "Refusing setting flush " + "parameters because parameter %s is not within range " + "of <%d-%d>\n", param_name, min, max); +} + +#define OCF_CLEANING_CHECK_PARAM(CACHE, VAL, MIN, MAX, NAME) ({ \ + if (VAL < MIN || VAL > MAX) { \ + cleaning_policy_param_error(CACHE, NAME, MIN, MAX); \ + return -OCF_ERR_INVAL; \ + } \ +}) diff --git a/src/cleaning/nop_structs.h b/src/cleaning/nop_structs.h new file mode 100644 index 0000000..d12342f --- /dev/null +++ b/src/cleaning/nop_structs.h @@ -0,0 +1,15 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __LAYER_CLEANING_POLICY_NOP_STRUCTS_H__ + +#define __LAYER_CLEANING_POLICY_NOP_STRUCTS_H__ + +struct nop_cleaning_policy_meta { +} __attribute__((packed)); + +struct nop_cleaning_policy { +}; + +#endif diff --git a/src/concurrency/ocf_cache_concurrency.c b/src/concurrency/ocf_cache_concurrency.c new file mode 100644 index 0000000..792f865 --- /dev/null +++ b/src/concurrency/ocf_cache_concurrency.c @@ -0,0 +1,1125 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf_concurrency.h" +#include "../ocf_priv.h" +#include "../ocf_request.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_allocator.h" + +#define OCF_CACHE_CONCURRENCY_DEBUG 0 + +#if 1 == OCF_CACHE_CONCURRENCY_DEBUG +#define OCF_DEBUG_TRACE(cache) \ + ocf_cache_log(cache, log_info, "[Concurrency][Cache] %s\n", __func__) + +#define OCF_DEBUG_RQ(rq, format, ...) \ + ocf_cache_log(rq->cache, log_info, "[Concurrency][Cache][%s] %s - " \ + format"\n", OCF_READ == (rq)->rw ? "RD" : "WR", \ + __func__, ##__VA_ARGS__) + +#else +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_RQ(rq, format, ...) 
+#endif + +#define OCF_CACHE_LINE_ACCESS_WR INT_MAX +#define OCF_CACHE_LINE_ACCESS_IDLE 0 +#define OCF_CACHE_LINE_ACCESS_ONE_RD 1 + +#define _WAITERS_LIST_SIZE (16UL * MiB) +#define _WAITERS_LIST_ENTRIES \ + (_WAITERS_LIST_SIZE / sizeof(struct __waiters_list)) + +#define _WAITERS_LIST_ITEM(cache_line) ((cache_line) % _WAITERS_LIST_ENTRIES) + +typedef void (*__on_lock)(void *ctx, uint32_t ctx_id, ocf_cache_line_t line, + int rw); + +struct __waiter { + ocf_cache_line_t line; + void *ctx; + uint32_t ctx_id; + __on_lock on_lock; + struct list_head item; + int rw; +}; + +struct __waiters_list { + struct list_head head; + env_spinlock lock; +}; + +struct ocf_cache_concurrency { + env_rwlock lock; + env_atomic *access; + env_atomic waiting; + size_t access_limit; + env_allocator *allocator; + struct __waiters_list waiters_lsts[_WAITERS_LIST_ENTRIES]; + +}; + +/* + * + */ + +#define ALLOCATOR_NAME_FMT "ocf_%s_cache_concurrency" +#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + OCF_CACHE_NAME_SIZE) + +int ocf_cache_concurrency_init(struct ocf_cache *cache) +{ + uint32_t i; + int error = 0; + struct ocf_cache_concurrency *c; + char name[ALLOCATOR_NAME_MAX]; + + ENV_BUG_ON(cache->device->concurrency.cache); + + OCF_DEBUG_TRACE(cache); + + c = env_vmalloc(sizeof(*c)); + if (!c) { + error = __LINE__; + goto ocf_cache_concurrency_init; + } + + cache->device->concurrency.cache = c; + + OCF_REALLOC_INIT(&c->access, &c->access_limit); + OCF_REALLOC_CP(&c->access, sizeof(c->access[0]), + cache->device->collision_table_entries, &c->access_limit); + + if (!c->access) { + error = __LINE__; + goto ocf_cache_concurrency_init; + } + + if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT, + ocf_cache_get_name(cache)) < 0) { + error = __LINE__; + goto ocf_cache_concurrency_init; + } + + c->allocator = env_allocator_create(sizeof(struct __waiter), name); + if (!c->allocator) { + error = __LINE__; + goto ocf_cache_concurrency_init; + } + + /* Init concurrency control table */ + for (i = 0; i < _WAITERS_LIST_ENTRIES; i++) { + INIT_LIST_HEAD(&c->waiters_lsts[i].head); + env_spinlock_init(&c->waiters_lsts[i].lock); + } + + env_rwlock_init(&c->lock); + + return 0; + +ocf_cache_concurrency_init: + + ocf_cache_log(cache, log_err, "Cannot initialize cache concurrency, " + "ERROR %d", error); + + ocf_cache_concurrency_deinit(cache); + + return -1; +} + +/* + * + */ +void ocf_cache_concurrency_deinit(struct ocf_cache *cache) +{ + struct ocf_cache_concurrency *concurrency; + + if (!cache->device->concurrency.cache) + return; + + OCF_DEBUG_TRACE(cache); + + concurrency = cache->device->concurrency.cache; + + if (concurrency->access) + OCF_REALLOC_DEINIT(&concurrency->access, + &concurrency->access_limit); + + if (concurrency->allocator) + env_allocator_destroy(concurrency->allocator); + + env_vfree(concurrency); + cache->device->concurrency.cache = NULL; +} + +size_t ocf_cache_concurrency_size_of(struct ocf_cache *cache) +{ + size_t size; + + size = sizeof(env_atomic); + size *= cache->device->collision_table_entries; + + size += sizeof(struct ocf_cache_concurrency); + + return size; +} + +/* + * + */ +static inline bool __are_waiters(struct ocf_cache_concurrency *c, + ocf_cache_line_t line) +{ + bool are = false; + struct list_head *iter; + uint32_t idx = _WAITERS_LIST_ITEM(line); + struct __waiters_list *lst = &c->waiters_lsts[idx]; + struct __waiter *waiter; + + /* If list empty that means there are no waiters on cache line */ + if (list_empty(&lst->head)) + return false; + + list_for_each(iter, &lst->head) { + 
waiter = list_entry(iter, struct __waiter, item); + + if (waiter->line == line) { + are = true; + break; + } + } + + return are; +} + +/* + * + */ +static inline void __add_waiter(struct ocf_cache_concurrency *c, + ocf_cache_line_t line, struct __waiter *waiter) +{ + uint32_t idx = _WAITERS_LIST_ITEM(line); + struct __waiters_list *lst = &c->waiters_lsts[idx]; + + list_add_tail(&waiter->item, &lst->head); +} + + +#define __lock_waiters_list(cncrrncy, line, flags) \ + do { \ + uint32_t idx = _WAITERS_LIST_ITEM(line); \ + struct __waiters_list *lst = &cncrrncy->waiters_lsts[idx]; \ + env_spinlock_lock_irqsave(&lst->lock, flags); \ + } while (0) + +#define __unlock_waiters_list(cncrrncy, line, flags) \ + do { \ + uint32_t idx = _WAITERS_LIST_ITEM(line); \ + struct __waiters_list *lst = &cncrrncy->waiters_lsts[idx]; \ + env_spinlock_unlock_irqrestore(&lst->lock, flags); \ + } while (0) + + +/* + * + */ +static inline bool __try_lock_wr(struct ocf_cache_concurrency *c, + ocf_cache_line_t line) +{ + env_atomic *access = &c->access[line]; + int prev = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_IDLE, + OCF_CACHE_LINE_ACCESS_WR); + + if (prev == OCF_CACHE_LINE_ACCESS_IDLE) + return true; + else + return false; +} + +/* + * + */ +static inline bool __try_lock_rd_idle(struct ocf_cache_concurrency *c, + ocf_cache_line_t line) +{ + env_atomic *access = &c->access[line]; + int prev = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_IDLE, + OCF_CACHE_LINE_ACCESS_ONE_RD); + + return (prev == OCF_CACHE_LINE_ACCESS_IDLE); +} + +/* + * + */ +static inline bool __try_lock_rd(struct ocf_cache_concurrency *c, + ocf_cache_line_t line) +{ + env_atomic *access = &c->access[line]; + + return !!env_atomic_add_unless(access, 1, OCF_CACHE_LINE_ACCESS_WR); +} + +/* + * + */ +static inline void __unlock_wr(struct ocf_cache_concurrency *c, + ocf_cache_line_t line) +{ + env_atomic *access = &c->access[line]; + + ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR); + env_atomic_set(access, OCF_CACHE_LINE_ACCESS_IDLE); +} + +/* + * + */ +static inline void __unlock_rd(struct ocf_cache_concurrency *c, + ocf_cache_line_t line) +{ + env_atomic *access = &c->access[line]; + + ENV_BUG_ON(env_atomic_read(access) == 0); + ENV_BUG_ON(env_atomic_read(access) == OCF_CACHE_LINE_ACCESS_WR); + env_atomic_dec(access); +} + +/* + * + */ +static inline bool __try_lock_wr2wr(struct ocf_cache_concurrency *c, + ocf_cache_line_t line) +{ + env_atomic *access = &c->access[line]; + + ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR); + return true; +} + +/* + * + */ +static inline bool __try_lock_wr2rd(struct ocf_cache_concurrency *c, + ocf_cache_line_t line) +{ + env_atomic *access = &c->access[line]; + + ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR); + env_atomic_set(access, OCF_CACHE_LINE_ACCESS_ONE_RD); + return true; +} + +/* + * + */ +static inline bool __try_lock_rd2wr(struct ocf_cache_concurrency *c, + ocf_cache_line_t line) +{ + env_atomic *access = &c->access[line]; + + int v = env_atomic_read(access); + + ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_IDLE); + ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_WR); + + v = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_ONE_RD, + OCF_CACHE_LINE_ACCESS_WR); + + return (v == OCF_CACHE_LINE_ACCESS_ONE_RD); +} + +/* + * + */ +static inline bool __try_lock_rd2rd(struct ocf_cache_concurrency *c, + ocf_cache_line_t line) +{ + env_atomic *access = &c->access[line]; + + int v = env_atomic_read(access); + + ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_IDLE); + ENV_BUG_ON(v 
== OCF_CACHE_LINE_ACCESS_WR);
+
+	return true;
+}
+
+/*
+ * Attempt to lock cache line for write.
+ * If the cache line is already locked, try to add the caller to the wait list.
+ */
+static inline bool __lock_cache_line_wr(struct ocf_cache_concurrency *c,
+		const ocf_cache_line_t line, __on_lock on_lock,
+		void *ctx, uint32_t ctx_id)
+{
+	struct __waiter *waiter;
+	bool locked = false;
+	bool waiting = false;
+	unsigned long flags = 0;
+
+	if (__try_lock_wr(c, line)) {
+		/* No contention, write lock acquired on the fast path */
+		if (on_lock)
+			on_lock(ctx, ctx_id, line, OCF_WRITE);
+		return true;
+	}
+
+	__lock_waiters_list(c, line, flags);
+
+	/* The waiters list is now locked, double check whether the cache
+	 * line has been released in the meantime
+	 */
+	if (__try_lock_wr(c, line)) {
+		/* Lock acquired */
+		locked = true;
+	} else {
+		waiter = NULL;
+		if (on_lock != NULL) {
+			/* Need to create a waiter and add it to the list */
+			waiter = env_allocator_new(c->allocator);
+		}
+		if (waiter) {
+			/* Set up waiter fields */
+			waiter->line = line;
+			waiter->ctx = ctx;
+			waiter->ctx_id = ctx_id;
+			waiter->on_lock = on_lock;
+			waiter->rw = OCF_WRITE;
+			INIT_LIST_HEAD(&waiter->item);
+
+			/* Add to waiters list */
+			__add_waiter(c, line, waiter);
+			waiting = true;
+		}
+	}
+
+	__unlock_waiters_list(c, line, flags);
+
+	if (locked && on_lock)
+		on_lock(ctx, ctx_id, line, OCF_WRITE);
+
+	return locked || waiting;
+}
+
+/*
+ * Attempt to lock cache line for read.
+ * If the cache line is already locked, try to add the caller to the wait list.
+ */
+static inline bool __lock_cache_line_rd(struct ocf_cache_concurrency *c,
+		const ocf_cache_line_t line, __on_lock on_lock,
+		void *ctx, uint32_t ctx_id)
+{
+	struct __waiter *waiter;
+	bool locked = false;
+	bool waiting = false;
+	unsigned long flags = 0;
+
+	if (__try_lock_rd_idle(c, line)) {
+		/* No contention, this is the first reader */
+		if (on_lock)
+			on_lock(ctx, ctx_id, line, OCF_READ);
+		return true;
+	}
+
+	/* Lock waiters list */
+	__lock_waiters_list(c, line, flags);
+
+	if (!__are_waiters(c, line)) {
+		/* No waiters at the moment */
+
+		/* Check if read lock can be obtained */
+		if (__try_lock_rd(c, line)) {
+			/* Cache line locked */
+			locked = true;
+		}
+	}
+
+	if (!locked) {
+		waiter = NULL;
+		if (on_lock) {
+			/* Need to create a waiter and add it to the list */
+			waiter = env_allocator_new(c->allocator);
+		}
+		if (waiter) {
+			/* Set up waiter fields */
+			waiter->line = line;
+			waiter->ctx = ctx;
+			waiter->ctx_id = ctx_id;
+			waiter->on_lock = on_lock;
+			waiter->rw = OCF_READ;
+			INIT_LIST_HEAD(&waiter->item);
+
+			/* Add to waiters list */
+			__add_waiter(c, line, waiter);
+			waiting = true;
+		}
+	}
+
+	__unlock_waiters_list(c, line, flags);
+
+	if (locked && on_lock)
+		on_lock(ctx, ctx_id, line, OCF_READ);
+
+	return locked || waiting;
+}
+
+static inline void __unlock_cache_line_rd_common(struct ocf_cache_concurrency *c,
+		const ocf_cache_line_t line)
+{
+	bool locked = false;
+	bool exchanged = true;
+	uint32_t i = 0;
+
+	uint32_t idx = _WAITERS_LIST_ITEM(line);
+	struct __waiters_list *lst = &c->waiters_lsts[idx];
+	struct __waiter *waiter;
+
+	struct list_head *iter, *next;
+
+	/*
+	 * Lock exchange scenario
+	 * 1. RD -> IDLE
+	 * 2. RD -> RD
+	 * 3.
RD -> WR + */ + + /* Check is requested page is on the list */ + list_for_each_safe(iter, next, &lst->head) { + waiter = list_entry(iter, struct __waiter, item); + + if (line != waiter->line) + continue; + + if (exchanged) { + if (waiter->rw == OCF_WRITE) + locked = __try_lock_rd2wr(c, line); + else if (waiter->rw == OCF_READ) + locked = __try_lock_rd2rd(c, line); + else + ENV_BUG(); + } else { + if (waiter->rw == OCF_WRITE) + locked = __try_lock_wr(c, line); + else if (waiter->rw == OCF_READ) + locked = __try_lock_rd(c, line); + else + ENV_BUG(); + } + + i++; + + if (locked) { + exchanged = false; + list_del(iter); + + waiter->on_lock(waiter->ctx, waiter->ctx_id, line, + waiter->rw); + + env_allocator_del(c->allocator, waiter); + } else { + break; + } + } + + if (exchanged) { + /* No exchange, no waiters on the list, unlock and return + * WR -> IDLE + */ + __unlock_rd(c, line); + } +} + +/* + * + */ +static inline void __unlock_cache_line_rd(struct ocf_cache_concurrency *c, + const ocf_cache_line_t line) +{ + unsigned long flags = 0; + + /* Lock waiters list */ + __lock_waiters_list(c, line, flags); + __unlock_cache_line_rd_common(c, line); + __unlock_waiters_list(c, line, flags); +} + + +static inline void __unlock_cache_line_wr_common(struct ocf_cache_concurrency *c, + const ocf_cache_line_t line) +{ + uint32_t i = 0; + bool locked = false; + bool exchanged = true; + + uint32_t idx = _WAITERS_LIST_ITEM(line); + struct __waiters_list *lst = &c->waiters_lsts[idx]; + struct __waiter *waiter; + + struct list_head *iter, *next; + + /* + * Lock exchange scenario + * 1. WR -> IDLE + * 2. WR -> RD + * 3. WR -> WR + */ + + /* Check is requested page is on the list */ + list_for_each_safe(iter, next, &lst->head) { + waiter = list_entry(iter, struct __waiter, item); + + if (line != waiter->line) + continue; + + if (exchanged) { + if (waiter->rw == OCF_WRITE) + locked = __try_lock_wr2wr(c, line); + else if (waiter->rw == OCF_READ) + locked = __try_lock_wr2rd(c, line); + else + ENV_BUG(); + } else { + if (waiter->rw == OCF_WRITE) + locked = __try_lock_wr(c, line); + else if (waiter->rw == OCF_READ) + locked = __try_lock_rd(c, line); + else + ENV_BUG(); + } + + i++; + + if (locked) { + exchanged = false; + list_del(iter); + + waiter->on_lock(waiter->ctx, waiter->ctx_id, line, + waiter->rw); + + env_allocator_del(c->allocator, waiter); + } else { + break; + } + } + + if (exchanged) { + /* No exchange, no waiters on the list, unlock and return + * WR -> IDLE + */ + __unlock_wr(c, line); + } +} + +/* + * + */ +static inline void __unlock_cache_line_wr(struct ocf_cache_concurrency *c, + const ocf_cache_line_t line) +{ + unsigned long flags = 0; + + /* Lock waiters list */ + __lock_waiters_list(c, line, flags); + __unlock_cache_line_wr_common(c, line); + __unlock_waiters_list(c, line, flags); +} + +/* + * Safely remove cache line lock waiter from waiting list. + * Request can be assigned with lock asynchronously at any point of time, + * so need to check lock state under a common lock. 
+ */ +static inline void __remove_line_from_waiters_list(struct ocf_cache_concurrency *c, + struct ocf_request *rq, int i, void *ctx, int rw) +{ + ocf_cache_line_t line = rq->map[i].coll_idx; + uint32_t idx = _WAITERS_LIST_ITEM(line); + struct __waiters_list *lst = &c->waiters_lsts[idx]; + struct list_head *iter, *next; + struct __waiter *waiter; + unsigned long flags; + + __lock_waiters_list(c, line, flags); + + if (rw == OCF_READ && rq->map[i].rd_locked) { + __unlock_cache_line_rd_common(c, line); + rq->map[i].rd_locked = false; + } else if (rw == OCF_WRITE && rq->map[i].wr_locked) { + __unlock_cache_line_wr_common(c, line); + rq->map[i].wr_locked = false; + } else { + list_for_each_safe(iter, next, &lst->head) { + waiter = list_entry(iter, struct __waiter, item); + if (waiter->ctx == ctx) { + list_del(iter); + env_allocator_del(c->allocator, waiter); + } + } + } + + __unlock_waiters_list(c, line, flags); +} + +/* + * + */ +static int _ocf_rq_lock_rd_common(struct ocf_request *rq, void *context, + __on_lock on_lock) +{ + bool locked, waiting; + int32_t i; + struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache; + ocf_cache_line_t line; + + OCF_DEBUG_RQ(rq, "Lock"); + + ENV_BUG_ON(env_atomic_read(&rq->lock_remaining)); + ENV_BUG_ON(!on_lock); + + /* Try lock request without adding waiters */ + + env_rwlock_read_lock(&c->lock); + /* At this point we have many thread that tries get lock for request */ + + locked = true; + for (i = 0; i < rq->core_line_count; i++) { + + if (rq->map[i].status == LOOKUP_MISS) { + /* MISS nothing to lock */ + continue; + } + + line = rq->map[i].coll_idx; + ENV_BUG_ON(line >= rq->cache->device->collision_table_entries); + ENV_BUG_ON(rq->map[i].rd_locked); + ENV_BUG_ON(rq->map[i].wr_locked); + + if (__lock_cache_line_rd(c, line, NULL, NULL, 0)) { + /* cache line locked */ + rq->map[i].rd_locked = true; + } else { + /* Not possible to lock all request */ + locked = false; + OCF_DEBUG_RQ(rq, "NO Lock, cache line = %u", line); + break; + } + } + + /* Check if request is locked */ + if (!locked) { + /* Request is not locked, discard acquired locks */ + for (; i >= 0; i--) { + line = rq->map[i].coll_idx; + + if (rq->map[i].rd_locked) { + __unlock_rd(c, line); + rq->map[i].rd_locked = false; + } + } + } + + env_rwlock_read_unlock(&c->lock); + + if (locked) { + /* Request completely locked, return acquired status */ + return OCF_LOCK_ACQUIRED; + } + + env_atomic_set(&rq->lock_remaining, rq->core_line_count); + env_atomic_inc(&rq->lock_remaining); + + env_rwlock_write_lock(&c->lock); + /* At this point one thread tries to get locks */ + + OCF_DEBUG_RQ(rq, "Exclusive"); + + waiting = true; + for (i = 0; i < rq->core_line_count; i++) { + + if (rq->map[i].status == LOOKUP_MISS) { + /* MISS nothing to lock */ + env_atomic_dec(&rq->lock_remaining); + continue; + } + + line = rq->map[i].coll_idx; + ENV_BUG_ON(line >= rq->cache->device->collision_table_entries); + ENV_BUG_ON(rq->map[i].rd_locked); + ENV_BUG_ON(rq->map[i].wr_locked); + + if (!__lock_cache_line_rd(c, line, on_lock, context, i)) { + /* lock not acquired and not added to wait list */ + waiting = false; + break; + } + } + + if (!waiting) { + for (; i >= 0; i--) + __remove_line_from_waiters_list(c, rq, i, context, OCF_READ); + } + + OCF_DEBUG_RQ(rq, "Exclusive END"); + + env_rwlock_write_unlock(&c->lock); + + if (env_atomic_dec_return(&rq->lock_remaining) == 0) + return OCF_LOCK_ACQUIRED; + + if (waiting) { + env_atomic_inc(&c->waiting); + return OCF_LOCK_NOT_ACQUIRED; + } + + return -ENOMEM; +} + 
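+/*
+ * A minimal sketch of the expected caller pattern for the request trylock
+ * helpers (illustrative only; process_rq() and handle_lock_err() are
+ * hypothetical helpers). The engine sets rq->resume before calling the
+ * helper and proceeds only on OCF_LOCK_ACQUIRED. On OCF_LOCK_NOT_ACQUIRED
+ * the request has been queued on the waiters list and rq->resume() is
+ * called once every mapped cache line is locked; a negative value means
+ * the waiter allocation failed.
+ *
+ *	rq->resume = ocf_engine_on_resume;
+ *	int lock = ocf_rq_trylock_rd(rq);
+ *	if (lock == OCF_LOCK_ACQUIRED)
+ *		process_rq(rq);
+ *	else if (lock != OCF_LOCK_NOT_ACQUIRED)
+ *		handle_lock_err(rq, lock);
+ */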
+/* + * + */ +static void _rq_on_lock(void *ctx, uint32_t ctx_id, + ocf_cache_line_t line, int rw) +{ + struct ocf_request *rq = ctx; + struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache; + + if (rw == OCF_READ) + rq->map[ctx_id].rd_locked = true; + else if (rw == OCF_WRITE) + rq->map[ctx_id].wr_locked = true; + else + ENV_BUG(); + + if (env_atomic_dec_return(&rq->lock_remaining) == 0) { + /* All cache line locked, resume request */ + OCF_DEBUG_RQ(rq, "Resume"); + OCF_CHECK_NULL(rq->resume); + env_atomic_dec(&c->waiting); + rq->resume(rq); + } +} + +/* + * + */ +int ocf_rq_trylock_rd(struct ocf_request *rq) +{ + OCF_CHECK_NULL(rq->resume); + return _ocf_rq_lock_rd_common(rq, rq, _rq_on_lock); +} + +/* + * Lock wait request context + */ +struct _rq_wait_context { + struct ocf_request *rq; + env_completion cmpl; +}; + +/* + * + */ +static int _ocf_rq_lock_wr_common(struct ocf_request *rq, void *context, + __on_lock on_lock) +{ + bool locked, waiting; + int32_t i; + struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache; + ocf_cache_line_t line; + + OCF_DEBUG_RQ(rq, "Lock"); + + ENV_BUG_ON(env_atomic_read(&rq->lock_remaining)); + + /* Try lock request without adding waiters */ + + env_rwlock_read_lock(&c->lock); + /* At this point many thread that tries getting lock for request */ + + locked = true; + for (i = 0; i < rq->core_line_count; i++) { + + if (rq->map[i].status == LOOKUP_MISS) { + /* MISS nothing to lock */ + continue; + } + + line = rq->map[i].coll_idx; + ENV_BUG_ON(line >= rq->cache->device->collision_table_entries); + ENV_BUG_ON(rq->map[i].rd_locked); + ENV_BUG_ON(rq->map[i].wr_locked); + + if (__lock_cache_line_wr(c, line, NULL, NULL, 0)) { + /* cache line locked */ + rq->map[i].wr_locked = true; + } else { + /* Not possible to lock all request */ + locked = false; + OCF_DEBUG_RQ(rq, "NO Lock, cache line = %u", line); + break; + } + } + + /* Check if request is locked */ + if (!locked) { + /* Request is not locked, discard acquired locks */ + for (; i >= 0; i--) { + line = rq->map[i].coll_idx; + + if (rq->map[i].wr_locked) { + __unlock_wr(c, line); + rq->map[i].wr_locked = false; + } + } + } + + env_rwlock_read_unlock(&c->lock); + + if (locked) { + /* Request completely locked, return acquired status */ + return OCF_LOCK_ACQUIRED; + } + + env_atomic_set(&rq->lock_remaining, rq->core_line_count); + env_atomic_inc(&rq->lock_remaining); + + env_rwlock_write_lock(&c->lock); + /* At this point one thread tires getting locks */ + + OCF_DEBUG_RQ(rq, "Exclusive"); + + waiting = true; + for (i = 0; i < rq->core_line_count; i++) { + + if (rq->map[i].status == LOOKUP_MISS) { + /* MISS nothing to lock */ + env_atomic_dec(&rq->lock_remaining); + continue; + } + + line = rq->map[i].coll_idx; + ENV_BUG_ON(line >= rq->cache->device->collision_table_entries); + ENV_BUG_ON(rq->map[i].rd_locked); + ENV_BUG_ON(rq->map[i].wr_locked); + + if (!__lock_cache_line_wr(c, line, on_lock, context, i)) { + /* lock not acquired and not added to wait list */ + waiting = false; + break; + } + } + + if (!waiting) { + for (; i >= 0; i--) + __remove_line_from_waiters_list(c, rq, i, context, OCF_WRITE); + } + + OCF_DEBUG_RQ(rq, "Exclusive END"); + + env_rwlock_write_unlock(&c->lock); + + if (env_atomic_dec_return(&rq->lock_remaining) == 0) + return OCF_LOCK_ACQUIRED; + + if (waiting) { + env_atomic_inc(&c->waiting); + return OCF_LOCK_NOT_ACQUIRED; + } + + return -ENOMEM; +} + +/* + * + */ +int ocf_rq_trylock_wr(struct ocf_request *rq) +{ + OCF_CHECK_NULL(rq->resume); + return 
_ocf_rq_lock_wr_common(rq, rq, _rq_on_lock); +} + +/* + * + */ +void ocf_rq_unlock_rd(struct ocf_request *rq) +{ + struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache; + int32_t i; + ocf_cache_line_t line; + + OCF_DEBUG_RQ(rq, "Unlock"); + + for (i = 0; i < rq->core_line_count; i++) { + + if (rq->map[i].status == LOOKUP_MISS) { + /* MISS nothing to lock */ + continue; + } + + line = rq->map[i].coll_idx; + + ENV_BUG_ON(!rq->map[i].rd_locked); + ENV_BUG_ON(line >= rq->cache->device->collision_table_entries); + + __unlock_cache_line_rd(c, line); + rq->map[i].rd_locked = false; + } +} + +/* + * + */ +void ocf_rq_unlock_wr(struct ocf_request *rq) +{ + struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache; + int32_t i; + ocf_cache_line_t line; + + OCF_DEBUG_RQ(rq, "Unlock"); + + for (i = 0; i < rq->core_line_count; i++) { + + if (rq->map[i].status == LOOKUP_MISS) { + /* MISS nothing to lock */ + continue; + } + + line = rq->map[i].coll_idx; + + ENV_BUG_ON(!rq->map[i].wr_locked); + ENV_BUG_ON(line >= rq->cache->device->collision_table_entries); + + __unlock_cache_line_wr(c, line); + rq->map[i].wr_locked = false; + } +} + +/* + * + */ +void ocf_rq_unlock(struct ocf_request *rq) +{ + struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache; + int32_t i; + ocf_cache_line_t line; + + OCF_DEBUG_RQ(rq, "Unlock"); + + for (i = 0; i < rq->core_line_count; i++) { + + if (rq->map[i].status == LOOKUP_MISS) { + /* MISS nothing to lock */ + continue; + } + + line = rq->map[i].coll_idx; + ENV_BUG_ON(line >= rq->cache->device->collision_table_entries); + + if (rq->map[i].rd_locked && rq->map[i].wr_locked) { + ENV_BUG(); + } else if (rq->map[i].rd_locked) { + __unlock_cache_line_rd(c, line); + rq->map[i].rd_locked = false; + } else if (rq->map[i].wr_locked) { + __unlock_cache_line_wr(c, line); + rq->map[i].wr_locked = false; + } else { + ENV_BUG(); + } + } +} + +/* + * + */ +void ocf_rq_unlock_entry(struct ocf_cache *cache, + struct ocf_request *rq, uint32_t entry) +{ + struct ocf_cache_concurrency *c = rq->cache->device->concurrency.cache; + + ENV_BUG_ON(rq->map[entry].status == LOOKUP_MISS); + + if (rq->map[entry].rd_locked && rq->map[entry].wr_locked) { + ENV_BUG(); + } else if (rq->map[entry].rd_locked) { + __unlock_cache_line_rd(c, rq->map[entry].coll_idx); + rq->map[entry].rd_locked = false; + } else if (rq->map[entry].wr_locked) { + __unlock_cache_line_wr(c, rq->map[entry].coll_idx); + rq->map[entry].wr_locked = false; + } else { + ENV_BUG(); + } +} + +/* + * + */ +bool ocf_cache_line_is_used(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + struct ocf_cache_concurrency *c = cache->device->concurrency.cache; + + ENV_BUG_ON(line >= cache->device->collision_table_entries); + + if (env_atomic_read(&(c->access[line]))) + return true; + + if (ocf_cache_line_are_waiters(cache, line)) + return true; + else + return false; +} + +/* + * + */ +bool ocf_cache_line_are_waiters(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + struct ocf_cache_concurrency *c = cache->device->concurrency.cache; + bool are; + unsigned long flags = 0; + + ENV_BUG_ON(line >= cache->device->collision_table_entries); + + /* Lock waiters list */ + __lock_waiters_list(c, line, flags); + + are = __are_waiters(c, line); + + __unlock_waiters_list(c, line, flags); + + return are; +} + +/* + * + */ +uint32_t ocf_cache_concurrency_suspended_no(struct ocf_cache *cache) +{ + struct ocf_cache_concurrency *c = cache->device->concurrency.cache; + + return env_atomic_read(&c->waiting); +} + 
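+/*
+ * Illustrative pairing of the single-line helpers below (sketch only;
+ * inspect_line() is a hypothetical caller): a read lock taken with
+ * ocf_cache_line_try_lock_rd() must be released with
+ * ocf_cache_line_unlock_rd() once the caller is done with the line.
+ *
+ *	if (ocf_cache_line_try_lock_rd(cache, line)) {
+ *		inspect_line(cache, line);
+ *		ocf_cache_line_unlock_rd(cache, line);
+ *	}
+ */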
+bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line) +{ + struct ocf_cache_concurrency *c = cache->device->concurrency.cache; + return __lock_cache_line_rd(c, line, NULL, NULL, 0); +} + +/* + * + */ +void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line) +{ + struct ocf_cache_concurrency *c = cache->device->concurrency.cache; + + OCF_DEBUG_RQ(cache, "Cache line = %u", line); + + __unlock_cache_line_rd(c, line); +} + diff --git a/src/concurrency/ocf_cache_concurrency.h b/src/concurrency/ocf_cache_concurrency.h new file mode 100644 index 0000000..f69bab3 --- /dev/null +++ b/src/concurrency/ocf_cache_concurrency.h @@ -0,0 +1,176 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef OCF_CACHE_CONCURRENCY_H_ +#define OCF_CACHE_CONCURRENCY_H_ + +/** + * @file utils_rq.h + * @brief OCF cache concurrency module + */ + +/** + * @brief OCF cache concurrency module handle + */ +struct ocf_cache_concurrency; + +/** + * @brief Initialize OCF cache concurrency module + * + * @param cache - OCF cache instance + * @return 0 - Initialization successful, otherwise ERROR + */ +int ocf_cache_concurrency_init(struct ocf_cache *cache); + +/** + * @biref De-Initialize OCF cache concurrency module + * + * @param cache - OCF cache instance + */ +void ocf_cache_concurrency_deinit(struct ocf_cache *cache); + +/** + * @brief Get number of waiting (suspended) OCF requests in due to cache + * overlapping + * + * @param cache - OCF cache instance + * + * @return Number of suspended OCF requests + */ +uint32_t ocf_cache_concurrency_suspended_no(struct ocf_cache *cache); + +/** + * @brief Return memory footprint conusmed by cache concurrency module + * + * @param cache - OCF cache instance + * + * @return Memory footprint of cache concurrency module + */ +size_t ocf_cache_concurrency_size_of(struct ocf_cache *cache); + +/** + * @brief Lock OCF request for WRITE access (Lock all cache lines in map info) + * + * @note rq->resume callback has to be set + * + * @param rq - OCF request + * + * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed + * + * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was + * added into waiting list. When lock will be acquired rq->resume be called + */ +int ocf_rq_trylock_wr(struct ocf_request *rq); + +/** + * @brief Try complete lock of OCF request for WRITE access (Lock cache lines + * that marked as invalid) + * + * @param rq - OCF request + * + * @note If request not locked it will be added into waiting list + * + * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed + * + * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was + * added into waiting list. When lock will be acquired rq->resume be called + */ +int ocf_rq_retrylock_wr(struct ocf_request *rq); + +/** + * @brief Lock OCF request for READ access (Lock all cache lines in map info) + * + * @note rq->resume callback has to be set + * + * @param rq - OCF request + * + * @retval OCF_LOCK_ACQUIRED - OCF request has been locked and can be processed + * + * @retval OCF_LOCK_NOT_ACQUIRED - OCF request lock not acquired, request was + * added into waiting list. 
When lock will be acquired rq->resume be called + */ +int ocf_rq_trylock_rd(struct ocf_request *rq); + +/** + * @brief Unlock OCF request from WRITE access + * + * @param rq - OCF request + */ +void ocf_rq_unlock_wr(struct ocf_request *rq); + +/** + * @brief Unlock OCF request from READ access + * + * @param rq - OCF request + */ +void ocf_rq_unlock_rd(struct ocf_request *rq); + +/** + * @brief Unlock OCF request from READ or WRITE access + * + * @param rq - OCF request + */ +void ocf_rq_unlock(struct ocf_request *rq); + +/** + * @Check if cache line is used. + * + * Cache line is used when: + * 1. It is locked for write or read access + * or + * 2. There is set locked bit in metadata + * + * @param cache - OCF cache instance + * @param line - Cache line to be unlocked + * + * @retval true - cache line is used + * @retval false - cache line is not used + */ +bool ocf_cache_line_is_used(struct ocf_cache *cache, + ocf_cache_line_t line); + +/** + * @brief Check if for specified cache line there are waiters + * on the waiting list + * + * @param cache - OCF cache instance + * @param line - Cache line to be checked for waiters + * + * @retval true - there are waiters + * @retval false - No waiters + */ +bool ocf_cache_line_are_waiters(struct ocf_cache *cache, + ocf_cache_line_t line); + +/** + * @brief un_lock request map info entry from from WRITE or READ access. + * + * @param cache - OCF cache instance + * @param rq - OCF request + * @param entry - request map entry number + */ +void ocf_rq_unlock_entry(struct ocf_cache *cache, + struct ocf_request *rq, uint32_t entry); + +/** + * @brief Release cache line read lock + * + * @param cache - OCF cache instance + * @param line - Cache line to be unlocked + */ +void ocf_cache_line_unlock_rd(struct ocf_cache *cache, ocf_cache_line_t line); + +/** + * @brief Attempt to lock cache line for read + * + * @param cache - OCF cache instance + * @param line - Cache line to be checked for waiters + * + * @retval true - read lock successfully acquired + * @retval false - failed to acquire read lock + */ +bool ocf_cache_line_try_lock_rd(struct ocf_cache *cache, ocf_cache_line_t line); + +#endif /* OCF_CONCURRENCY_H_ */ diff --git a/src/concurrency/ocf_concurrency.c b/src/concurrency/ocf_concurrency.c new file mode 100644 index 0000000..bea9190 --- /dev/null +++ b/src/concurrency/ocf_concurrency.c @@ -0,0 +1,24 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf_concurrency.h" + +int ocf_concurrency_init(struct ocf_cache *cache) +{ + int result = 0; + + result = ocf_cache_concurrency_init(cache); + + if (result) + ocf_concurrency_deinit(cache); + + return result; +} + +void ocf_concurrency_deinit(struct ocf_cache *cache) +{ + ocf_cache_concurrency_deinit(cache); +} + diff --git a/src/concurrency/ocf_concurrency.h b/src/concurrency/ocf_concurrency.h new file mode 100644 index 0000000..8c7e9da --- /dev/null +++ b/src/concurrency/ocf_concurrency.h @@ -0,0 +1,43 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef OCF_CONCURRENCY_H_ +#define OCF_CONCURRENCY_H_ + +#include "../ocf_cache_priv.h" + +/** + * @file utils_rq.h + * @brief OCF concurrency + */ + +/** + * @brief Lock result - Lock acquired successfully + */ +#define OCF_LOCK_ACQUIRED 0 + +/** + * @brief Lock result - Lock not acquired, lock request added into waiting list + */ +#define OCF_LOCK_NOT_ACQUIRED 1 + +/** + * @brief Initialize OCF concurrency module + * + * 
@param cache - OCF cache instance + * @return 0 - Initialization successful, otherwise ERROR + */ +int ocf_concurrency_init(struct ocf_cache *cache); + +/** + * @biref De-Initialize OCF concurrency module + * + * @param cache - OCF cache instance + */ +void ocf_concurrency_deinit(struct ocf_cache *cache); + +#include "ocf_cache_concurrency.h" + +#endif /* OCF_CONCURRENCY_H_ */ diff --git a/src/engine/cache_engine.c b/src/engine/cache_engine.c new file mode 100644 index 0000000..393ab70 --- /dev/null +++ b/src/engine/cache_engine.c @@ -0,0 +1,314 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_priv.h" +#include "../ocf_cache_priv.h" +#include "../ocf_queue_priv.h" +#include "cache_engine.h" +#include "engine_common.h" +#include "engine_rd.h" +#include "engine_wt.h" +#include "engine_pt.h" +#include "engine_wi.h" +#include "engine_wa.h" +#include "engine_wb.h" +#include "engine_fast.h" +#include "engine_discard.h" +#include "engine_d2c.h" +#include "engine_ops.h" +#include "../utils/utils_part.h" +#include "../utils/utils_rq.h" +#include "../metadata/metadata.h" +#include "../layer_space_management.h" + +enum ocf_io_if_type { + /* Public OCF IO interfaces to be set by user */ + OCF_IO_WT_IF, + OCF_IO_WB_IF, + OCF_IO_WA_IF, + OCF_IO_WI_IF, + OCF_IO_PT_IF, + OCF_IO_MAX_IF, + + /* Private OCF interfaces */ + OCF_IO_FAST_IF, + OCF_IO_DISCARD_IF, + OCF_IO_D2C_IF, + OCF_IO_OPS_IF, + OCF_IO_PRIV_MAX_IF, +}; + +static const struct ocf_io_if IO_IFS[OCF_IO_PRIV_MAX_IF] = { + [OCF_IO_WT_IF] = { + .read = ocf_read_generic, + .write = ocf_write_wt, + .name = "Write Through" + }, + [OCF_IO_WB_IF] = { + .read = ocf_read_generic, + .write = ocf_write_wb, + .name = "Write Back" + }, + [OCF_IO_WA_IF] = { + .read = ocf_read_generic, + .write = ocf_write_wa, + .name = "Write Around" + }, + [OCF_IO_WI_IF] = { + .read = ocf_read_generic, + .write = ocf_write_wi, + .name = "Write Invalidate" + }, + [OCF_IO_PT_IF] = { + .read = ocf_read_pt, + .write = ocf_write_wi, + .name = "Pass Through", + }, + [OCF_IO_FAST_IF] = { + .read = ocf_read_fast, + .write = ocf_write_fast, + .name = "Fast", + }, + [OCF_IO_DISCARD_IF] = { + .read = ocf_discard, + .write = ocf_discard, + .name = "Discard", + }, + [OCF_IO_D2C_IF] = { + .read = ocf_io_d2c, + .write = ocf_io_d2c, + .name = "Direct to core", + }, + [OCF_IO_OPS_IF] = { + .read = ocf_engine_ops, + .write = ocf_engine_ops, + .name = "Ops engine", + }, +}; + +static const struct ocf_io_if *cache_mode_io_if_map[ocf_req_cache_mode_max] = { + [ocf_req_cache_mode_wt] = &IO_IFS[OCF_IO_WT_IF], + [ocf_req_cache_mode_wb] = &IO_IFS[OCF_IO_WB_IF], + [ocf_req_cache_mode_wa] = &IO_IFS[OCF_IO_WA_IF], + [ocf_req_cache_mode_wi] = &IO_IFS[OCF_IO_WI_IF], + [ocf_req_cache_mode_pt] = &IO_IFS[OCF_IO_PT_IF], + [ocf_req_cache_mode_fast] = &IO_IFS[OCF_IO_FAST_IF], + [ocf_req_cache_mode_d2c] = &IO_IFS[OCF_IO_D2C_IF], +}; + +const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t req_cache_mode) +{ + if (req_cache_mode == ocf_req_cache_mode_max) + return NULL; + return cache_mode_io_if_map[req_cache_mode]; +} + +struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache, + struct ocf_queue *q) +{ + unsigned long lock_flags; + struct ocf_request *rq; + + OCF_CHECK_NULL(q); + + /* LOCK */ + env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags); + + if (list_empty(&q->io_list)) { + /* No items on the list */ + env_spinlock_unlock_irqrestore(&q->io_list_lock, + lock_flags); + return NULL; + } + + /* 
Get the first request and remove it from the list */ + rq = list_first_entry(&q->io_list, struct ocf_request, list); + + env_atomic_dec(&q->io_no); + list_del(&rq->list); + + /* UNLOCK */ + env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags); + + OCF_CHECK_NULL(rq); + + if (ocf_rq_alloc_map(rq)) { + rq->complete(rq, rq->error); + return NULL; + } + + return rq; +} + +bool ocf_fallback_pt_is_on(ocf_cache_t cache) +{ + ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0); + + return (cache->fallback_pt_error_threshold != + OCF_CACHE_FALLBACK_PT_INACTIVE && + env_atomic_read(&cache->fallback_pt_error_counter) >= + cache->fallback_pt_error_threshold); +} + +#define SEQ_CUTOFF_FULL_MARGIN \ + (OCF_TO_EVICTION_MIN + OCF_PENDING_EVICTION_LIMIT) + +static inline bool ocf_seq_cutoff_is_on(ocf_cache_t cache) +{ + if (!env_atomic_read(&cache->attached)) + return false; + + return (cache->device->freelist_part->curr_size <= SEQ_CUTOFF_FULL_MARGIN); +} + +bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr, + uint64_t bytes) +{ + ocf_cache_t cache = ocf_core_get_cache(core); + + ocf_seq_cutoff_policy policy = ocf_core_get_seq_cutoff_policy(core); + + switch (policy) { + case ocf_seq_cutoff_policy_always: + break; + + case ocf_seq_cutoff_policy_full: + if (ocf_seq_cutoff_is_on(cache)) + break; + + case ocf_seq_cutoff_policy_never: + return false; + default: + ENV_WARN(true, "Invalid sequential cutoff policy!"); + return false; + } + + if (dir == core->seq_cutoff.rw && + core->seq_cutoff.last == addr && + core->seq_cutoff.bytes + bytes >= + ocf_core_get_seq_cutoff_threshold(core)) { + return true; + } + + return false; +} + +void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req) +{ + /* + * If IO is not consequent or has another direction, + * reset sequential cutoff state. 
+ */ + if (req->byte_position != core->seq_cutoff.last || + req->rw != core->seq_cutoff.rw) { + core->seq_cutoff.rw = req->rw; + core->seq_cutoff.bytes = 0; + } + + /* Update last accessed position and bytes counter */ + core->seq_cutoff.last = req->byte_position + req->byte_length; + core->seq_cutoff.bytes += req->byte_length; +} + +ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache, + ocf_core_t core, struct ocf_io *io) +{ + ocf_cache_mode_t mode; + + if (cache->pt_unaligned_io && !ocf_rq_is_4k(io->addr, io->bytes)) + return ocf_cache_mode_pt; + + mode = ocf_part_get_cache_mode(cache, + ocf_part_class2id(cache, io->class)); + if (!ocf_cache_mode_is_valid(mode)) + mode = cache->conf_meta->cache_mode; + + if (ocf_seq_cutoff_check(core, io->dir, io->addr, io->bytes)) + mode = ocf_cache_mode_pt; + + if (ocf_fallback_pt_is_on(cache)) + mode = ocf_cache_mode_pt; + + if (mode == ocf_cache_mode_wb && + env_atomic_read(&cache->flush_started)) + mode = ocf_cache_mode_wt; + + return mode; +} + +int ocf_engine_hndl_rq(struct ocf_request *rq, + ocf_req_cache_mode_t req_cache_mode) +{ + ocf_cache_t cache = rq->cache; + + OCF_CHECK_NULL(cache); + + rq->io_if = ocf_get_io_if(req_cache_mode); + if (!rq->io_if) + return -EINVAL; + + /* Till OCF engine is not synchronous fully need to push OCF request + * to into OCF workers + */ + + ocf_engine_push_rq_back(rq, true); + + return 0; +} + +int ocf_engine_hndl_fast_rq(struct ocf_request *rq, + ocf_req_cache_mode_t req_cache_mode) +{ + const struct ocf_io_if *io_if; + + io_if = ocf_get_io_if(req_cache_mode); + if (!io_if) + return -EINVAL; + + switch (rq->rw) { + case OCF_READ: + return io_if->read(rq); + case OCF_WRITE: + return io_if->write(rq); + default: + return OCF_FAST_PATH_NO; + } +} + +static void ocf_engine_hndl_2dc_rq(struct ocf_request *rq) +{ + if (OCF_READ == rq->rw) + IO_IFS[OCF_IO_D2C_IF].read(rq); + else if (OCF_WRITE == rq->rw) + IO_IFS[OCF_IO_D2C_IF].write(rq); + else + ENV_BUG(); +} + +void ocf_engine_hndl_discard_rq(struct ocf_request *rq) +{ + if (rq->d2c) { + ocf_engine_hndl_2dc_rq(rq); + return; + } + + if (OCF_READ == rq->rw) + IO_IFS[OCF_IO_DISCARD_IF].read(rq); + else if (OCF_WRITE == rq->rw) + IO_IFS[OCF_IO_DISCARD_IF].write(rq); + else + ENV_BUG(); +} + +void ocf_engine_hndl_ops_rq(struct ocf_request *rq) +{ + if (rq->d2c) + rq->io_if = &IO_IFS[OCF_IO_D2C_IF]; + else + rq->io_if = &IO_IFS[OCF_IO_OPS_IF]; + + ocf_engine_push_rq_back(rq, true); +} diff --git a/src/engine/cache_engine.h b/src/engine/cache_engine.h new file mode 100644 index 0000000..0514820 --- /dev/null +++ b/src/engine/cache_engine.h @@ -0,0 +1,82 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __CACHE_ENGINE_H_ +#define __CACHE_ENGINE_H_ + +struct ocf_thread_priv; +struct ocf_request; + +#define LOOKUP_HIT 5 +#define LOOKUP_MISS 6 +#define LOOKUP_MAPPED 8 + +typedef enum { + /* modes inherited from user API */ + ocf_req_cache_mode_wt = ocf_cache_mode_wt, + ocf_req_cache_mode_wb = ocf_cache_mode_wb, + ocf_req_cache_mode_wa = ocf_cache_mode_wa, + ocf_req_cache_mode_pt = ocf_cache_mode_pt, + ocf_req_cache_mode_wi = ocf_cache_mode_wi, + + /* internal modes */ + ocf_req_cache_mode_fast, + /*!< Fast path */ + ocf_req_cache_mode_d2c, + /*!< Direct to Core - pass through to core without + touching cacheline metadata */ + + ocf_req_cache_mode_max, +} ocf_req_cache_mode_t; + +struct ocf_io_if { + int (*read)(struct ocf_request *req); + + int (*write)(struct ocf_request *req); + + const char 
*name; +}; + +ocf_cache_mode_t ocf_get_effective_cache_mode(ocf_cache_t cache, + ocf_core_t core, struct ocf_io *io); + +const struct ocf_io_if *ocf_get_io_if(ocf_req_cache_mode_t cache_mode); + +static inline const char *ocf_get_io_iface_name(ocf_cache_mode_t cache_mode) +{ + const struct ocf_io_if *iface = ocf_get_io_if(cache_mode); + + return iface ? iface->name : "Unknown"; +} + +static inline bool ocf_cache_mode_is_valid(ocf_cache_mode_t mode) +{ + return mode >= ocf_cache_mode_wt && mode < ocf_cache_mode_max; +} + +void ocf_seq_cutoff_update(ocf_core_t core, struct ocf_request *req); + +bool ocf_fallback_pt_is_on(ocf_cache_t cache); + +bool ocf_seq_cutoff_check(ocf_core_t core, uint32_t dir, uint64_t addr, + uint64_t bytes); + +struct ocf_request *ocf_engine_pop_rq(struct ocf_cache *cache, + struct ocf_queue *q); + +int ocf_engine_hndl_rq(struct ocf_request *rq, + ocf_req_cache_mode_t req_cache_mode); + +#define OCF_FAST_PATH_YES 7 +#define OCF_FAST_PATH_NO 13 + +int ocf_engine_hndl_fast_rq(struct ocf_request *rq, + ocf_req_cache_mode_t req_cache_mode); + +void ocf_engine_hndl_discard_rq(struct ocf_request *rq); + +void ocf_engine_hndl_ops_rq(struct ocf_request *rq); + +#endif diff --git a/src/engine/engine_bf.c b/src/engine/engine_bf.c new file mode 100644 index 0000000..174c349 --- /dev/null +++ b/src/engine/engine_bf.c @@ -0,0 +1,105 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "../ocf_ctx_priv.h" +#include "engine_bf.h" +#include "engine_inv.h" +#include "engine_common.h" +#include "cache_engine.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_io.h" +#include "../concurrency/ocf_concurrency.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "bf" +#include "engine_debug.h" + +/* Decrements and checks if queue may be unblocked again */ +static inline void backfill_queue_dec_unblock(struct ocf_cache *cache) +{ + env_atomic_dec(&cache->pending_read_misses_list_count); + + if (!env_atomic_read(&cache->pending_read_misses_list_blocked)) + return; + + if (env_atomic_read(&cache->pending_read_misses_list_count) + < cache->backfill.queue_unblock_size) + env_atomic_set(&cache->pending_read_misses_list_blocked, 0); +} + +static inline void backfill_queue_inc_block(struct ocf_cache *cache) +{ + if (env_atomic_inc_return(&cache->pending_read_misses_list_count) + >= cache->backfill.max_queue_size) + env_atomic_set(&cache->pending_read_misses_list_blocked, 1); +} + +static void _ocf_backfill_do_io(void *private_data, int error) +{ + struct ocf_request *rq = (struct ocf_request *)private_data; + struct ocf_cache *cache = rq->cache; + + if (error) + rq->error = error; + + if (rq->error) + inc_fallback_pt_error_counter(rq->cache); + + /* Handle callback-caller race to let only one of the two complete the + * request. Also, complete original request only if this is the last + * sub-request to complete + */ + if (env_atomic_dec_return(&rq->req_remaining) == 0) { + /* We must free the pages we have allocated */ + ctx_data_secure_erase(cache->owner, rq->data); + ctx_data_munlock(cache->owner, rq->data); + ctx_data_free(cache->owner, rq->data); + rq->data = NULL; + + if (rq->error) { + env_atomic_inc(&cache->core_obj[rq->core_id]. 
+ counters->cache_errors.write); + ocf_engine_invalidate(rq); + } else { + ocf_rq_unlock(rq); + + /* always free the request at the last point + * of the completion path + */ + ocf_rq_put(rq); + } + } +} + +static int _ocf_backfill_do(struct ocf_request *rq) +{ + unsigned int reqs_to_issue; + + backfill_queue_dec_unblock(rq->cache); + + reqs_to_issue = ocf_engine_io_count(rq); + + /* There will be #reqs_to_issue completions */ + env_atomic_set(&rq->req_remaining, reqs_to_issue); + + rq->data = rq->cp_data; + + ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_WRITE, reqs_to_issue, + _ocf_backfill_do_io, rq); + + return 0; +} + +static const struct ocf_io_if _io_if_backfill = { + .read = _ocf_backfill_do, + .write = _ocf_backfill_do, +}; + +void ocf_engine_backfill(struct ocf_request *rq) +{ + backfill_queue_inc_block(rq->cache); + ocf_engine_push_rq_front_if(rq, &_io_if_backfill, true); +} diff --git a/src/engine/engine_bf.h b/src/engine/engine_bf.h new file mode 100644 index 0000000..5532c8f --- /dev/null +++ b/src/engine/engine_bf.h @@ -0,0 +1,11 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_BF_H_ +#define ENGINE_BF_H_ + +void ocf_engine_backfill(struct ocf_request *rq); + +#endif /* ENGINE_BF_H_ */ diff --git a/src/engine/engine_common.c b/src/engine/engine_common.c new file mode 100644 index 0000000..7e7ee09 --- /dev/null +++ b/src/engine/engine_common.c @@ -0,0 +1,621 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_priv.h" +#include "../ocf_cache_priv.h" +#include "../ocf_queue_priv.h" +#include "engine_common.h" +#define OCF_ENGINE_DEBUG_IO_NAME "common" +#include "engine_debug.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_cleaner.h" +#include "../metadata/metadata.h" +#include "../layer_space_management.h" + +void ocf_engine_error(struct ocf_request *rq, + bool stop_cache, const char *msg) +{ + struct ocf_cache *cache = rq->cache; + + if (stop_cache) + env_bit_clear(ocf_cache_state_running, &cache->cache_state); + + ocf_core_log(&cache->core_obj[rq->core_id], log_err, + "%s sector: %" ENV_PRIu64 ", bytes: %u\n", msg, + BYTES_TO_SECTORS(rq->byte_position), rq->byte_length); +} + +void ocf_engine_lookup_map_entry(struct ocf_cache *cache, + struct ocf_map_info *entry, ocf_core_id_t core_id, + uint64_t core_line) +{ + ocf_cache_line_t line; + ocf_cache_line_t hash_key; + + hash_key = ocf_metadata_hash_func(cache, core_line, core_id); + + /* Initially assume that we have cache miss. + * Hash points to proper bucket. 
+ */ + entry->hash_key = hash_key; + entry->status = LOOKUP_MISS; + entry->coll_idx = cache->device->collision_table_entries; + entry->core_line = core_line; + + line = ocf_metadata_get_hash(cache, hash_key); + + while (line != cache->device->collision_table_entries) { + ocf_core_id_t curr_core_id; + uint64_t curr_core_line; + + ocf_metadata_get_core_info(cache, line, &curr_core_id, + &curr_core_line); + + if (core_id == curr_core_id && curr_core_line == core_line) { + entry->coll_idx = line; + entry->status = LOOKUP_HIT; + break; + } + + line = ocf_metadata_get_collision_next(cache, line); + } +} + +static inline int _ocf_engine_check_map_entry(struct ocf_cache *cache, + struct ocf_map_info *entry, ocf_core_id_t core_id) +{ + ocf_core_id_t _core_id; + uint64_t _core_line; + + if (entry->status == LOOKUP_MISS) + return 0; + + ENV_BUG_ON(entry->coll_idx >= cache->device->collision_table_entries); + + ocf_metadata_get_core_info(cache, entry->coll_idx, &_core_id, + &_core_line); + + if (core_id == _core_id && _core_line == entry->core_line) + return 0; + else + return -1; +} + +void ocf_engine_update_rq_info(struct ocf_cache *cache, + struct ocf_request *rq, uint32_t entry) +{ + uint8_t start_sector = 0; + uint8_t end_sector = ocf_line_end_sector(cache); + struct ocf_map_info *_entry = &(rq->map[entry]); + + if (entry == 0) { + start_sector = BYTES_TO_SECTORS(rq->byte_position) + % ocf_line_sectors(cache); + } + + if (entry == rq->core_line_count - 1) { + end_sector = BYTES_TO_SECTORS(rq->byte_position + + rq->byte_length - 1)% ocf_line_sectors(cache); + } + + /* Handle return value */ + switch (_entry->status) { + case LOOKUP_HIT: + if (metadata_test_valid_sec(cache, _entry->coll_idx, + start_sector, end_sector)) { + rq->info.hit_no++; + } else { + rq->info.invalid_no++; + } + + /* Check request is dirty */ + if (metadata_test_dirty(cache, _entry->coll_idx)) { + rq->info.dirty_any++; + + /* Check if cache line is fully dirty */ + if (metadata_test_dirty_all(cache, _entry->coll_idx)) + rq->info.dirty_all++; + } + + if (rq->part_id != ocf_metadata_get_partition_id(cache, + _entry->coll_idx)) { + /* + * Need to move this cache line into other partition + */ + _entry->re_part = rq->info.re_part = true; + } + + break; + case LOOKUP_MISS: + rq->info.seq_req = false; + break; + case LOOKUP_MAPPED: + break; + default: + ENV_BUG(); + break; + } + + /* Check if cache hit is sequential */ + if (rq->info.seq_req && entry) { + if (ocf_metadata_map_lg2phy(cache, + (rq->map[entry - 1].coll_idx)) + 1 != + ocf_metadata_map_lg2phy(cache, + _entry->coll_idx)) { + rq->info.seq_req = false; + } + } +} + +void ocf_engine_traverse(struct ocf_request *rq) +{ + uint32_t i; + uint64_t core_line; + + struct ocf_cache *cache = rq->cache; + ocf_core_id_t core_id = rq->core_id; + + OCF_DEBUG_TRACE(rq->cache); + + ocf_rq_clear_info(rq); + rq->info.seq_req = true; + + for (i = 0, core_line = rq->core_line_first; + core_line <= rq->core_line_last; core_line++, i++) { + + struct ocf_map_info *entry = &(rq->map[i]); + + ocf_engine_lookup_map_entry(cache, entry, core_id, + core_line); + + if (entry->status != LOOKUP_HIT) { + rq->info.seq_req = false; + /* There is miss then lookup for next map entry */ + OCF_DEBUG_PARAM(cache, "Miss, core line = %llu", + entry->core_line); + continue; + } + + OCF_DEBUG_PARAM(cache, "Hit, cache line %u, core line = %llu", + entry->coll_idx, entry->core_line); + + /* Update eviction (LRU) */ + ocf_eviction_set_hot_cache_line(cache, entry->coll_idx); + + ocf_engine_update_rq_info(cache, rq, i); + 
} + + OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ? + "Yes" : "No"); +} + +int ocf_engine_check(struct ocf_request *rq) +{ + int result = 0; + uint32_t i; + uint64_t core_line; + + struct ocf_cache *cache = rq->cache; + + OCF_DEBUG_TRACE(rq->cache); + + ocf_rq_clear_info(rq); + rq->info.seq_req = true; + + for (i = 0, core_line = rq->core_line_first; + core_line <= rq->core_line_last; core_line++, i++) { + + struct ocf_map_info *entry = &(rq->map[i]); + + if (entry->status == LOOKUP_MISS) { + rq->info.seq_req = false; + continue; + } + + if (_ocf_engine_check_map_entry(cache, entry, rq->core_id)) { + /* Mapping is invalid */ + entry->invalid = true; + rq->info.seq_req = false; + + OCF_DEBUG_PARAM(cache, "Invalid, Cache line %u", + entry->coll_idx); + + result = -1; + } else { + entry->invalid = false; + + OCF_DEBUG_PARAM(cache, "Valid, Cache line %u", + entry->coll_idx); + + ocf_engine_update_rq_info(cache, rq, i); + } + } + + OCF_DEBUG_PARAM(cache, "Sequential - %s", rq->info.seq_req ? + "Yes" : "No"); + + return result; +} + +static void ocf_engine_map_cache_line(struct ocf_request *rq, + uint64_t core_line, unsigned int hash_index, + ocf_cache_line_t *cache_line) +{ + struct ocf_cache *cache = rq->cache; + ocf_part_id_t part_id = rq->part_id; + ocf_cleaning_t clean_policy_type; + + if (cache->device->freelist_part->curr_size == 0) { + rq->info.eviction_error = 1; + return; + } + + *cache_line = cache->device->freelist_part->head; + + /* add_to_collision_list changes .next_col and other fields for entry + * so updated last_cache_line_give must be updated before calling it. + */ + + ocf_metadata_remove_from_free_list(cache, *cache_line); + + ocf_metadata_add_to_partition(cache, part_id, *cache_line); + + /* Add the block to the corresponding collision list */ + ocf_metadata_add_to_collision(cache, rq->core_id, core_line, hash_index, + *cache_line); + + ocf_eviction_init_cache_line(cache, *cache_line, part_id); + + /* Update LRU:: Move this node to head of lru list. */ + ocf_eviction_set_hot_cache_line(cache, *cache_line); + + /* Update dirty cache-block list */ + clean_policy_type = cache->conf_meta->cleaning_policy_type; + + ENV_BUG_ON(clean_policy_type >= ocf_cleaning_max); + + if (cleaning_policy_ops[clean_policy_type].init_cache_block != NULL) + cleaning_policy_ops[clean_policy_type]. 
+ init_cache_block(cache, *cache_line); +} + +static void ocf_engine_map_hndl_error(struct ocf_cache *cache, + struct ocf_request *rq) +{ + uint32_t i; + struct ocf_map_info *entry; + + for (i = 0; i < rq->core_line_count; i++) { + entry = &(rq->map[i]); + + switch (entry->status) { + case LOOKUP_HIT: + case LOOKUP_MISS: + break; + + case LOOKUP_MAPPED: + OCF_DEBUG_RQ(rq, "Canceling cache line %u", + entry->coll_idx); + set_cache_line_invalid_no_flush(cache, 0, + ocf_line_end_sector(cache), + entry->coll_idx); + break; + + default: + ENV_BUG(); + break; + } + } +} + +void ocf_engine_map(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + uint32_t i; + struct ocf_map_info *entry; + uint64_t core_line; + int status = LOOKUP_MAPPED; + ocf_core_id_t core_id = rq->core_id; + + if (ocf_engine_unmapped_count(rq)) + status = space_managment_evict_do(cache, rq, + ocf_engine_unmapped_count(rq)); + + if (rq->info.eviction_error) + return; + + ocf_rq_clear_info(rq); + rq->info.seq_req = true; + + OCF_DEBUG_TRACE(rq->cache); + + for (i = 0, core_line = rq->core_line_first; + core_line <= rq->core_line_last; core_line++, i++) { + entry = &(rq->map[i]); + + ocf_engine_lookup_map_entry(cache, entry, core_id, core_line); + + if (entry->status != LOOKUP_HIT) { + ocf_engine_map_cache_line(rq, entry->core_line, + entry->hash_key, &entry->coll_idx); + + if (rq->info.eviction_error) { + /* + * Eviction error (mapping error), need to + * clean, return and do pass through + */ + OCF_DEBUG_RQ(rq, "Eviction ERROR when mapping"); + ocf_engine_map_hndl_error(cache, rq); + break; + } + + entry->status = status; + } + + OCF_DEBUG_PARAM(rq->cache, + "%s, cache line %u, core line = %llu", + entry->status == LOOKUP_HIT ? "Hit" : "Map", + entry->coll_idx, entry->core_line); + + ocf_engine_update_rq_info(cache, rq, i); + + } + + OCF_DEBUG_PARAM(rq->cache, "Sequential - %s", rq->info.seq_req ? 
+ "Yes" : "No"); +} + +static void _ocf_engine_clean_end(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) { + OCF_DEBUG_RQ(rq, "Cleaning ERROR"); + rq->error |= error; + + /* End request and do not processing */ + ocf_rq_unlock(rq); + + /* Complete request */ + rq->complete(rq, error); + + /* Release OCF request */ + ocf_rq_put(rq); + } else { + rq->info.dirty_any = 0; + rq->info.dirty_all = 0; + ocf_engine_push_rq_front(rq, true); + } +} + +static int _ocf_engine_clean_getter(struct ocf_cache *cache, + void *getter_context, uint32_t item, ocf_cache_line_t *line) +{ + struct ocf_cleaner_attribs *attribs = getter_context; + struct ocf_request *rq = attribs->cmpl_context; + + for (; attribs->getter_item < rq->core_line_count; + attribs->getter_item++) { + + struct ocf_map_info *entry = &rq->map[attribs->getter_item]; + + if (entry->status != LOOKUP_HIT) + continue; + + if (!metadata_test_dirty(cache, entry->coll_idx)) + continue; + + /* Line to be cleaned found, go to next item and return */ + *line = entry->coll_idx; + attribs->getter_item++; + return 0; + } + + return -1; +} + +void ocf_engine_clean(struct ocf_request *rq) +{ + /* Initialize attributes for cleaner */ + struct ocf_cleaner_attribs attribs = { + .cache_line_lock = false, + + .cmpl_context = rq, + .cmpl_fn = _ocf_engine_clean_end, + + .getter = _ocf_engine_clean_getter, + .getter_context = &attribs, + .getter_item = 0, + + .count = rq->info.dirty_any, + .io_queue = rq->io_queue + }; + + /* Start cleaning */ + ocf_cleaner_fire(rq->cache, &attribs); +} + +void ocf_engine_update_block_stats(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + ocf_core_id_t core_id = rq->core_id; + ocf_part_id_t part_id = rq->part_id; + struct ocf_counters_block *blocks; + + blocks = &cache->core_obj[core_id].counters-> + part_counters[part_id].blocks; + + if (rq->rw == OCF_READ) + env_atomic64_add(rq->byte_length, &blocks->read_bytes); + else if (rq->rw == OCF_WRITE) + env_atomic64_add(rq->byte_length, &blocks->write_bytes); + else + ENV_BUG(); +} + +void ocf_engine_update_request_stats(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + ocf_core_id_t core_id = rq->core_id; + ocf_part_id_t part_id = rq->part_id; + struct ocf_counters_req *reqs; + + switch (rq->rw) { + case OCF_READ: + reqs = &cache->core_obj[core_id].counters-> + part_counters[part_id].read_reqs; + break; + case OCF_WRITE: + reqs = &cache->core_obj[core_id].counters-> + part_counters[part_id].write_reqs; + break; + default: + ENV_BUG(); + } + + env_atomic64_inc(&reqs->total); + + if (rq->info.hit_no == 0) + env_atomic64_inc(&reqs->full_miss); + else if (rq->info.hit_no < rq->core_line_count) + env_atomic64_inc(&reqs->partial_miss); +} + +void ocf_engine_push_rq_back(struct ocf_request *rq, bool allow_sync) +{ + struct ocf_cache *cache = rq->cache; + struct ocf_queue *q = NULL; + unsigned long lock_flags; + + INIT_LIST_HEAD(&rq->list); + + ENV_BUG_ON(rq->io_queue >= cache->io_queues_no); + q = &cache->io_queues[rq->io_queue]; + + env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags); + + list_add_tail(&rq->list, &q->io_list); + env_atomic_inc(&q->io_no); + + env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags); + + if (!rq->info.internal) + env_atomic_set(&cache->last_access_ms, + env_ticks_to_msecs(env_get_tick_count())); + + ctx_queue_kick(cache->owner, q, allow_sync); +} + +void ocf_engine_push_rq_front(struct ocf_request *rq, bool allow_sync) +{ + struct ocf_cache *cache = rq->cache; + struct 
ocf_queue *q = NULL; + unsigned long lock_flags; + + INIT_LIST_HEAD(&rq->list); + + ENV_BUG_ON(rq->io_queue >= cache->io_queues_no); + q = &cache->io_queues[rq->io_queue]; + + env_spinlock_lock_irqsave(&q->io_list_lock, lock_flags); + + list_add(&rq->list, &q->io_list); + env_atomic_inc(&q->io_no); + + env_spinlock_unlock_irqrestore(&q->io_list_lock, lock_flags); + + if (!rq->info.internal) + env_atomic_set(&cache->last_access_ms, + env_ticks_to_msecs(env_get_tick_count())); + + ctx_queue_kick(cache->owner, q, allow_sync); +} + +void ocf_engine_push_rq_front_if(struct ocf_request *rq, + const struct ocf_io_if *io_if, + bool allow_sync) +{ + rq->error = 0; /* Please explain why!!! */ + rq->io_if = io_if; + ocf_engine_push_rq_front(rq, allow_sync); +} + +void inc_fallback_pt_error_counter(ocf_cache_t cache) +{ + ENV_BUG_ON(env_atomic_read(&cache->fallback_pt_error_counter) < 0); + + if (cache->fallback_pt_error_threshold == OCF_CACHE_FALLBACK_PT_INACTIVE) + return; + + if (env_atomic_inc_return(&cache->fallback_pt_error_counter) == + cache->fallback_pt_error_threshold) { + ocf_cache_log(cache, log_info, "Error threshold reached. " + "Fallback Pass Through activated\n"); + } +} + +static int _ocf_engine_refresh(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + int result; + + OCF_METADATA_LOCK_RD(); + /* Check under metadata RD lock */ + + result = ocf_engine_check(rq); + + OCF_METADATA_UNLOCK_RD(); + + if (result == 0) { + + /* Refresh successful, can process with original IO interface */ + rq->io_if = rq->priv; + + rq->resume = NULL; + rq->priv = NULL; + + if (rq->rw == OCF_READ) + rq->io_if->read(rq); + else if (rq->rw == OCF_WRITE) + rq->io_if->write(rq); + else + ENV_BUG(); + } else { + ENV_WARN(true, OCF_PREFIX_SHORT" Inconsistent request"); + rq->error = -EINVAL; + + /* Complete request */ + rq->complete(rq, rq->error); + + /* Release WRITE lock of request */ + ocf_rq_unlock(rq); + + /* Release OCF request */ + ocf_rq_put(rq); + } + + return 0; +} + +static const struct ocf_io_if _io_if_refresh = { + .read = _ocf_engine_refresh, + .write = _ocf_engine_refresh, +}; + +void ocf_engine_on_resume(struct ocf_request *rq) +{ + ENV_BUG_ON(rq->priv); + ENV_BUG_ON(ocf_engine_on_resume != rq->resume); + OCF_CHECK_NULL(rq->io_if); + + /* Exchange IO interface */ + rq->priv = (void *)rq->io_if; + + OCF_DEBUG_RQ(rq, "On resume"); + + ocf_engine_push_rq_front_if(rq, &_io_if_refresh, false); +} diff --git a/src/engine/engine_common.h b/src/engine/engine_common.h new file mode 100644 index 0000000..2934aac --- /dev/null +++ b/src/engine/engine_common.h @@ -0,0 +1,223 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_COMMON_H_ +#define ENGINE_COMMON_H_ + +#include "../ocf_request.h" + +/** + * @file engine_common.h + * @brief OCF cache engine common module + */ + +/** + * @brief Signal and handle OCF request error + * + * @param rq OCF request + * @param stop_cache Indicates if OCF cache engine need to be stopped + * @param msg Error message to be printed into log + */ +void ocf_engine_error(struct ocf_request *rq, bool stop_cache, + const char *msg); + +/** + * @brief Check if OCF request is hit + * + * @param rq OCF request + * + * @retval true HIT + * @retval false MISS + */ +static inline bool ocf_engine_is_hit(struct ocf_request *rq) +{ + return rq->info.hit_no == rq->core_line_count; +} + +/** + * @brief Check if OCF request is miss + * + * @param rq OCF request + * + * @retval true MISS + * @retval false 
HIT + */ +#define ocf_engine_is_miss(rq) (!ocf_engine_is_hit(rq)) + +/** + * @brief Check if all cache lines are mapped fully + * + * @param rq OCF request + * + * @retval true request is mapped fully + * @retval false request is not mapped fully and eviction might be run in + * order to complete mapping + */ +static inline bool ocf_engine_is_mapped(struct ocf_request *rq) +{ + return rq->info.hit_no + rq->info.invalid_no == rq->core_line_count; +} + +/** + * @brief Check if all cache lines are dirty + * + * @param rq OCF request + * + * @retval true request is dirty fully + * @retval false request is not dirty fully + */ +static inline bool ocf_engine_is_dirty_all(struct ocf_request *rq) +{ + return rq->info.dirty_all == rq->core_line_count; +} + +/** + * @brief Get number of mapped cache lines + * + * @param rq OCF request + * + * @return Number of mapped cache lines + */ +static inline uint32_t ocf_engine_mapped_count(struct ocf_request *rq) +{ + return rq->info.hit_no + rq->info.invalid_no; +} + +/** + * @brief Get number of unmapped cache lines + * + * @param rq OCF request + * + * @return Number of unmapped cache lines + */ +static inline uint32_t ocf_engine_unmapped_count(struct ocf_request *rq) +{ + return rq->core_line_count - (rq->info.hit_no + rq->info.invalid_no); +} + +/** + * @brief Get number of IOs to perform cache read or write + * + * @param rq OCF request + * + * @return Count of cache IOs + */ +static inline uint32_t ocf_engine_io_count(struct ocf_request *rq) +{ + return rq->info.seq_req ? 1 : rq->core_line_count; +} + +/** + * @brief Clean request (flush dirty data to the core device) + * + * @param rq OCF request + * + * @note After successful cleaning: + * - Dirty status bits in request info will be cleared + * - Request will be pushed front, IO interface need to be set + * + * @note In case of failure: + * - unlock request + * - complete request to the application + * - free request + */ +void ocf_engine_clean(struct ocf_request *rq); + +void ocf_engine_lookup_map_entry(struct ocf_cache *cache, + struct ocf_map_info *entry, ocf_core_id_t core_id, + uint64_t core_line); + +/** + * @brief Traverse request in order to lookup cache lines If there are misses + * need to call eviction. This process is called 'mapping'. + * + * @note This function CALL EVICTION + * + * @param rq OCF request + */ +void ocf_engine_map(struct ocf_request *rq); + +/** + * @brief Traverse OCF request (lookup cache) + * + * @note This function DO NOT CALL EVICTION. Only lookup in metadata is + * performed. Main purpose of this function is to check if there is a HIT. 
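+ * Typical usage in an IO engine (simplified sketch, mirroring the
+ * write-back/write-through paths in this module):
+ * @code
+ * struct ocf_cache *cache = rq->cache;
+ * bool mapped;
+ *
+ * OCF_METADATA_LOCK_RD();
+ * ocf_engine_traverse(rq);
+ * mapped = ocf_engine_is_mapped(rq);
+ * OCF_METADATA_UNLOCK_RD();
+ *
+ * if (!mapped) {
+ *         OCF_METADATA_LOCK_WR();
+ *         ocf_engine_map(rq);
+ *         OCF_METADATA_UNLOCK_WR();
+ * }
+ * @endcode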
+ * + * @param rq OCF request + */ +void ocf_engine_traverse(struct ocf_request *rq); + +/** + * @brief Check if OCF request mapping is still valid + * + * @note If mapping entries is invalid it will be marked + * + * @param rq OCF request + * + * @retval 0 - OCF request mapping is valid + * @return Non zero - OCF request mapping is invalid and need to call re-mapping + */ +int ocf_engine_check(struct ocf_request *rq); + +/** + * @brief Update OCF request info + * + * @param rq OCF request + */ +void ocf_engine_update_rq_info(struct ocf_cache *cache, + struct ocf_request *rq, uint32_t entry); + +/** + * @brief Update OCF request block statistics for an exported object + * + * @param rq OCF request + */ +void ocf_engine_update_block_stats(struct ocf_request *rq); + +/** + * @brief Update OCF request request statistics for an exported object + * (not applicable to write wi and to read wt + * + * @param rq OCF request + */ +void ocf_engine_update_request_stats(struct ocf_request *rq); + +/** + * @brief Push front OCF request to the OCF thread worker queue + * + * @param rq OCF request + * @param allow_sync caller allows for request from queue to be ran immediately + from push function in caller context + */ +void ocf_engine_push_rq_back(struct ocf_request *rq, + bool allow_sync); + +/** + * @brief Push back OCF request to the OCF thread worker queue + * + * @param rq OCF request + * @param allow_sync caller allows for request from queue to be ran immediately + from push function in caller context + */ +void ocf_engine_push_rq_front(struct ocf_request *rq, + bool allow_sync); + +/** + * @brief Set interface and push from request to the OCF thread worker queue + * + * @param rq OCF request + * @param io_if IO interface + * @param allow_sync caller allows for request from queue to be ran immediately + from push function in caller context + */ +void ocf_engine_push_rq_front_if(struct ocf_request *rq, + const struct ocf_io_if *io_if, + bool allow_sync); + +void inc_fallback_pt_error_counter(ocf_cache_t cache); + +void ocf_engine_on_resume(struct ocf_request *rq); + +#endif /* ENGINE_COMMON_H_ */ diff --git a/src/engine/engine_d2c.c b/src/engine/engine_d2c.c new file mode 100644 index 0000000..b261242 --- /dev/null +++ b/src/engine/engine_d2c.c @@ -0,0 +1,72 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "engine_d2c.h" +#include "engine_common.h" +#include "cache_engine.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_io.h" +#include "../metadata/metadata.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "d2c" +#include "engine_debug.h" + +static void _ocf_d2c_completion(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + rq->error = error; + + OCF_DEBUG_RQ(rq, "Completion"); + + if (rq->error) { + rq->info.core_error = 1; + if (rq->rw == OCF_READ) { + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + core_errors.read); + } else { + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + core_errors.write); + } + } + + /* Complete request */ + rq->complete(rq, rq->error); + + /* Release OCF request */ + ocf_rq_put(rq); +} + +int ocf_io_d2c(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + OCF_DEBUG_TRACE(rq->cache); + + ocf_io_start(rq->io); + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, rq->rw, + _ocf_d2c_completion, rq); + + 
ocf_engine_update_block_stats(rq); + + if (rq->rw == OCF_READ) { + env_atomic64_inc(&cache->core_obj[rq->core_id].counters-> + part_counters[rq->part_id].read_reqs.pass_through); + } else { + env_atomic64_inc(&cache->core_obj[rq->core_id].counters-> + part_counters[rq->part_id].write_reqs.pass_through); + } + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; + +} diff --git a/src/engine/engine_d2c.h b/src/engine/engine_d2c.h new file mode 100644 index 0000000..41b7785 --- /dev/null +++ b/src/engine/engine_d2c.h @@ -0,0 +1,11 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_2DC_H_ +#define ENGINE_2DC_H_ + +int ocf_io_d2c(struct ocf_request *rq); + +#endif /* ENGINE_2DC_H_ */ diff --git a/src/engine/engine_debug.h b/src/engine/engine_debug.h new file mode 100644 index 0000000..e3792b4 --- /dev/null +++ b/src/engine/engine_debug.h @@ -0,0 +1,48 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_DEBUG_H_ +#define ENGINE_DEBUG_H_ + +#ifndef OCF_ENGINE_DEBUG +#define OCF_ENGINE_DEBUG 0 +#endif + +#if 1 == OCF_ENGINE_DEBUG + +#ifndef OCF_ENGINE_DEBUG_IO_NAME +#define OCF_ENGINE_DEBUG_IO_NAME "null" +#endif + +#define OCF_DEBUG_PREFIX "[Engine][%s] %s " + +#define OCF_DEBUG_LOG(cache, format, ...) \ + ocf_cache_log_prefix(cache, log_info, OCF_DEBUG_PREFIX, \ + format"\n", OCF_ENGINE_DEBUG_IO_NAME, __func__, \ + ##__VA_ARGS__) + +#define OCF_DEBUG_TRACE(cache) OCF_DEBUG_LOG(cache, "") + +#define OCF_DEBUG_MSG(cache, msg) OCF_DEBUG_LOG(cache, "- %s", msg) + +#define OCF_DEBUG_PARAM(cache, format, ...) OCF_DEBUG_LOG(cache, "- "format, \ + ##__VA_ARGS__) + +#define OCF_DEBUG_RQ(rq, format, ...) \ + ocf_cache_log(rq->cache, log_info, "[Engine][%s][%s, %llu, %u] %s - " \ + format"\n", OCF_ENGINE_DEBUG_IO_NAME, \ + OCF_READ == (rq)->rw ? "RD" : "WR", rq->byte_position, \ + rq->byte_length, __func__, ##__VA_ARGS__) + +#else +#define OCF_DEBUG_PREFIX +#define OCF_DEBUG_LOG(cache, format, ...) +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_MSG(cache, msg) +#define OCF_DEBUG_PARAM(cache, format, ...) +#define OCF_DEBUG_RQ(rq, format, ...) 
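+
+/* With OCF_ENGINE_DEBUG disabled the macros above expand to nothing, so
+ * engine debug tracing compiles out of the IO path entirely.
+ */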
+#endif + +#endif /* ENGINE_DEBUG_H_ */ diff --git a/src/engine/engine_discard.c b/src/engine/engine_discard.c new file mode 100644 index 0000000..792f54c --- /dev/null +++ b/src/engine/engine_discard.c @@ -0,0 +1,248 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "cache_engine.h" +#include "engine_common.h" +#include "engine_discard.h" +#include "../metadata/metadata.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_io.h" +#include "../utils/utils_cache_line.h" +#include "../concurrency/ocf_concurrency.h" + +#define OCF_ENGINE_DEBUG 0 + +#define OCF_ENGINE_DEBUG_IO_NAME "discard" +#include "engine_debug.h" + +static int _ocf_discard_step_do(struct ocf_request *rq); +static int _ocf_discard_step(struct ocf_request *rq); +static int _ocf_discard_flush_cache(struct ocf_request *rq); +static int _ocf_discard_core(struct ocf_request *rq); + +static const struct ocf_io_if _io_if_discard_step = { + .read = _ocf_discard_step, + .write = _ocf_discard_step +}; + +static const struct ocf_io_if _io_if_discard_step_resume = { + .read = _ocf_discard_step_do, + .write = _ocf_discard_step_do +}; + +static const struct ocf_io_if _io_if_discard_flush_cache = { + .read = _ocf_discard_flush_cache, + .write = _ocf_discard_flush_cache, +}; + +static const struct ocf_io_if _io_if_discard_core = { + .read = _ocf_discard_core, + .write = _ocf_discard_core +}; + +static void _ocf_discard_complete_rq(struct ocf_request *rq, int error) +{ + rq->complete(rq, error); + + ocf_rq_put(rq); +} + +static void _ocf_discard_core_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + OCF_DEBUG_RQ(rq, "Core DISCARD Completion"); + + _ocf_discard_complete_rq(rq, error); +} + +static int _ocf_discard_core(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + ocf_submit_obj_discard(&cache->core_obj[rq->core_id].obj, rq, + _ocf_discard_core_io, rq); + + return 0; +} + +static void _ocf_discard_cache_flush_io_cmpl(void *priv, int error) +{ + struct ocf_request *rq = priv; + + if (error) { + ocf_metadata_error(rq->cache); + _ocf_discard_complete_rq(rq, error); + return; + } + + rq->io_if = &_io_if_discard_core; + ocf_engine_push_rq_front(rq, true); +} + +static int _ocf_discard_flush_cache(struct ocf_request *rq) +{ + ocf_submit_obj_flush(&rq->cache->device->obj, + _ocf_discard_cache_flush_io_cmpl, rq); + + return 0; +} + +static void _ocf_discard_finish_step(struct ocf_request *rq) +{ + rq->discard.handled += BYTES_TO_SECTORS(rq->byte_length); + + if (rq->discard.handled < rq->discard.nr_sects) + rq->io_if = &_io_if_discard_step; + else if (rq->cache->device->init_mode != ocf_init_mode_metadata_volatile) + rq->io_if = &_io_if_discard_flush_cache; + else + rq->io_if = &_io_if_discard_core; + + ocf_engine_push_rq_front(rq, true); +} + +static void _ocf_discard_step_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) + rq->error |= error; + + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + OCF_DEBUG_RQ(rq, "Completion"); + + /* Release WRITE lock of request */ + ocf_rq_unlock_wr(rq); + + if (rq->error) { + ocf_metadata_error(rq->cache); + _ocf_discard_complete_rq(rq, rq->error); + return; + } + + _ocf_discard_finish_step(rq); +} + +int _ocf_discard_step_do(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + 
env_atomic_set(&rq->req_remaining, 1); /* One core IO */ + + if (ocf_engine_mapped_count(rq)) { + /* There are mapped cache line, need to remove them */ + + OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/ + + /* Remove mapped cache lines from metadata */ + ocf_purge_map_info(rq); + + if (rq->info.flush_metadata) { + /* Request was dirty and need to flush metadata */ + ocf_metadata_flush_do_asynch(cache, rq, + _ocf_discard_step_io); + } + + OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ + } + + OCF_DEBUG_RQ(rq, "Discard"); + _ocf_discard_step_io(rq, 0); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + +static void _ocf_discard_on_resume(struct ocf_request *rq) +{ + OCF_DEBUG_RQ(rq, "On resume"); + ocf_engine_push_rq_front(rq, true); +} + +static int _ocf_discard_step(struct ocf_request *rq) +{ + int lock; + struct ocf_cache *cache = rq->cache; + + OCF_DEBUG_TRACE(rq->cache); + + rq->byte_position = SECTORS_TO_BYTES(rq->discard.sector + + rq->discard.handled); + rq->byte_length = MIN(SECTORS_TO_BYTES(rq->discard.nr_sects - + rq->discard.handled), MAX_TRIM_RQ_SIZE); + rq->core_line_first = ocf_bytes_2_lines(cache, rq->byte_position); + rq->core_line_last = + ocf_bytes_2_lines(cache, rq->byte_position + rq->byte_length - 1); + rq->core_line_count = rq->core_line_last - rq->core_line_first + 1; + rq->io_if = &_io_if_discard_step_resume; + + OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/ + + ENV_BUG_ON(env_memset(rq->map, sizeof(*rq->map) * rq->core_line_count, + 0)); + + /* Travers to check if request is mapped fully */ + ocf_engine_traverse(rq); + + if (ocf_engine_mapped_count(rq)) { + /* Some cache line are mapped, lock request for WRITE access */ + lock = ocf_rq_trylock_wr(rq); + } else { + lock = OCF_LOCK_ACQUIRED; + } + + OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ + + if (lock >= 0) { + if (OCF_LOCK_ACQUIRED == lock) { + _ocf_discard_step_do(rq); + } else { + /* WR lock was not acquired, need to wait for resume */ + OCF_DEBUG_RQ(rq, "NO LOCK") + } + } else { + OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock); + rq->error |= lock; + _ocf_discard_finish_step(rq); + } + + env_cond_resched(); + + return 0; +} + +int ocf_discard(struct ocf_request *rq) +{ + OCF_DEBUG_TRACE(rq->cache); + + ocf_io_start(rq->io); + + if (rq->rw == OCF_READ) { + rq->complete(rq, -EINVAL); + return 0; + } + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* Set resume call backs */ + rq->resume = _ocf_discard_on_resume; + + _ocf_discard_step(rq); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} diff --git a/src/engine/engine_discard.h b/src/engine/engine_discard.h new file mode 100644 index 0000000..59d08c7 --- /dev/null +++ b/src/engine/engine_discard.h @@ -0,0 +1,11 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __ENGINE_DISCARD_H__ +#define __ENGINE_DISCARD_H__ + +int ocf_discard(struct ocf_request *rq); + +#endif diff --git a/src/engine/engine_fast.c b/src/engine/engine_fast.c new file mode 100644 index 0000000..41a1662 --- /dev/null +++ b/src/engine/engine_fast.c @@ -0,0 +1,235 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "engine_fast.h" +#include "engine_common.h" +#include "engine_pt.h" +#include "engine_wb.h" +#include 
"../utils/utils_rq.h" +#include "../utils/utils_part.h" +#include "../utils/utils_io.h" +#include "../concurrency/ocf_concurrency.h" +#include "../metadata/metadata.h" + +#define OCF_ENGINE_DEBUG 0 + +#define OCF_ENGINE_DEBUG_IO_NAME "fast" +#include "engine_debug.h" + +/* _____ _ ______ _ _____ _ _ + * | __ \ | | | ____| | | | __ \ | | | | + * | |__) |___ __ _ __| | | |__ __ _ ___| |_ | |__) |_ _| |_| |__ + * | _ // _ \/ _` |/ _` | | __/ _` / __| __| | ___/ _` | __| '_ \ + * | | \ \ __/ (_| | (_| | | | | (_| \__ \ |_ | | | (_| | |_| | | | + * |_| \_\___|\__,_|\__,_| |_| \__,_|___/\__| |_| \__,_|\__|_| |_| + */ + +static void _ocf_read_fast_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) + rq->error |= error; + + if (env_atomic_dec_return(&rq->req_remaining)) { + /* Not all requests finished */ + return; + } + + OCF_DEBUG_RQ(rq, "HIT completion"); + + if (rq->error) { + OCF_DEBUG_RQ(rq, "ERROR"); + + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + cache_errors.read); + ocf_engine_push_rq_front_pt(rq); + } else { + ocf_rq_unlock(rq); + + /* Complete request */ + rq->complete(rq, rq->error); + + /* Free the request at the last point of the completion path */ + ocf_rq_put(rq); + } +} + +static int _ocf_read_fast_do(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + if (ocf_engine_is_miss(rq)) { + /* It seams that after resume, now request is MISS, do PT */ + OCF_DEBUG_RQ(rq, "Switching to read PT"); + ocf_read_pt_do(rq); + return 0; + + } + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + if (rq->info.re_part) { + OCF_DEBUG_RQ(rq, "Re-Part"); + + OCF_METADATA_LOCK_WR(); + + /* Probably some cache lines are assigned into wrong + * partition. Need to move it to new one + */ + ocf_part_move(rq); + + OCF_METADATA_UNLOCK_WR(); + } + + /* Submit IO */ + OCF_DEBUG_RQ(rq, "Submit"); + env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq)); + ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ, + ocf_engine_io_count(rq), _ocf_read_fast_io, rq); + + + /* Updata statistics */ + ocf_engine_update_request_stats(rq); + ocf_engine_update_block_stats(rq); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + +static const struct ocf_io_if _io_if_read_fast_resume = { + .read = _ocf_read_fast_do, + .write = _ocf_read_fast_do, +}; + +int ocf_read_fast(struct ocf_request *rq) +{ + bool hit; + int lock = OCF_LOCK_NOT_ACQUIRED; + struct ocf_cache *cache = rq->cache; + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* Set resume call backs */ + rq->resume = ocf_engine_on_resume; + rq->io_if = &_io_if_read_fast_resume; + + /*- Metadata RD access -----------------------------------------------*/ + + OCF_METADATA_LOCK_RD(); + + /* Traverse request to cache if there is hit */ + ocf_engine_traverse(rq); + + hit = ocf_engine_is_hit(rq); + if (hit) { + ocf_io_start(rq->io); + lock = ocf_rq_trylock_rd(rq); + } + + OCF_METADATA_UNLOCK_RD(); + + if (hit) { + OCF_DEBUG_RQ(rq, "Fast path success"); + + if (lock >= 0) { + if (lock != OCF_LOCK_ACQUIRED) { + /* Lock was not acquired, need to wait for resume */ + OCF_DEBUG_RQ(rq, "NO LOCK"); + } else { + /* Lock was acquired can perform IO */ + _ocf_read_fast_do(rq); + } + } else { + OCF_DEBUG_RQ(rq, "LOCK ERROR"); + rq->complete(rq, lock); + ocf_rq_put(rq); + } + } else { + OCF_DEBUG_RQ(rq, "Fast path failure"); + } + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + if (hit) + 
return OCF_FAST_PATH_YES; + else + return OCF_FAST_PATH_NO; +} + +/* __ __ _ _ ______ _ _____ _ _ + * \ \ / / (_) | | ____| | | | __ \ | | | | + * \ \ /\ / / __ _| |_ ___ | |__ __ _ ___| |_ | |__) |_ _| |_| |__ + * \ \/ \/ / '__| | __/ _ \ | __/ _` / __| __| | ___/ _` | __| '_ \ + * \ /\ /| | | | || __/ | | | (_| \__ \ |_ | | | (_| | |_| | | | + * \/ \/ |_| |_|\__\___| |_| \__,_|___/\__| |_| \__,_|\__|_| |_| + */ + +static const struct ocf_io_if _io_if_write_fast_resume = { + .read = ocf_write_wb_do, + .write = ocf_write_wb_do, +}; + +int ocf_write_fast(struct ocf_request *rq) +{ + bool mapped; + int lock = OCF_LOCK_NOT_ACQUIRED; + struct ocf_cache *cache = rq->cache; + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* Set resume call backs */ + rq->resume = ocf_engine_on_resume; + rq->io_if = &_io_if_write_fast_resume; + + /*- Metadata RD access -----------------------------------------------*/ + + OCF_METADATA_LOCK_RD(); + + /* Traverse request to cache if there is hit */ + ocf_engine_traverse(rq); + + mapped = ocf_engine_is_mapped(rq); + if (mapped) { + ocf_io_start(rq->io); + lock = ocf_rq_trylock_wr(rq); + } + + OCF_METADATA_UNLOCK_RD(); + + if (mapped) { + if (lock >= 0) { + OCF_DEBUG_RQ(rq, "Fast path success"); + + if (lock != OCF_LOCK_ACQUIRED) { + /* Lock was not acquired, need to wait for resume */ + OCF_DEBUG_RQ(rq, "NO LOCK"); + } else { + /* Lock was acquired can perform IO */ + ocf_write_wb_do(rq); + } + } else { + OCF_DEBUG_RQ(rq, "Fast path lock failure"); + rq->complete(rq, lock); + ocf_rq_put(rq); + } + } else { + OCF_DEBUG_RQ(rq, "Fast path failure"); + } + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return mapped ? OCF_FAST_PATH_YES : OCF_FAST_PATH_NO; + +} diff --git a/src/engine/engine_fast.h b/src/engine/engine_fast.h new file mode 100644 index 0000000..383118a --- /dev/null +++ b/src/engine/engine_fast.h @@ -0,0 +1,12 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_FAST_H_ +#define ENGINE_FAST_H_ + +int ocf_read_fast(struct ocf_request *rq); +int ocf_write_fast(struct ocf_request *rq); + +#endif /* ENGINE_WI_H_ */ diff --git a/src/engine/engine_inv.c b/src/engine/engine_inv.c new file mode 100644 index 0000000..1d6531b --- /dev/null +++ b/src/engine/engine_inv.c @@ -0,0 +1,74 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "engine_inv.h" +#include "engine_common.h" +#include "cache_engine.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_cache_line.h" +#include "../metadata/metadata.h" +#include "../concurrency/ocf_concurrency.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "inv" +#include "engine_debug.h" + +static void _ocf_invalidate_rq(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) { + rq->error = error; + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + cache_errors.write); + } + + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + OCF_DEBUG_RQ(rq, "Completion"); + + if (rq->error) + ocf_engine_error(rq, true, "Failed to flush metadata to cache"); + + ocf_rq_unlock(rq); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); +} + +static int _ocf_invalidate_do(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + ENV_BUG_ON(env_atomic_read(&rq->req_remaining)); + + OCF_METADATA_LOCK_WR(); + 
ocf_purge_map_info(rq); + OCF_METADATA_UNLOCK_WR(); + + env_atomic_inc(&rq->req_remaining); + + if (ocf_data_obj_is_atomic(&cache->device->obj) && + rq->info.flush_metadata) { + /* Metadata flush IO */ + ocf_metadata_flush_do_asynch(cache, rq, _ocf_invalidate_rq); + } + + _ocf_invalidate_rq(rq, 0); + + return 0; +} + +static const struct ocf_io_if _io_if_invalidate = { + .read = _ocf_invalidate_do, + .write = _ocf_invalidate_do, +}; + +void ocf_engine_invalidate(struct ocf_request *rq) +{ + ocf_engine_push_rq_front_if(rq, &_io_if_invalidate, true); +} diff --git a/src/engine/engine_inv.h b/src/engine/engine_inv.h new file mode 100644 index 0000000..181dd4b --- /dev/null +++ b/src/engine/engine_inv.h @@ -0,0 +1,11 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_INV_H_ +#define ENGINE_INV_H_ + +void ocf_engine_invalidate(struct ocf_request *rq); + +#endif /* ENGINE_INV_H_ */ diff --git a/src/engine/engine_ops.c b/src/engine/engine_ops.c new file mode 100644 index 0000000..2a89826 --- /dev/null +++ b/src/engine/engine_ops.c @@ -0,0 +1,65 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "engine_common.h" +#include "cache_engine.h" +#include "engine_ops.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_io.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "ops" +#include "engine_debug.h" + +static void _ocf_engine_ops_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) + rq->error |= error; + + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + OCF_DEBUG_RQ(rq, "Completion"); + + if (rq->error) { + /* An error occured */ + ocf_engine_error(rq, false, "Core operation failure"); + } + + /* Complete requests - both to cache and to core*/ + rq->complete(rq, rq->error); + + /* Release OCF request */ + ocf_rq_put(rq); +} + +int ocf_engine_ops(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + OCF_DEBUG_TRACE(rq->cache); + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* IO to the core device and to the cache device */ + env_atomic_set(&rq->req_remaining, 2); + + /* Submit operation into core device */ + ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, rq->rw, + _ocf_engine_ops_io, rq); + + ocf_submit_cache_reqs(cache, rq->map, rq, rq->rw, + 1, _ocf_engine_ops_io, rq); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + + diff --git a/src/engine/engine_ops.h b/src/engine/engine_ops.h new file mode 100644 index 0000000..fac6c87 --- /dev/null +++ b/src/engine/engine_ops.h @@ -0,0 +1,11 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __CACHE_ENGINE_OPS_H_ +#define __CACHE_ENGINE_OPS_H_ + +int ocf_engine_ops(struct ocf_request *rq); + +#endif /* __CACHE_ENGINE_OPS_H_ */ diff --git a/src/engine/engine_pt.c b/src/engine/engine_pt.c new file mode 100644 index 0000000..b6b08f1 --- /dev/null +++ b/src/engine/engine_pt.c @@ -0,0 +1,181 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "engine_pt.h" +#include "engine_common.h" +#include "cache_engine.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_io.h" +#include "../utils/utils_part.h" +#include "../metadata/metadata.h" +#include 
"../concurrency/ocf_concurrency.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "pt" +#include "engine_debug.h" + +static void _ocf_read_pt_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) + rq->error |= error; + + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + OCF_DEBUG_RQ(rq, "Completion"); + + if (rq->error) { + rq->info.core_error = 1; + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + core_errors.read); + } + + /* Complete request */ + rq->complete(rq, rq->error); + + ocf_rq_unlock_rd(rq); + + /* Release OCF request */ + ocf_rq_put(rq); +} + +static inline void _ocf_read_pt_submit(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + env_atomic_set(&rq->req_remaining, 1); /* Core device IO */ + + OCF_DEBUG_RQ(rq, "Submit"); + + /* Core read */ + ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_READ, + _ocf_read_pt_io, rq); +} + +int ocf_read_pt_do(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + if (rq->info.dirty_any) { + OCF_METADATA_LOCK_RD(); + /* Need to clean, start it */ + ocf_engine_clean(rq); + OCF_METADATA_UNLOCK_RD(); + + /* Do not processing, because first we need to clean request */ + ocf_rq_put(rq); + + return 0; + } + + if (rq->info.re_part) { + OCF_DEBUG_RQ(rq, "Re-Part"); + + OCF_METADATA_LOCK_WR(); + + /* Probably some cache lines are assigned into wrong + * partition. Need to move it to new one + */ + ocf_part_move(rq); + + OCF_METADATA_UNLOCK_WR(); + } + + /* Submit read IO to the core */ + _ocf_read_pt_submit(rq); + + /* Update statistics */ + ocf_engine_update_block_stats(rq); + env_atomic64_inc(&cache->core_obj[rq->core_id].counters-> + part_counters[rq->part_id].read_reqs.pass_through); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + +static const struct ocf_io_if _io_if_pt_resume = { + .read = ocf_read_pt_do, + .write = ocf_read_pt_do, +}; + +int ocf_read_pt(struct ocf_request *rq) +{ + bool use_cache = false; + int lock = OCF_LOCK_NOT_ACQUIRED; + struct ocf_cache *cache = rq->cache; + + OCF_DEBUG_TRACE(rq->cache); + + ocf_io_start(rq->io); + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* Set resume call backs */ + rq->resume = ocf_engine_on_resume; + rq->io_if = &_io_if_pt_resume; + + OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/ + + /* Traverse request to check if there are mapped cache lines */ + ocf_engine_traverse(rq); + + if (rq->info.seq_cutoff && ocf_engine_is_dirty_all(rq)) { + use_cache = true; + } else { + if (ocf_engine_mapped_count(rq)) { + /* There are mapped cache line, + * lock request for READ access + */ + lock = ocf_rq_trylock_rd(rq); + } else { + /* No mapped cache lines, no need to get lock */ + lock = OCF_LOCK_ACQUIRED; + } + } + + OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/ + + if (use_cache) { + /* + * There is dirt HIT, and sequential cut off, + * because of this force read data from cache + */ + ocf_rq_clear(rq); + ocf_get_io_if(ocf_cache_mode_wt)->read(rq); + } else { + if (lock >= 0) { + if (lock == OCF_LOCK_ACQUIRED) { + /* Lock acquired perform read off operations */ + ocf_read_pt_do(rq); + } else { + /* WR lock was not acquired, need to wait for resume */ + OCF_DEBUG_RQ(rq, "NO LOCK"); + } + } else { + OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock); + rq->complete(rq, lock); + ocf_rq_put(rq); + } + } + + /* Put OCF request - 
decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + +void ocf_engine_push_rq_front_pt(struct ocf_request *rq) +{ + ocf_engine_push_rq_front_if(rq, &_io_if_pt_resume, true); +} + diff --git a/src/engine/engine_pt.h b/src/engine/engine_pt.h new file mode 100644 index 0000000..b221535 --- /dev/null +++ b/src/engine/engine_pt.h @@ -0,0 +1,15 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_OFF_H_ +#define ENGINE_OFF_H_ + +int ocf_read_pt(struct ocf_request *rq); + +int ocf_read_pt_do(struct ocf_request *rq); + +void ocf_engine_push_rq_front_pt(struct ocf_request *rq); + +#endif /* ENGINE_OFF_H_ */ diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c new file mode 100644 index 0000000..22f7611 --- /dev/null +++ b/src/engine/engine_rd.c @@ -0,0 +1,319 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "engine_rd.h" +#include "engine_pt.h" +#include "engine_inv.h" +#include "engine_bf.h" +#include "engine_common.h" +#include "cache_engine.h" +#include "../concurrency/ocf_concurrency.h" +#include "../utils/utils_io.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_part.h" +#include "../metadata/metadata.h" +#include "../ocf_def_priv.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "rd" +#include "engine_debug.h" + +static void _ocf_read_generic_hit_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) + rq->error |= error; + + if (rq->error) + inc_fallback_pt_error_counter(rq->cache); + + /* Handle callback-caller race to let only one of the two complete the + * request. Also, complete original request only if this is the last + * sub-request to complete + */ + if (env_atomic_dec_return(&rq->req_remaining) == 0) { + OCF_DEBUG_RQ(rq, "HIT completion"); + + if (rq->error) { + env_atomic_inc(&rq->cache->core_obj[rq->core_id]. + counters->cache_errors.read); + ocf_engine_push_rq_front_pt(rq); + } else { + + ocf_rq_unlock(rq); + + /* Complete request */ + rq->complete(rq, rq->error); + + /* Free the request at the last point + * of the completion path + */ + ocf_rq_put(rq); + } + } +} + +static void _ocf_read_generic_miss_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + struct ocf_cache *cache = rq->cache; + + if (error) + rq->error = error; + + /* Handle callback-caller race to let only one of the two complete the + * request. Also, complete original request only if this is the last + * sub-request to complete + */ + if (env_atomic_dec_return(&rq->req_remaining) == 0) { + OCF_DEBUG_RQ(rq, "MISS completion"); + + if (rq->error) { + /* + * --- Do not submit this request to write-back-thread. + * Stop it here --- + */ + rq->complete(rq, rq->error); + + rq->info.core_error = 1; + env_atomic_inc(&cache->core_obj[rq->core_id]. 
+ counters->core_errors.read); + + ctx_data_free(cache->owner, rq->cp_data); + rq->cp_data = NULL; + + /* Invalidate metadata */ + ocf_engine_invalidate(rq); + + return; + } + + /* Copy pages to copy vec, since this is the one needed + * by the above layer + */ + ctx_data_cpy(cache->owner, rq->cp_data, rq->data, 0, 0, + rq->byte_length); + + /* Complete request */ + rq->complete(rq, rq->error); + + ocf_engine_backfill(rq); + } +} + +static inline void _ocf_read_generic_submit_hit(struct ocf_request *rq) +{ + env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq)); + + ocf_submit_cache_reqs(rq->cache, rq->map, rq, OCF_READ, + ocf_engine_io_count(rq), _ocf_read_generic_hit_io, rq); +} + +static inline void _ocf_read_generic_submit_miss(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + int ret; + + env_atomic_set(&rq->req_remaining, 1); + + rq->cp_data = ctx_data_alloc(cache->owner, + BYTES_TO_PAGES(rq->byte_length)); + if (!rq->cp_data) + goto err_alloc; + + ret = ctx_data_mlock(cache->owner, rq->cp_data); + if (ret) + goto err_alloc; + + /* Submit read request to core device. */ + ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_READ, + _ocf_read_generic_miss_io, rq); + + return; + +err_alloc: + _ocf_read_generic_miss_io(rq, -ENOMEM); +} + +static int _ocf_read_generic_do(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + if (ocf_engine_is_miss(rq) && rq->map->rd_locked) { + /* Miss can be handled only on write locks. + * Need to switch to PT + */ + OCF_DEBUG_RQ(rq, "Switching to PT"); + ocf_read_pt_do(rq); + return 0; + } + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + if (ocf_engine_is_miss(rq)) { + if (rq->info.dirty_any) { + OCF_METADATA_LOCK_RD(); + + /* Request is dirty need to clean request */ + ocf_engine_clean(rq); + + OCF_METADATA_UNLOCK_RD(); + + /* We need to clean request before processing, return */ + ocf_rq_put(rq); + + return 0; + } + + OCF_METADATA_LOCK_RD(); + + /* Set valid status bits map */ + ocf_set_valid_map_info(rq); + + OCF_METADATA_UNLOCK_RD(); + } + + if (rq->info.re_part) { + OCF_DEBUG_RQ(rq, "Re-Part"); + + OCF_METADATA_LOCK_WR(); + + /* Probably some cache lines are assigned into wrong + * partition. 
Need to move it to new one + */ + ocf_part_move(rq); + + OCF_METADATA_UNLOCK_WR(); + } + + OCF_DEBUG_RQ(rq, "Submit"); + + /* Submit IO */ + if (ocf_engine_is_hit(rq)) + _ocf_read_generic_submit_hit(rq); + else + _ocf_read_generic_submit_miss(rq); + + /* Updata statistics */ + ocf_engine_update_request_stats(rq); + ocf_engine_update_block_stats(rq); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + +static const struct ocf_io_if _io_if_read_generic_resume = { + .read = _ocf_read_generic_do, + .write = _ocf_read_generic_do, +}; + +int ocf_read_generic(struct ocf_request *rq) +{ + bool mapped; + int lock = OCF_LOCK_NOT_ACQUIRED; + struct ocf_cache *cache = rq->cache; + + ocf_io_start(rq->io); + + if (env_atomic_read(&cache->pending_read_misses_list_blocked)) { + /* There are conditions to bypass IO */ + ocf_get_io_if(ocf_cache_mode_pt)->read(rq); + return 0; + } + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* Set resume call backs */ + rq->resume = ocf_engine_on_resume; + rq->io_if = &_io_if_read_generic_resume; + + /*- Metadata RD access -----------------------------------------------*/ + + OCF_METADATA_LOCK_RD(); + + /* Traverse request to cache if there is hit */ + ocf_engine_traverse(rq); + + mapped = ocf_engine_is_mapped(rq); + if (mapped) { + /* Request is fully mapped, no need to call eviction */ + if (ocf_engine_is_hit(rq)) { + /* There is a hit, lock request for READ access */ + lock = ocf_rq_trylock_rd(rq); + } else { + /* All cache line mapped, but some sectors are not valid + * and cache insert will be performed - lock for + * WRITE is required + */ + lock = ocf_rq_trylock_wr(rq); + } + } + + OCF_METADATA_UNLOCK_RD(); + + /*- END Metadata RD access -------------------------------------------*/ + + if (!mapped) { + + /*- Metadata WR access ---------------------------------------*/ + + OCF_METADATA_LOCK_WR(); + + /* Now there is exclusive access for metadata. May traverse once + * again. If there are misses need to call eviction. This + * process is called 'mapping'. 
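+ * Mapping may require eviction; if eviction fails, the request is
+ * redirected to the pass-through engine below.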
+ */ + ocf_engine_map(rq); + + if (!rq->info.eviction_error) { + if (ocf_engine_is_hit(rq)) { + /* After mapping turns out there is hit, + * so lock OCF request for read access + */ + lock = ocf_rq_trylock_rd(rq); + } else { + /* Miss, new cache lines were mapped, + * need to lock OCF request for write access + */ + lock = ocf_rq_trylock_wr(rq); + } + } + OCF_METADATA_UNLOCK_WR(); + + /*- END Metadata WR access -----------------------------------*/ + } + + if (!rq->info.eviction_error) { + if (lock >= 0) { + if (lock != OCF_LOCK_ACQUIRED) { + /* Lock was not acquired, need to wait for resume */ + OCF_DEBUG_RQ(rq, "NO LOCK"); + } else { + /* Lock was acquired can perform IO */ + _ocf_read_generic_do(rq); + } + } else { + OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock); + rq->complete(rq, lock); + ocf_rq_put(rq); + } + } else { + ocf_rq_clear(rq); + ocf_get_io_if(ocf_cache_mode_pt)->read(rq); + } + + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} diff --git a/src/engine/engine_rd.h b/src/engine/engine_rd.h new file mode 100644 index 0000000..275778b --- /dev/null +++ b/src/engine/engine_rd.h @@ -0,0 +1,11 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_RD_H_ +#define ENGINE_RD_H_ + +int ocf_read_generic(struct ocf_request *rq); + +#endif /* ENGINE_RD_H_ */ diff --git a/src/engine/engine_wa.c b/src/engine/engine_wa.c new file mode 100644 index 0000000..c7f0ffe --- /dev/null +++ b/src/engine/engine_wa.c @@ -0,0 +1,92 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "engine_wa.h" +#include "engine_common.h" +#include "cache_engine.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_io.h" +#include "../metadata/metadata.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "wa" +#include "engine_debug.h" + +static void _ocf_read_wa_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) + rq->error |= error; + + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + if (rq->error) { + rq->info.core_error = 1; + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + core_errors.write); + } + + /* Complete request */ + rq->complete(rq, rq->error); + + OCF_DEBUG_RQ(rq, "Completion"); + + /* Release OCF request */ + ocf_rq_put(rq); +} + +int ocf_write_wa(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + ocf_io_start(rq->io); + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + OCF_METADATA_LOCK_RD(); /*- Metadata RD access -----------------------*/ + + /* Traverse request to check if there are mapped cache lines */ + ocf_engine_traverse(rq); + + OCF_METADATA_UNLOCK_RD(); /*- END Metadata RD access -----------------*/ + + if (ocf_engine_is_hit(rq)) { + ocf_rq_clear(rq); + + /* There is HIT, do WT */ + ocf_get_io_if(ocf_cache_mode_wt)->write(rq); + + } else if (ocf_engine_mapped_count(rq)) { + ocf_rq_clear(rq); + + /* Partial MISS, do WI */ + ocf_get_io_if(ocf_cache_mode_wi)->write(rq); + } else { + + /* There is no mapped cache line, write directly into core */ + + OCF_DEBUG_RQ(rq, "Submit"); + + /* Submit write IO to the core */ + env_atomic_set(&rq->req_remaining, 1); + ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, + OCF_WRITE, _ocf_read_wa_io, rq); + + /* Update statistics */ + ocf_engine_update_block_stats(rq); + env_atomic64_inc(&cache->core_obj[rq->core_id].counters-> + 
part_counters[rq->part_id].write_reqs.pass_through); + } + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + + diff --git a/src/engine/engine_wa.h b/src/engine/engine_wa.h new file mode 100644 index 0000000..ab23d71 --- /dev/null +++ b/src/engine/engine_wa.h @@ -0,0 +1,11 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_WA_H_ +#define ENGINE_WA_H_ + +int ocf_write_wa(struct ocf_request *rq); + +#endif /* ENGINE_WA_H_ */ diff --git a/src/engine/engine_wb.c b/src/engine/engine_wb.c new file mode 100644 index 0000000..a774227 --- /dev/null +++ b/src/engine/engine_wb.c @@ -0,0 +1,242 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "cache_engine.h" +#include "engine_common.h" +#include "engine_wb.h" +#include "../metadata/metadata.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_io.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_part.h" +#include "../concurrency/ocf_concurrency.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "wb" +#include "engine_debug.h" + +static const struct ocf_io_if _io_if_wb_resume = { + .read = ocf_write_wb_do, + .write = ocf_write_wb_do, +}; + +static void _ocf_write_wb_update_bits(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + if (ocf_engine_is_miss(rq)) { + OCF_METADATA_LOCK_RD(); + /* Update valid status bits */ + ocf_set_valid_map_info(rq); + + OCF_METADATA_UNLOCK_RD(); + } + + if (!ocf_engine_is_dirty_all(rq)) { + OCF_METADATA_LOCK_WR(); + + /* set dirty bits, and mark if metadata flushing is required */ + ocf_set_dirty_map_info(rq); + + OCF_METADATA_UNLOCK_WR(); + } +} + +static void _ocf_write_wb_io_flush_metadata(void *private_data, int error) +{ + struct ocf_request *rq = (struct ocf_request *) private_data; + + if (error) + rq->error = error; + + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + if (rq->error) + ocf_engine_error(rq, true, "Failed to write data to cache"); + + ocf_rq_unlock_wr(rq); + + rq->complete(rq, rq->error); + + ocf_rq_put(rq); +} + +static int ocf_write_wb_do_flush_metadata(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + env_atomic_set(&rq->req_remaining, 1); /* One core IO */ + + if (rq->info.flush_metadata) { + OCF_DEBUG_RQ(rq, "Flush metadata"); + ocf_metadata_flush_do_asynch(cache, rq, + _ocf_write_wb_io_flush_metadata); + } + + _ocf_write_wb_io_flush_metadata(rq, 0); + + return 0; +} + +static const struct ocf_io_if _io_if_wb_flush_metadata = { + .read = ocf_write_wb_do_flush_metadata, + .write = ocf_write_wb_do_flush_metadata, +}; + +static void _ocf_write_wb_io(void *private_data, int error) +{ + struct ocf_request *rq = (struct ocf_request *) private_data; + + if (error) { + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + cache_errors.write); + rq->error |= error; + } + + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + OCF_DEBUG_RQ(rq, "Completion"); + + if (rq->error) { + ocf_engine_error(rq, true, "Failed to write data to cache"); + + ocf_rq_unlock_wr(rq); + + rq->complete(rq, rq->error); + + ocf_rq_put(rq); + } else { + ocf_engine_push_rq_front_if(rq, &_io_if_wb_flush_metadata, + true); + } +} + + +static inline void _ocf_write_wb_submit(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq)); + + /* + * 1. 
Submit data + * 2. Wait for completion of data + * 3. Then continue processing request (flush metadata) + */ + + if (rq->info.re_part) { + OCF_DEBUG_RQ(rq, "Re-Part"); + + OCF_METADATA_LOCK_WR(); + + /* Probably some cache lines are assigned into wrong + * partition. Need to move it to new one + */ + ocf_part_move(rq); + + OCF_METADATA_UNLOCK_WR(); + } + + OCF_DEBUG_RQ(rq, "Submit Data"); + + /* Data IO */ + ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE, + ocf_engine_io_count(rq), _ocf_write_wb_io, rq); +} + +int ocf_write_wb_do(struct ocf_request *rq) +{ + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* Updata status bits */ + _ocf_write_wb_update_bits(rq); + + /* Submit IO */ + _ocf_write_wb_submit(rq); + + /* Updata statistics */ + ocf_engine_update_request_stats(rq); + ocf_engine_update_block_stats(rq); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + +int ocf_write_wb(struct ocf_request *rq) +{ + bool mapped; + int lock = OCF_LOCK_NOT_ACQUIRED; + struct ocf_cache *cache = rq->cache; + + ocf_io_start(rq->io); + + /* Not sure if we need this. */ + ocf_rq_get(rq); + + /* Set resume call backs */ + rq->resume = ocf_engine_on_resume; + rq->io_if = &_io_if_wb_resume; + + /* TODO: Handle fits into dirty */ + + OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/ + + /* Travers to check if request is mapped fully */ + ocf_engine_traverse(rq); + + mapped = ocf_engine_is_mapped(rq); + if (mapped) { + /* All cache line are mapped, lock request for WRITE access */ + lock = ocf_rq_trylock_wr(rq); + } + + OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ + + if (!mapped) { + OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/ + + /* Now there is exclusive access for metadata. May traverse once + * again. If there are misses need to call eviction. This + * process is called 'mapping'. 
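+ * On eviction failure the write falls back to the pass-through engine
+ * below instead of being cached.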
+ */ + ocf_engine_map(rq); + + if (!rq->info.eviction_error) { + /* Lock request for WRITE access */ + lock = ocf_rq_trylock_wr(rq); + } + + OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ + } + + if (!rq->info.eviction_error) { + if (lock >= 0) { + if (lock != OCF_LOCK_ACQUIRED) { + /* WR lock was not acquired, need to wait for resume */ + OCF_DEBUG_RQ(rq, "NO LOCK"); + } else { + ocf_write_wb_do(rq); + } + } else { + OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock); + rq->complete(rq, lock); + ocf_rq_put(rq); + } + } else { + ocf_rq_clear(rq); + ocf_get_io_if(ocf_cache_mode_pt)->write(rq); + } + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} diff --git a/src/engine/engine_wb.h b/src/engine/engine_wb.h new file mode 100644 index 0000000..7e9b00d --- /dev/null +++ b/src/engine/engine_wb.h @@ -0,0 +1,12 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef ENGINE_WB_H_ +#define ENGINE_WB_H_ + +int ocf_write_wb(struct ocf_request *rq); + +int ocf_write_wb_do(struct ocf_request *rq); + +#endif /* ENGINE_WI_H_ */ diff --git a/src/engine/engine_wi.c b/src/engine/engine_wi.c new file mode 100644 index 0000000..823d36e --- /dev/null +++ b/src/engine/engine_wi.c @@ -0,0 +1,190 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "engine_wi.h" +#include "engine_common.h" +#include "../concurrency/ocf_concurrency.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_io.h" +#include "../metadata/metadata.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "wi" +#include "engine_debug.h" + +static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq); + +static const struct ocf_io_if _io_if_wi_flush_metadata = { + .read = ocf_write_wi_update_and_flush_metadata, + .write = ocf_write_wi_update_and_flush_metadata, +}; + +static void _ocf_write_wi_io_flush_metadata(void *private_data, int error) +{ + struct ocf_request *rq = (struct ocf_request *) private_data; + + if (error) { + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + cache_errors.write); + rq->error |= error; + } + + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + if (rq->error) + ocf_engine_error(rq, true, "Failed to write data to cache"); + + ocf_rq_unlock_wr(rq); + + rq->complete(rq, rq->error); + + ocf_rq_put(rq); +} + +static int ocf_write_wi_update_and_flush_metadata(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + env_atomic_set(&rq->req_remaining, 1); /* One core IO */ + + if (ocf_engine_mapped_count(rq)) { + /* There are mapped cache line, need to remove them */ + + OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/ + + /* Remove mapped cache lines from metadata */ + ocf_purge_map_info(rq); + + OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ + + if (rq->info.flush_metadata) { + /* Request was dirty and need to flush metadata */ + ocf_metadata_flush_do_asynch(cache, rq, + _ocf_write_wi_io_flush_metadata); + } + + } + + _ocf_write_wi_io_flush_metadata(rq, 0); + + return 0; +} + +static void _ocf_write_wi_core_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) { + rq->error = error; + rq->info.core_error = 1; + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + core_errors.write); + } + + if (env_atomic_dec_return(&rq->req_remaining)) + 
return; + + OCF_DEBUG_RQ(rq, "Completion"); + + if (rq->error) { + ocf_rq_unlock_wr(rq); + + rq->complete(rq, rq->error); + + ocf_rq_put(rq); + } else { + ocf_engine_push_rq_front_if(rq, &_io_if_wi_flush_metadata, + true); + } +} + +static int _ocf_write_wi_do(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + env_atomic_set(&rq->req_remaining, 1); /* One core IO */ + + OCF_DEBUG_RQ(rq, "Submit"); + + /* Submit write IO to the core */ + ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_WRITE, + _ocf_write_wi_core_io, rq); + + /* Update statistics */ + ocf_engine_update_block_stats(rq); + env_atomic64_inc(&cache->core_obj[rq->core_id].counters-> + part_counters[rq->part_id].write_reqs.pass_through); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + +static void _ocf_write_wi_on_resume(struct ocf_request *rq) +{ + OCF_DEBUG_RQ(rq, "On resume"); + ocf_engine_push_rq_front(rq, true); +} + +static const struct ocf_io_if _io_if_wi_resume = { + .read = _ocf_write_wi_do, + .write = _ocf_write_wi_do, +}; + +int ocf_write_wi(struct ocf_request *rq) +{ + int lock = OCF_LOCK_NOT_ACQUIRED; + struct ocf_cache *cache = rq->cache; + + OCF_DEBUG_TRACE(rq->cache); + + ocf_io_start(rq->io); + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* Set resume call backs */ + rq->resume = _ocf_write_wi_on_resume; + rq->io_if = &_io_if_wi_resume; + + OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/ + + /* Travers to check if request is mapped fully */ + ocf_engine_traverse(rq); + + if (ocf_engine_mapped_count(rq)) { + /* Some cache line are mapped, lock request for WRITE access */ + lock = ocf_rq_trylock_wr(rq); + } else { + lock = OCF_LOCK_ACQUIRED; + } + + OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ + + if (lock >= 0) { + if (lock == OCF_LOCK_ACQUIRED) { + _ocf_write_wi_do(rq); + } else { + /* WR lock was not acquired, need to wait for resume */ + OCF_DEBUG_RQ(rq, "NO LOCK"); + } + } else { + OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock); + rq->complete(rq, lock); + ocf_rq_put(rq); + } + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} diff --git a/src/engine/engine_wi.h b/src/engine/engine_wi.h new file mode 100644 index 0000000..8051e99 --- /dev/null +++ b/src/engine/engine_wi.h @@ -0,0 +1,11 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_WI_H_ +#define ENGINE_WI_H_ + +int ocf_write_wi(struct ocf_request *rq); + +#endif /* ENGINE_WI_H_ */ diff --git a/src/engine/engine_wt.c b/src/engine/engine_wt.c new file mode 100644 index 0000000..31562a8 --- /dev/null +++ b/src/engine/engine_wt.c @@ -0,0 +1,236 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "engine_wt.h" +#include "engine_inv.h" +#include "engine_common.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_io.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_part.h" +#include "../metadata/metadata.h" +#include "../concurrency/ocf_concurrency.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "wt" +#include "engine_debug.h" + +static void _ocf_write_wt_io(struct ocf_request *rq) +{ + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + OCF_DEBUG_RQ(rq, "Completion"); + + if (rq->error) { 
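+ /* IO to the cache or the core failed: complete the request
+ * (propagating only core errors to the caller) and invalidate
+ * the mapped cache lines via ocf_engine_invalidate().
+ */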
+ /* An error occured */ + + /* Complete request */ + rq->complete(rq, rq->info.core_error ? rq->error : 0); + + ocf_engine_invalidate(rq); + } else { + /* Unlock reqest from WRITE access */ + ocf_rq_unlock_wr(rq); + + /* Complete request */ + rq->complete(rq, rq->info.core_error ? rq->error : 0); + + /* Release OCF request */ + ocf_rq_put(rq); + } +} + +static void _ocf_write_wt_cache_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) { + rq->error = rq->error ?: error; + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + cache_errors.write); + + if (rq->error) + inc_fallback_pt_error_counter(rq->cache); + } + + _ocf_write_wt_io(rq); +} + +static void _ocf_write_wt_core_io(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) { + rq->error = error; + rq->info.core_error = 1; + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + core_errors.write); + } + + _ocf_write_wt_io(rq); +} + +static inline void _ocf_write_wt_submit(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + /* Submit IOs */ + OCF_DEBUG_RQ(rq, "Submit"); + + /* Calculate how many IOs need to be submited */ + env_atomic_set(&rq->req_remaining, ocf_engine_io_count(rq)); /* Cache IO */ + env_atomic_inc(&rq->req_remaining); /* Core device IO */ + + if (rq->info.flush_metadata) { + /* Metadata flush IO */ + + ocf_metadata_flush_do_asynch(cache, rq, + _ocf_write_wt_cache_io); + } + + /* To cache */ + ocf_submit_cache_reqs(cache, rq->map, rq, OCF_WRITE, + ocf_engine_io_count(rq), _ocf_write_wt_cache_io, rq); + + /* To core */ + ocf_submit_obj_req(&cache->core_obj[rq->core_id].obj, rq, OCF_WRITE, + _ocf_write_wt_core_io, rq); +} + +static void _ocf_write_wt_update_bits(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + if (ocf_engine_is_miss(rq)) { + OCF_METADATA_LOCK_RD(); + + /* Update valid status bits */ + ocf_set_valid_map_info(rq); + + OCF_METADATA_UNLOCK_RD(); + } + + if (rq->info.dirty_any) { + OCF_METADATA_LOCK_WR(); + + /* Writes goes to SDD and HDD, need to update status bits from + * dirty to clean + */ + + ocf_set_clean_map_info(rq); + + OCF_METADATA_UNLOCK_WR(); + } + + if (rq->info.re_part) { + OCF_DEBUG_RQ(rq, "Re-Part"); + + OCF_METADATA_LOCK_WR(); + + /* Probably some cache lines are assigned into wrong + * partition. 
Need to move it to new one + */ + ocf_part_move(rq); + + OCF_METADATA_UNLOCK_WR(); + } +} + +static int _ocf_write_wt_do(struct ocf_request *rq) +{ + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* Update status bits */ + _ocf_write_wt_update_bits(rq); + + /* Submit IO */ + _ocf_write_wt_submit(rq); + + /* Updata statistics */ + ocf_engine_update_request_stats(rq); + ocf_engine_update_block_stats(rq); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + +static const struct ocf_io_if _io_if_wt_resume = { + .read = _ocf_write_wt_do, + .write = _ocf_write_wt_do, +}; + +int ocf_write_wt(struct ocf_request *rq) +{ + bool mapped; + int lock = OCF_LOCK_NOT_ACQUIRED; + struct ocf_cache *cache = rq->cache; + + ocf_io_start(rq->io); + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* Set resume call backs */ + rq->resume = ocf_engine_on_resume; + rq->io_if = &_io_if_wt_resume; + + OCF_METADATA_LOCK_RD(); /*- Metadata READ access, No eviction --------*/ + + /* Travers to check if request is mapped fully */ + ocf_engine_traverse(rq); + + mapped = ocf_engine_is_mapped(rq); + if (mapped) { + /* All cache line are mapped, lock request for WRITE access */ + lock = ocf_rq_trylock_wr(rq); + } + + OCF_METADATA_UNLOCK_RD(); /*- END Metadata READ access----------------*/ + + if (!mapped) { + OCF_METADATA_LOCK_WR(); /*- Metadata WR access, eviction -----*/ + + /* Now there is exclusive access for metadata. May traverse once + * again. If there are misses need to call eviction. This + * process is called 'mapping'. + */ + ocf_engine_map(rq); + + if (!rq->info.eviction_error) { + /* Lock request for WRITE access */ + lock = ocf_rq_trylock_wr(rq); + } + + OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ + } + + if (!rq->info.eviction_error) { + if (lock >= 0) { + if (lock != OCF_LOCK_ACQUIRED) { + /* WR lock was not acquired, need to wait for resume */ + OCF_DEBUG_RQ(rq, "NO LOCK"); + } else { + _ocf_write_wt_do(rq); + } + } else { + OCF_DEBUG_RQ(rq, "LOCK ERROR %d\n", lock); + rq->complete(rq, lock); + ocf_rq_put(rq); + } + } else { + ocf_rq_clear(rq); + ocf_get_io_if(ocf_cache_mode_pt)->write(rq); + } + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} diff --git a/src/engine/engine_wt.h b/src/engine/engine_wt.h new file mode 100644 index 0000000..5b69024 --- /dev/null +++ b/src/engine/engine_wt.h @@ -0,0 +1,11 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_WT_H_ +#define ENGINE_WT_H_ + +int ocf_write_wt(struct ocf_request *rq); + +#endif /* ENGINE_WT_H_ */ diff --git a/src/engine/engine_zero.c b/src/engine/engine_zero.c new file mode 100644 index 0000000..6d09eb7 --- /dev/null +++ b/src/engine/engine_zero.c @@ -0,0 +1,168 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "engine_zero.h" +#include "engine_common.h" +#include "../concurrency/ocf_concurrency.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_io.h" +#include "../metadata/metadata.h" + +#define OCF_ENGINE_DEBUG_IO_NAME "zero" +#include "engine_debug.h" + +static int ocf_zero_purge(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + if (rq->error) { + ocf_engine_error(rq, true, "Failed to discard data on cache"); + } else { + /* There are 
mapped cache line, need to remove them */ + + OCF_METADATA_LOCK_WR(); /*- Metadata WR access ---------------*/ + + /* Remove mapped cache lines from metadata */ + ocf_purge_map_info(rq); + + OCF_METADATA_UNLOCK_WR(); /*- END Metadata WR access ---------*/ + } + + ocf_rq_unlock_wr(rq); + + rq->complete(rq, rq->error); + + ocf_rq_put(rq); + + return 0; +} + +static const struct ocf_io_if _io_if_zero_purge = { + .read = ocf_zero_purge, + .write = ocf_zero_purge, +}; + +static void _ocf_zero_io_flush_metadata(void *private_data, int error) +{ + struct ocf_request *rq = (struct ocf_request *) private_data; + + if (error) { + env_atomic_inc(&rq->cache->core_obj[rq->core_id].counters-> + cache_errors.write); + rq->error = error; + } + + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + ocf_engine_push_rq_front_if(rq, &_io_if_zero_purge, true); +} + +static inline void ocf_zero_map_info(struct ocf_request *rq) +{ + uint32_t map_idx = 0; + uint8_t start_bit; + uint8_t end_bit; + struct ocf_map_info *map = rq->map; + struct ocf_cache *cache = rq->cache; + uint32_t count = rq->core_line_count; + + /* Purge range on the basis of map info + * + * | 01234567 | 01234567 | ... | 01234567 | 01234567 | + * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- | + * | first | Middle | last | + */ + + for (map_idx = 0; map_idx < count; map_idx++) { + if (map[map_idx].status == LOOKUP_MISS) + continue; + + start_bit = 0; + end_bit = ocf_line_end_sector(cache); + + if (map_idx == 0) { + /* First */ + start_bit = BYTES_TO_SECTORS(rq->byte_position) + % ocf_line_sectors(cache); + } + + if (map_idx == (count - 1)) { + /* Last */ + end_bit = BYTES_TO_SECTORS(rq->byte_position + + rq->byte_length - 1) % + ocf_line_sectors(cache); + } + + ocf_metadata_flush_mark(cache, rq, map_idx, INVALID, + start_bit, end_bit); + } +} + +static int _ocf_zero_do(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + + /* Get OCF request - increase reference counter */ + ocf_rq_get(rq); + + /* Mark cache lines for zeroing/discarding */ + ocf_zero_map_info(rq); + + /* Discard marked cache lines */ + env_atomic_set(&rq->req_remaining, 1); + if (rq->info.flush_metadata) { + /* Request was dirty and need to flush metadata */ + ocf_metadata_flush_do_asynch(cache, rq, + _ocf_zero_io_flush_metadata); + } + _ocf_zero_io_flush_metadata(rq, 0); + + /* Put OCF request - decrease reference counter */ + ocf_rq_put(rq); + + return 0; +} + +static const struct ocf_io_if _io_if_ocf_zero_do = { + .read = _ocf_zero_do, + .write = _ocf_zero_do, +}; + +/** + * @note + * - Caller has to have metadata write lock + * - Core line has to be mapped + */ +void ocf_engine_zero_line(struct ocf_request *rq) +{ + int lock = OCF_LOCK_NOT_ACQUIRED; + + ENV_BUG_ON(rq->core_line_count != 1); + + /* Traverse to check if request is mapped */ + ocf_engine_traverse(rq); + + ENV_BUG_ON(!ocf_engine_is_mapped(rq)); + + rq->resume = ocf_engine_on_resume; + rq->io_if = &_io_if_ocf_zero_do; + + /* Some cache line are mapped, lock request for WRITE access */ + lock = ocf_rq_trylock_wr(rq); + + if (lock >= 0) { + ENV_BUG_ON(lock != OCF_LOCK_ACQUIRED); + ocf_engine_push_rq_front_if(rq, &_io_if_ocf_zero_do, true); + } else { + OCF_DEBUG_RQ(rq, "LOCK ERROR %d", lock); + rq->complete(rq, lock); + ocf_rq_put(rq); + } +} + diff --git a/src/engine/engine_zero.h b/src/engine/engine_zero.h new file mode 100644 index 0000000..ba58519 --- /dev/null +++ b/src/engine/engine_zero.h @@ -0,0 +1,11 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * 
SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef ENGINE_ZERO_H_ +#define ENGINE_ZERO_H_ + +void ocf_engine_zero_line(struct ocf_request *rq); + +#endif /* ENGINE_ZERO_H_ */ diff --git a/src/eviction/eviction.c b/src/eviction/eviction.c new file mode 100644 index 0000000..4196dea --- /dev/null +++ b/src/eviction/eviction.c @@ -0,0 +1,19 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "eviction.h" + +struct eviction_policy_ops evict_policy_ops[ocf_eviction_max] = { + [ocf_eviction_lru] = { + .init_cline = evp_lru_init_cline, + .rm_cline = evp_lru_rm_cline, + .req_clines = evp_lru_req_clines, + .hot_cline = evp_lru_hot_cline, + .init_evp = evp_lru_init_evp, + .dirty_cline = evp_lru_dirty_cline, + .clean_cline = evp_lru_clean_cline, + .name = "lru", + }, +}; diff --git a/src/eviction/eviction.h b/src/eviction/eviction.h new file mode 100644 index 0000000..f3a8a9b --- /dev/null +++ b/src/eviction/eviction.h @@ -0,0 +1,56 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __LAYER_EVICTION_POLICY_H__ + +#define __LAYER_EVICTION_POLICY_H__ + +#define OCF_PENDING_EVICTION_LIMIT 512UL + +#include "ocf/ocf.h" +#include "lru.h" +#include "lru_structs.h" + +struct eviction_policy { + union { + struct lru_eviction_policy lru; + } policy; +}; + +/* Eviction policy metadata per cache line */ +union eviction_policy_meta { + struct lru_eviction_policy_meta lru; +} __attribute__((packed)); + +/* the caller must hold the metadata lock for all operations + * + * For range operations the caller can: + * set core_id to -1 to purge the whole cache device + * set core_id to -2 to purge the whole cache partition + */ +struct eviction_policy_ops { + void (*init_cline)(struct ocf_cache *cache, + ocf_cache_line_t cline); + void (*rm_cline)(struct ocf_cache *cache, + ocf_cache_line_t cline); + bool (*can_evict)(struct ocf_cache *cache); + uint32_t (*req_clines)(struct ocf_cache *cache, + uint32_t io_queue, ocf_part_id_t part_id, + uint32_t cline_no, ocf_core_id_t core_id); + void (*hot_cline)(struct ocf_cache *cache, + ocf_cache_line_t cline); + void (*init_evp)(struct ocf_cache *cache, + ocf_part_id_t part_id); + void (*dirty_cline)(struct ocf_cache *cache, + ocf_part_id_t part_id, + uint32_t cline_no); + void (*clean_cline)(struct ocf_cache *cache, + ocf_part_id_t part_id, + uint32_t cline_no); + const char *name; +}; + +extern struct eviction_policy_ops evict_policy_ops[ocf_eviction_max]; + +#endif diff --git a/src/eviction/lru.c b/src/eviction/lru.c new file mode 100644 index 0000000..afaa986 --- /dev/null +++ b/src/eviction/lru.c @@ -0,0 +1,503 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "eviction.h" +#include "lru.h" +#include "ops.h" +#include "../utils/utils_cleaner.h" +#include "../utils/utils_cache_line.h" +#include "../concurrency/ocf_concurrency.h" +#include "../mngt/ocf_mngt_common.h" +#include "../engine/engine_zero.h" +#include "../utils/utils_rq.h" + +#define OCF_EVICTION_MAX_SCAN 1024 + +/* -- Start of LRU functions --*/ + +/* Returns 1 if the given collision_index is the _head_ of + * the LRU list, 0 otherwise. 
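+ * Note: each partition keeps two LRU lists, one for clean and one for dirty
+ * cache lines; the value collision_table_entries is used as the NULL-like
+ * list terminator throughout this file.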
+ */ +/* static inline int is_lru_head(unsigned collision_index) { + * return collision_index == lru_list.lru_head; + * } + */ + +#define is_lru_head(x) (x == collision_table_entries) +#define is_lru_tail(x) (x == collision_table_entries) + +/* Sets the given collision_index as the new _head_ of the LRU list. */ +static inline void update_lru_head(struct ocf_cache *cache, + int partition_id, unsigned int collision_index, + int cline_dirty) +{ + struct ocf_user_part *part = &cache->user_parts[partition_id]; + + + if (cline_dirty) + part->runtime->eviction.policy.lru.dirty_head = collision_index; + else + part->runtime->eviction.policy.lru.clean_head = collision_index; +} + +/* Sets the given collision_index as the new _tail_ of the LRU list. */ +static inline void update_lru_tail(struct ocf_cache *cache, + int partition_id, unsigned int collision_index, + int cline_dirty) +{ + struct ocf_user_part *part = &cache->user_parts[partition_id]; + + if (cline_dirty) + part->runtime->eviction.policy.lru.dirty_tail = collision_index; + else + part->runtime->eviction.policy.lru.clean_tail = collision_index; +} + +/* Sets the given collision_index as the new _head_ and _tail_ of + * the LRU list. + */ +static inline void update_lru_head_tail(struct ocf_cache *cache, + int partition_id, unsigned int collision_index, int cline_dirty) +{ + update_lru_head(cache, partition_id, collision_index, cline_dirty); + update_lru_tail(cache, partition_id, collision_index, cline_dirty); +} + +/* Adds the given collision_index to the _head_ of the LRU list */ +static void add_lru_head(struct ocf_cache *cache, int partition_id, + unsigned int collision_index, int cline_dirty) +{ + unsigned int curr_head_index; + unsigned int collision_table_entries = + cache->device->collision_table_entries; + struct ocf_user_part *part = &cache->user_parts[partition_id]; + union eviction_policy_meta eviction; + + ENV_BUG_ON(!(collision_index < collision_table_entries)); + + ocf_metadata_get_evicition_policy(cache, collision_index, &eviction); + + /* First node to be added/ */ + if ((cline_dirty && !part->runtime->eviction.policy.lru.has_dirty_nodes) || + (!cline_dirty && !part->runtime->eviction.policy.lru.has_clean_nodes)) { + update_lru_head_tail(cache, partition_id, collision_index, cline_dirty); + + eviction.lru.next = collision_table_entries; + eviction.lru.prev = collision_table_entries; + + if (cline_dirty) + part->runtime->eviction.policy.lru.has_dirty_nodes = 1; + else + part->runtime->eviction.policy.lru.has_clean_nodes = 1; + + ocf_metadata_set_evicition_policy(cache, collision_index, + &eviction); + } else { + union eviction_policy_meta eviction_curr; + + /* Not the first node to be added. */ + curr_head_index = cline_dirty ? 
+ part->runtime->eviction.policy.lru.dirty_head : + part->runtime->eviction.policy.lru.clean_head; + + ENV_BUG_ON(!(curr_head_index < collision_table_entries)); + + ocf_metadata_get_evicition_policy(cache, curr_head_index, + &eviction_curr); + + eviction.lru.next = curr_head_index; + eviction.lru.prev = collision_table_entries; + eviction_curr.lru.prev = collision_index; + + update_lru_head(cache, partition_id, collision_index, cline_dirty); + + ocf_metadata_set_evicition_policy(cache, curr_head_index, + &eviction_curr); + ocf_metadata_set_evicition_policy(cache, collision_index, + &eviction); + } +} + +/* Deletes the node with the given collision_index from the lru list */ +static void remove_lru_list(struct ocf_cache *cache, int partition_id, + unsigned int collision_index, int cline_dirty) +{ + int is_clean_head = 0, is_clean_tail = 0, is_dirty_head = 0, is_dirty_tail = 0; + uint32_t prev_lru_node, next_lru_node; + uint32_t collision_table_entries = cache->device->collision_table_entries; + struct ocf_user_part *part = &cache->user_parts[partition_id]; + union eviction_policy_meta eviction; + + ENV_BUG_ON(!(collision_index < collision_table_entries)); + + ocf_metadata_get_evicition_policy(cache, collision_index, &eviction); + + /* Find out if this node is LRU _head_ or LRU _tail_ */ + if (part->runtime->eviction.policy.lru.clean_head == collision_index) + is_clean_head = 1; + if (part->runtime->eviction.policy.lru.dirty_head == collision_index) + is_dirty_head = 1; + if (part->runtime->eviction.policy.lru.clean_tail == collision_index) + is_clean_tail = 1; + if (part->runtime->eviction.policy.lru.dirty_tail == collision_index) + is_dirty_tail = 1; + ENV_BUG_ON((is_clean_tail || is_clean_head) && (is_dirty_tail || is_dirty_head)); + + /* Set prev and next (even if not existent) */ + next_lru_node = eviction.lru.next; + prev_lru_node = eviction.lru.prev; + + /* Case 1: If we are head AND tail, there is only one node. + * So unlink node and set that there is no node left in the list. 
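+	 * After unlinking, head and tail are set to the collision_table_entries
+	 * sentinel and the has_clean/has_dirty flag of this list is cleared.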
+ */ + if ((is_clean_head && is_clean_tail) || (is_dirty_head && is_dirty_tail)) { + eviction.lru.next = collision_table_entries; + eviction.lru.prev = collision_table_entries; + + update_lru_head_tail(cache, partition_id, collision_table_entries, cline_dirty); + + if (cline_dirty) + part->runtime->eviction.policy.lru.has_dirty_nodes = 0; + else + part->runtime->eviction.policy.lru.has_clean_nodes = 0; + + ocf_metadata_set_evicition_policy(cache, collision_index, + &eviction); + + update_lru_head_tail(cache, partition_id, + collision_table_entries, cline_dirty); + } + + /* Case 2: else if this collision_index is LRU head, but not tail, + * update head and return + */ + else if ((!is_clean_tail && is_clean_head) || (!is_dirty_tail && is_dirty_head)) { + union eviction_policy_meta eviction_next; + + ENV_BUG_ON(!(next_lru_node < collision_table_entries)); + + ocf_metadata_get_evicition_policy(cache, next_lru_node, + &eviction_next); + + update_lru_head(cache, partition_id, next_lru_node, cline_dirty); + + eviction.lru.next = collision_table_entries; + eviction_next.lru.prev = collision_table_entries; + + ocf_metadata_set_evicition_policy(cache, collision_index, + &eviction); + + ocf_metadata_set_evicition_policy(cache, next_lru_node, + &eviction_next); + } + + /* Case 3: else if this collision_index is LRU tail, but not head, + * update tail and return + */ + else if ((is_clean_tail && !is_clean_head) || (is_dirty_tail && !is_dirty_head)) { + union eviction_policy_meta eviction_prev; + + ENV_BUG_ON(!(prev_lru_node < collision_table_entries)); + + update_lru_tail(cache, partition_id, prev_lru_node, cline_dirty); + + ocf_metadata_get_evicition_policy(cache, prev_lru_node, + &eviction_prev); + + eviction.lru.prev = collision_table_entries; + eviction_prev.lru.next = collision_table_entries; + + ocf_metadata_set_evicition_policy(cache, collision_index, + &eviction); + + ocf_metadata_set_evicition_policy(cache, prev_lru_node, + &eviction_prev); + } + + /* Case 4: else this collision_index is a middle node. There is no + * change to the head and the tail pointers. 
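+	 * Only the prev/next pointers of the two neighbouring nodes are spliced
+	 * around the removed entry.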
+ */ + else { + union eviction_policy_meta eviction_prev; + union eviction_policy_meta eviction_next; + + ENV_BUG_ON(!(next_lru_node < collision_table_entries)); + ENV_BUG_ON(!(prev_lru_node < collision_table_entries)); + + ocf_metadata_get_evicition_policy(cache, next_lru_node, + &eviction_next); + ocf_metadata_get_evicition_policy(cache, prev_lru_node, + &eviction_prev); + + /* Update prev and next nodes */ + eviction_prev.lru.next = eviction.lru.next; + eviction_next.lru.prev = eviction.lru.prev; + + /* Update the given node */ + eviction.lru.next = collision_table_entries; + eviction.lru.prev = collision_table_entries; + + ocf_metadata_set_evicition_policy(cache, collision_index, + &eviction); + ocf_metadata_set_evicition_policy(cache, next_lru_node, + &eviction_next); + ocf_metadata_set_evicition_policy(cache, prev_lru_node, + &eviction_prev); + } +} + +/*-- End of LRU functions*/ + +void evp_lru_init_cline(struct ocf_cache *cache, ocf_cache_line_t cline) +{ + union eviction_policy_meta eviction; + + ocf_metadata_get_evicition_policy(cache, cline, &eviction); + + eviction.lru.prev = cache->device->collision_table_entries; + eviction.lru.next = cache->device->collision_table_entries; + + ocf_metadata_set_evicition_policy(cache, cline, &eviction); +} + + +/* the caller must hold the metadata lock */ +void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline) +{ + ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline); + + remove_lru_list(cache, part_id, cline, metadata_test_dirty(cache, cline)); +} + +static void evp_lru_clean_end(void *private_data, int error) +{ + env_atomic *cleaning_in_progress = private_data; + + env_atomic_set(cleaning_in_progress, 0); +} + +static int evp_lru_clean_getter(struct ocf_cache *cache, + void *getter_context, uint32_t item, ocf_cache_line_t *line) +{ + union eviction_policy_meta eviction; + struct ocf_cleaner_attribs *attribs = getter_context; + ocf_cache_line_t prev_cline, curr_cline = attribs->getter_item; + + while (curr_cline < cache->device->collision_table_entries) { + ocf_metadata_get_evicition_policy(cache, curr_cline, + &eviction); + prev_cline = eviction.lru.prev; + + /* Prevent evicting already locked items */ + if (ocf_cache_line_is_used(cache, curr_cline)) { + curr_cline = prev_cline; + continue; + } + + ENV_BUG_ON(!metadata_test_dirty(cache, curr_cline)); + + *line = curr_cline; + attribs->getter_item = prev_cline; + return 0; + } + + return -1; +} + +static void evp_lru_clean(struct ocf_cache *cache, uint32_t io_queue, + ocf_part_id_t part_id, uint32_t count) +{ + env_atomic *progress = &cache->cleaning[part_id]; + struct ocf_user_part *part = &cache->user_parts[part_id]; + + if (ocf_mngt_is_cache_locked(cache)) + return; + + if (env_atomic_cmpxchg(progress, 0, 1) == 0) { + /* Initialize attributes for cleaner */ + struct ocf_cleaner_attribs attribs = { + .cache_line_lock = true, + .do_sort = true, + + .cmpl_context = progress, + .cmpl_fn = evp_lru_clean_end, + + .getter = evp_lru_clean_getter, + .getter_context = &attribs, + .getter_item = part->runtime->eviction.policy.lru.dirty_tail, + + .count = count > 32 ? 
32 : count, + + .io_queue = io_queue + }; + + ocf_cleaner_fire(cache, &attribs); + } +} + +static void evp_lru_zero_line_complete(struct ocf_request *ocf_req, int error) +{ + env_atomic_dec(&ocf_req->cache->pending_eviction_clines); +} + +static void evp_lru_zero_line(struct ocf_cache *cache, uint32_t io_queue, + ocf_cache_line_t line) +{ + struct ocf_request *rq; + ocf_core_id_t id; + uint64_t addr, core_line; + + ocf_metadata_get_core_info(cache, line, &id, &core_line); + addr = core_line * ocf_line_size(cache); + + rq = ocf_rq_new(cache, id, addr, ocf_line_size(cache), OCF_WRITE); + if (rq) { + rq->info.internal = true; + rq->complete = evp_lru_zero_line_complete; + rq->io_queue = io_queue; + + env_atomic_inc(&cache->pending_eviction_clines); + + ocf_engine_zero_line(rq); + } +} + +bool evp_lru_can_evict(struct ocf_cache *cache) +{ + if (env_atomic_read(&cache->pending_eviction_clines) >= + OCF_PENDING_EVICTION_LIMIT) { + return false; + } + + return true; +} + +/* the caller must hold the metadata lock */ +uint32_t evp_lru_req_clines(struct ocf_cache *cache, uint32_t io_queue, + ocf_part_id_t part_id, uint32_t cline_no, ocf_core_id_t core_id) +{ + uint32_t i; + ocf_cache_line_t curr_cline, prev_cline; + struct ocf_user_part *part = &cache->user_parts[part_id]; + union eviction_policy_meta eviction; + + if (cline_no == 0) + return 0; + + i = 0; + curr_cline = part->runtime->eviction.policy.lru.clean_tail; + /* Find cachelines to be evicted. */ + while (i < cline_no) { + ENV_BUG_ON(curr_cline > cache->device->collision_table_entries); + + if (!evp_lru_can_evict(cache)) + break; + + if (curr_cline == cache->device->collision_table_entries) + break; + + ocf_metadata_get_evicition_policy(cache, curr_cline, + &eviction); + prev_cline = eviction.lru.prev; + + /* Prevent evicting already locked items */ + if (ocf_cache_line_is_used(cache, curr_cline)) { + curr_cline = prev_cline; + continue; + } + + ENV_BUG_ON(metadata_test_dirty(cache, curr_cline)); + + if (ocf_data_obj_is_atomic(&cache->device->obj)) { + /* atomic cache, we have to trim cache lines before + * eviction + */ + evp_lru_zero_line(cache, io_queue, curr_cline); + + } else { + set_cache_line_invalid_no_flush(cache, 0, + ocf_line_end_sector(cache), + curr_cline); + + /* Goto next item. 
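+			 * Only lines invalidated here are counted as evicted; in the
+			 * atomic case the line is zeroed asynchronously and is not
+			 * added to the count.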
*/ + i++; + } + + curr_cline = prev_cline; + } + + if (i < cline_no && part->runtime->eviction.policy.lru.dirty_tail != + cache->device->collision_table_entries) { + evp_lru_clean(cache, io_queue, part_id, cline_no - i); + } + + /* Return number of clines that were really evicted */ + return i; +} + +/* the caller must hold the metadata lock */ +void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline) +{ + ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, cline); + struct ocf_user_part *part = &cache->user_parts[part_id]; + + uint32_t prev_lru_node, next_lru_node; + uint32_t collision_table_entries = cache->device->collision_table_entries; + union eviction_policy_meta eviction; + + int cline_dirty; + + ocf_metadata_get_evicition_policy(cache, cline, &eviction); + + next_lru_node = eviction.lru.next; + prev_lru_node = eviction.lru.prev; + + cline_dirty = metadata_test_dirty(cache, cline); + + if ((next_lru_node != collision_table_entries) || + (prev_lru_node != collision_table_entries) || + ((part->runtime->eviction.policy.lru.clean_head == cline) && + (part->runtime->eviction.policy.lru.clean_tail == cline)) || + ((part->runtime->eviction.policy.lru.dirty_head == cline) && + (part->runtime->eviction.policy.lru.dirty_tail == cline))) { + remove_lru_list(cache, part_id, cline, cline_dirty); + } + + /* Update LRU */ + add_lru_head(cache, part_id, cline, cline_dirty); +} + +void evp_lru_init_evp(struct ocf_cache *cache, ocf_part_id_t part_id) +{ + unsigned int collision_table_entries = + cache->device->collision_table_entries; + struct ocf_user_part *part = &cache->user_parts[part_id]; + + part->runtime->eviction.policy.lru.has_clean_nodes = 0; + part->runtime->eviction.policy.lru.has_dirty_nodes = 0; + part->runtime->eviction.policy.lru.clean_head = collision_table_entries; + part->runtime->eviction.policy.lru.clean_tail = collision_table_entries; + part->runtime->eviction.policy.lru.dirty_head = collision_table_entries; + part->runtime->eviction.policy.lru.dirty_tail = collision_table_entries; +} + +void evp_lru_clean_cline(struct ocf_cache *cache, ocf_part_id_t part_id, + uint32_t cline) +{ + OCF_METADATA_EVICTION_LOCK(); + remove_lru_list(cache, part_id, cline, 1); + add_lru_head(cache, part_id, cline, 0); + OCF_METADATA_EVICTION_UNLOCK(); +} + +void evp_lru_dirty_cline(struct ocf_cache *cache, ocf_part_id_t part_id, + uint32_t cline) +{ + OCF_METADATA_EVICTION_LOCK(); + remove_lru_list(cache, part_id, cline, 0); + add_lru_head(cache, part_id, cline, 1); + OCF_METADATA_EVICTION_UNLOCK(); +} + diff --git a/src/eviction/lru.h b/src/eviction/lru.h new file mode 100644 index 0000000..7e2df4e --- /dev/null +++ b/src/eviction/lru.h @@ -0,0 +1,23 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __EVICTION_LRU_H__ +#define __EVICTION_LRU_H__ + +#include "eviction.h" +#include "lru_structs.h" + +void evp_lru_init_cline(struct ocf_cache *cache, + ocf_cache_line_t cline); +void evp_lru_rm_cline(struct ocf_cache *cache, ocf_cache_line_t cline); +bool evp_lru_can_evict(struct ocf_cache *cache); +uint32_t evp_lru_req_clines(struct ocf_cache *cache, uint32_t io_queue, + ocf_part_id_t part_id, uint32_t cline_no, + ocf_core_id_t core_id); +void evp_lru_hot_cline(struct ocf_cache *cache, ocf_cache_line_t cline); +void evp_lru_init_evp(struct ocf_cache *cache, ocf_part_id_t part_id); +void evp_lru_dirty_cline(struct ocf_cache *cache, ocf_part_id_t part_id, uint32_t cline); +void evp_lru_clean_cline(struct ocf_cache 
*cache, ocf_part_id_t part_id, uint32_t cline); + +#endif diff --git a/src/eviction/lru_structs.h b/src/eviction/lru_structs.h new file mode 100644 index 0000000..813dd8c --- /dev/null +++ b/src/eviction/lru_structs.h @@ -0,0 +1,24 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __EVICTION_LRU_STRUCTS_H__ + +#define __EVICTION_LRU_STRUCTS_H__ + +struct lru_eviction_policy_meta { + /* LRU pointers 2*4=8 bytes */ + uint32_t prev; + uint32_t next; +} __attribute__((packed)); + +struct lru_eviction_policy { + int has_clean_nodes; + int has_dirty_nodes; + uint32_t dirty_head; + uint32_t dirty_tail; + uint32_t clean_head; + uint32_t clean_tail; +}; + +#endif diff --git a/src/eviction/ops.h b/src/eviction/ops.h new file mode 100644 index 0000000..53cab6b --- /dev/null +++ b/src/eviction/ops.h @@ -0,0 +1,108 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef LAYER_EVICTION_POLICY_OPS_H_ +#define LAYER_EVICTION_POLICY_OPS_H_ + +#include "eviction.h" +#include "../metadata/metadata.h" + +/** + * @brief Initialize cache line before adding it into eviction + * + * @note This operation is called under WR metadata lock + */ +static inline void ocf_eviction_init_cache_line(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_part_id_t part_id) +{ + uint8_t type; + + type = cache->conf_meta->eviction_policy_type; + + ENV_BUG_ON(type >= ocf_eviction_max); + + if (likely(evict_policy_ops[type].init_cline)) + evict_policy_ops[type].init_cline(cache, line); +} + +static inline void ocf_eviction_purge_cache_line( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + uint8_t type = cache->conf_meta->eviction_policy_type; + + ENV_BUG_ON(type >= ocf_eviction_max); + + if (likely(evict_policy_ops[type].rm_cline)) { + OCF_METADATA_EVICTION_LOCK(); + evict_policy_ops[type].rm_cline(cache, line); + OCF_METADATA_EVICTION_UNLOCK(); + } +} + + +static inline bool ocf_eviction_can_evict(struct ocf_cache *cache) +{ + uint8_t type = cache->conf_meta->eviction_policy_type; + + if (likely(evict_policy_ops[type].can_evict)) + return evict_policy_ops[type].can_evict(cache); + + return true; +} + +static inline uint32_t ocf_eviction_need_space(struct ocf_cache *cache, + uint32_t io_queue, ocf_part_id_t part_id, uint32_t clines, + ocf_core_id_t core_id) +{ + uint8_t type; + uint32_t result = 0; + + ENV_BUG_ON(core_id >= OCF_CORE_MAX); + + type = cache->conf_meta->eviction_policy_type; + + ENV_BUG_ON(type >= ocf_eviction_max); + + if (likely(evict_policy_ops[type].req_clines)) { + /* + * This is called under METADATA WR lock. No need to get + * eviction lock. 
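+		 * (wrappers that modify the LRU list, e.g.
+		 * ocf_eviction_set_hot_cache_line(), take
+		 * OCF_METADATA_EVICTION_LOCK around the policy callback).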
+ */ + result = evict_policy_ops[type].req_clines(cache, io_queue, + part_id, clines, core_id); + } + + return result; +} + +static inline void ocf_eviction_set_hot_cache_line( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + uint8_t type = cache->conf_meta->eviction_policy_type; + + ENV_BUG_ON(type >= ocf_eviction_max); + + if (likely(evict_policy_ops[type].hot_cline)) { + OCF_METADATA_EVICTION_LOCK(); + evict_policy_ops[type].hot_cline(cache, line); + OCF_METADATA_EVICTION_UNLOCK(); + } +} + +static inline void ocf_eviction_initialize(struct ocf_cache *cache, + ocf_part_id_t part_id) +{ + uint8_t type = cache->conf_meta->eviction_policy_type; + + ENV_BUG_ON(type >= ocf_eviction_max); + + if (likely(evict_policy_ops[type].init_evp)) { + OCF_METADATA_EVICTION_LOCK(); + evict_policy_ops[type].init_evp(cache, part_id); + OCF_METADATA_EVICTION_UNLOCK(); + } +} + +#endif /* LAYER_EVICTION_POLICY_OPS_H_ */ diff --git a/src/layer_space_management.c b/src/layer_space_management.c new file mode 100644 index 0000000..9408a19 --- /dev/null +++ b/src/layer_space_management.c @@ -0,0 +1,114 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "layer_space_management.h" +#include "utils/utils_allocator.h" +#include "utils/utils_part.h" +#include "concurrency/ocf_concurrency.h" +#include "engine/engine_common.h" +#include "eviction/ops.h" + +static uint32_t ocf_evict_calculate(struct ocf_user_part *part, + uint32_t to_evict) +{ + if (part->runtime->curr_size <= part->config->min_size) { + /* + * Cannot evict from this partition because current size + * is less than minimum size + */ + return 0; + } + + if (to_evict < OCF_TO_EVICTION_MIN) + to_evict = OCF_TO_EVICTION_MIN; + + if (to_evict > (part->runtime->curr_size - part->config->min_size)) + to_evict = part->runtime->curr_size - part->config->min_size; + + return to_evict; +} + +static inline uint32_t ocf_evict_do(struct ocf_cache *cache, + uint32_t io_queue, const uint32_t evict_cline_no, + ocf_core_id_t core_id, ocf_part_id_t target_part_id) +{ + uint32_t to_evict = 0, evicted = 0; + struct ocf_user_part *part; + struct ocf_user_part *target_part = &cache->user_parts[target_part_id]; + ocf_part_id_t part_id; + + /* For each partition from the lowest priority to highest one */ + for_each_part(cache, part, part_id) { + + if (!ocf_eviction_can_evict(cache)) + goto out; + + /* + * Check stop and continue conditions + */ + if (target_part->config->priority > part->config->priority) { + /* + * iterate partition have higher priority, do not evict + */ + break; + } + if (!part->config->flags.eviction) { + /* It seams that no more partition for eviction */ + break; + } + if (part_id == target_part_id) { + /* Omit targeted, evict from different first */ + continue; + } + if (evicted >= evict_cline_no) { + /* Evicted requested number of cache line, stop */ + goto out; + } + + to_evict = ocf_evict_calculate(part, evict_cline_no); + if (to_evict == 0) { + /* No cache lines to evict for this partition */ + continue; + } + + evicted += ocf_eviction_need_space(cache, io_queue, + part_id, to_evict, core_id); + } + + if (!ocf_eviction_can_evict(cache)) + goto out; + + if (evicted < evict_cline_no) { + /* Now we can evict form targeted partition */ + to_evict = ocf_evict_calculate(target_part, evict_cline_no); + if (to_evict) { + evicted += ocf_eviction_need_space(cache, io_queue, + target_part_id, to_evict, core_id); + } + } + +out: + return evicted; +} + +int 
space_managment_evict_do(struct ocf_cache *cache, + struct ocf_request *req, uint32_t evict_cline_no) +{ + uint32_t evicted; + + if (evict_cline_no <= cache->device->freelist_part->curr_size) + return LOOKUP_MAPPED; + + evict_cline_no = evict_cline_no - cache->device->freelist_part->curr_size; + evicted = ocf_evict_do(cache, req->io_queue, evict_cline_no, + req->core_id, req->part_id); + + if (evict_cline_no <= evicted) + return LOOKUP_MAPPED; + + req->info.eviction_error |= true; + return LOOKUP_MISS; +} diff --git a/src/layer_space_management.h b/src/layer_space_management.h new file mode 100644 index 0000000..e7b325e --- /dev/null +++ b/src/layer_space_management.h @@ -0,0 +1,25 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __LAYER_SPACE_MANAGEMENT_H__ + +#define __LAYER_SPACE_MANAGEMENT_H__ + +#include "ocf_request.h" + +#define OCF_TO_EVICTION_MIN 128UL + +/* + * Deallocates space from low priority partitions. + * + * Returns -1 on error + * or the destination partition ID for the free buffers + * (it matches label and is part of the object (#core_id) IO group) + */ +int space_managment_evict_do(struct ocf_cache *cache, + struct ocf_request *req, uint32_t evict_cline_no); + +int space_management_free(struct ocf_cache *cache, uint32_t count); + +#endif diff --git a/src/metadata/metadata.c b/src/metadata/metadata.c new file mode 100644 index 0000000..5131c77 --- /dev/null +++ b/src/metadata/metadata.c @@ -0,0 +1,388 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" + +#include "metadata.h" +#include "metadata_hash.h" +#include "metadata_io.h" +#include "../ocf_priv.h" +#include "../utils/utils_io.h" +#include "../utils/utils_cache_line.h" + +#define OCF_METADATA_DEBUG 0 + +#if 1 == OCF_METADATA_DEBUG +#define OCF_DEBUG_TRACE(cache) \ + ocf_cache_log(cache, log_info, "[Metadata][Hash] %s\n", __func__) +#else +#define OCF_DEBUG_TRACE(cache) +#endif + +int ocf_metadata_init(struct ocf_cache *cache, + ocf_cache_line_size_t cache_line_size) +{ + struct ocf_metadata_iface *iface = (struct ocf_metadata_iface *) + &cache->metadata.iface; + int ret; + + OCF_DEBUG_TRACE(cache); + + ENV_BUG_ON(cache->metadata.iface_priv); + + ret = ocf_metadata_io_init(cache); + if (ret) + return ret; + + *iface = *metadata_hash_get_iface(); + ret = cache->metadata.iface.init(cache, cache_line_size); + if (ret) + ocf_metadata_io_deinit(cache); + + return ret; +} + +int ocf_metadata_init_variable_size(struct ocf_cache *cache, uint64_t device_size, + ocf_cache_line_size_t cache_line_size, + ocf_metadata_layout_t layout) +{ + OCF_DEBUG_TRACE(cache); + return cache->metadata.iface.init_variable_size(cache, device_size, + cache_line_size, layout); +} + +void ocf_metadata_init_freelist_partition(struct ocf_cache *cache) +{ + OCF_DEBUG_TRACE(cache); + cache->metadata.iface.layout_iface->init_freelist(cache); +} + +void ocf_metadata_init_hash_table(struct ocf_cache *cache) +{ + OCF_DEBUG_TRACE(cache); + cache->metadata.iface.init_hash_table(cache); +} + +void ocf_metadata_deinit(struct ocf_cache *cache) +{ + OCF_DEBUG_TRACE(cache); + + if (cache->metadata.iface.deinit) { + cache->metadata.iface.deinit(cache); + } + + ocf_metadata_io_deinit(cache); +} + +void ocf_metadata_deinit_variable_size(struct ocf_cache *cache) +{ + OCF_DEBUG_TRACE(cache); + + if (cache->metadata.iface.deinit_variable_size) + cache->metadata.iface.deinit_variable_size(cache); +} + +size_t 
ocf_metadata_size_of(struct ocf_cache *cache) +{ + return cache->metadata.iface.size_of(cache); +} + +void ocf_metadata_error(struct ocf_cache *cache) +{ + if (cache->device->metadata_error == 0) + ocf_cache_log(cache, log_err, "Metadata Error\n"); + + env_bit_clear(ocf_cache_state_running, &cache->cache_state); + cache->device->metadata_error = -1; +} + +ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache) +{ + return cache->metadata.iface.pages(cache); +} + +ocf_cache_line_t +ocf_metadata_get_cachelines_count(struct ocf_cache *cache) +{ + return cache->metadata.iface.cachelines(cache); +} + +int ocf_metadata_flush_all(struct ocf_cache *cache) +{ + int result; + + OCF_METADATA_LOCK_WR(); + result = cache->metadata.iface.flush_all(cache); + OCF_METADATA_UNLOCK_WR(); + return result; +} + +void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line) +{ + cache->metadata.iface.flush(cache, line); +} + +int ocf_metadata_load_all(struct ocf_cache *cache) +{ + int result; + + OCF_METADATA_LOCK_WR(); + result = cache->metadata.iface.load_all(cache); + OCF_METADATA_UNLOCK_WR(); + return result; +} + +int ocf_metadata_load_recovery(struct ocf_cache *cache) +{ + return cache->metadata.iface.load_recovery(cache); +} + +void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq, + uint32_t map_idx, int to_state, uint8_t start, uint8_t stop) +{ + cache->metadata.iface.flush_mark(cache, rq, map_idx, to_state, + start, stop); +} + +void ocf_metadata_flush_do_asynch(struct ocf_cache *cache, + struct ocf_request *rq, ocf_end_t complete) +{ + cache->metadata.iface.flush_do_asynch(cache, rq, complete); +} + +static inline int ocf_metadata_check_properties(void) +{ + uint32_t field_offset; + + /* Because metadata basic properties are on the beginning of super block + * read/write only first page of supper block. + * + * For safety reason check if offset of metadata properties are in first + * page of super block. 
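+	 * (each offsetof() checked below must be smaller than PAGE_SIZE).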
+ * + * Maybe in future super block fields order may be changed and metadata + * variant may go out first page of super block + */ + + field_offset = offsetof(struct ocf_superblock_config, line_size); + ENV_BUG_ON(field_offset >= PAGE_SIZE); + + /* The same checking for magic number */ + field_offset = offsetof(struct ocf_superblock_config, magic_number); + ENV_BUG_ON(field_offset >= PAGE_SIZE); + + /* The same checking for IO interface type */ + field_offset = offsetof(struct ocf_superblock_config, cache_mode); + ENV_BUG_ON(field_offset >= PAGE_SIZE); + + /* And the same for version location within superblock structure */ + field_offset = offsetof(struct ocf_superblock_config, metadata_version); + ENV_BUG_ON(field_offset >= PAGE_SIZE); + + return 0; +} + +static int ocf_metadata_read_properties(ocf_ctx_t ctx, ocf_data_obj_t cache_obj, + struct ocf_superblock_config *superblock) +{ + ctx_data_t *data; + struct ocf_io *io; + int result = 0; + + if (ocf_metadata_check_properties()) + return -EINVAL; + + /* Allocate resources for IO */ + io = ocf_dobj_new_io(cache_obj); + data = ctx_data_alloc(ctx, 1); + + /* Check allocation result */ + if (!io || !data) { + ocf_log(ctx, log_err, "Memory allocation error"); + result = -ENOMEM; + goto out; + } + + /* + * Read first page of cache device in order to recover metadata + * properties + */ + result = ocf_io_set_data(io, data, 0); + if (result) { + ocf_log(ctx, log_err, "Metadata IO configuration error\n"); + result = -EIO; + goto out; + } + ocf_io_configure(io, 0, PAGE_SIZE, OCF_READ, 0, 0); + result = ocf_submit_io_wait(io); + if (result) { + ocf_log(ctx, log_err, "Metadata IO request submit error\n"); + result = -EIO; + goto out; + } + + /* Read data from data into super block buffer */ + ctx_data_rd_check(ctx, superblock, data, + PAGE_SIZE); + +out: + if (io) + ocf_io_put(io); + ctx_data_free(ctx, data); + + return result; +} + +/** + * @brief function loads individual properties from metadata set + * @param cache_obj object from which to load metadata + * @param variant - field to which save metadata variant; if NULL, + * metadata variant won't be read. 
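+ * @param layout - metadata layout; if NULL it won't be read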
+ * @param cache mode; if NULL is passed it won't be read + * @param shutdown_status - dirty shutdown or clean shutdown + * @param dirty_flushed - if all dirty data was flushed prior to closing + * the cache + * @return 0 upon successful completion + */ +int ocf_metadata_load_properties(ocf_data_obj_t cache_obj, + ocf_cache_line_size_t *line_size, + ocf_metadata_layout_t *layout, + ocf_cache_mode_t *cache_mode, + enum ocf_metadata_shutdown_status *shutdown_status, + uint8_t *dirty_flushed) +{ + struct ocf_superblock_config *superblock; + int err_value = 0; + + /* Allocate first page of super block */ + superblock = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL); + if (!superblock) { + ocf_cache_log(cache_obj->cache, log_err, + "Allocation memory error"); + return -ENOMEM; + } + + OCF_DEBUG_TRACE(cache); + + err_value = ocf_metadata_read_properties(cache_obj->cache->owner, + cache_obj, superblock); + if (err_value) + goto ocf_metadata_load_variant_ERROR; + + if (superblock->magic_number != CACHE_MAGIC_NUMBER) { + err_value = -ENODATA; + ocf_cache_log(cache_obj->cache, log_info, + "Can not detect pre-existing metadata\n"); + goto ocf_metadata_load_variant_ERROR; + } + + if (METADATA_VERSION() != superblock->metadata_version) { + err_value = -EBADF; + ocf_cache_log(cache_obj->cache, log_err, + "Metadata version mismatch!\n"); + goto ocf_metadata_load_variant_ERROR; + } + + if (line_size) { + if (ocf_cache_line_size_is_valid(superblock->line_size)) { + *line_size = superblock->line_size; + } else { + err_value = -EINVAL; + ocf_cache_log(cache_obj->cache, log_err, + "ERROR: Invalid cache line size!\n"); + } + } + + if (layout) { + if (superblock->metadata_layout >= ocf_metadata_layout_max || + superblock->metadata_layout < 0) { + err_value = -EINVAL; + ocf_cache_log(cache_obj->cache, log_err, + "ERROR: Invalid metadata layout!\n"); + } else { + *layout = superblock->metadata_layout; + } + } + + if (cache_mode) { + if (superblock->cache_mode < ocf_cache_mode_max) { + *cache_mode = superblock->cache_mode; + } else { + ocf_cache_log(cache_obj->cache, log_err, + "ERROR: Invalid cache mode!\n"); + err_value = -EINVAL; + } + } + + if (shutdown_status != NULL) { + if (superblock->clean_shutdown <= ocf_metadata_clean_shutdown) { + *shutdown_status = superblock->clean_shutdown; + } else { + ocf_cache_log(cache_obj->cache, log_err, + "ERROR: Invalid shutdown status!\n"); + err_value = -EINVAL; + } + } + + if (dirty_flushed != NULL) { + if (superblock->dirty_flushed <= DIRTY_FLUSHED) { + *dirty_flushed = superblock->dirty_flushed; + } else { + ocf_cache_log(cache_obj->cache, log_err, + "ERROR: Invalid flush status!\n"); + err_value = -EINVAL; + } + } + +ocf_metadata_load_variant_ERROR: + + env_free(superblock); + return err_value; +} + +int ocf_metadata_probe(ocf_ctx_t ctx, ocf_data_obj_t cache_obj, + bool *clean_shutdown, bool *cache_dirty) +{ + struct ocf_superblock_config *superblock; + int result = 0; + + OCF_CHECK_NULL(ctx); + OCF_CHECK_NULL(cache_obj); + + /* Allocate first page of super block */ + superblock = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL); + if (!superblock) { + ocf_log(ctx, log_err, "Allocation memory error"); + return -ENOMEM; + } + + OCF_DEBUG_TRACE(cache); + + result = ocf_metadata_read_properties(ctx, cache_obj, superblock); + if (result) + goto ocf_metadata_probe_END; + + if (superblock->magic_number != CACHE_MAGIC_NUMBER) { + result = -ENODATA; + goto ocf_metadata_probe_END; + } + + if (clean_shutdown != NULL) { + *clean_shutdown = (superblock->clean_shutdown != + 
ocf_metadata_dirty_shutdown); + } + + if (cache_dirty != NULL) + *cache_dirty = (superblock->dirty_flushed == DIRTY_NOT_FLUSHED); + + if (METADATA_VERSION() != superblock->metadata_version) + result = -EBADF; + +ocf_metadata_probe_END: + + env_free(superblock); + return result; +} + diff --git a/src/metadata/metadata.h b/src/metadata/metadata.h new file mode 100644 index 0000000..1fae063 --- /dev/null +++ b/src/metadata/metadata.h @@ -0,0 +1,336 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_H__ +#define __METADATA_H__ + +#include "../ocf_cache_priv.h" +#include "../ocf_ctx_priv.h" + +static inline void ocf_metadata_eviction_lock(struct ocf_cache *cache) +{ + env_spinlock_lock(&cache->metadata.lock.eviction); +} + +static inline void ocf_metadata_eviction_unlock(struct ocf_cache *cache) +{ + env_spinlock_unlock(&cache->metadata.lock.eviction); +} + +#define OCF_METADATA_EVICTION_LOCK() \ + ocf_metadata_eviction_lock(cache) + +#define OCF_METADATA_EVICTION_UNLOCK() \ + ocf_metadata_eviction_unlock(cache) + +static inline void ocf_metadata_lock(struct ocf_cache *cache, int rw) +{ + if (rw == OCF_METADATA_WR) + env_rwsem_down_write(&cache->metadata.lock.collision); + else if (rw == OCF_METADATA_RD) + env_rwsem_down_read(&cache->metadata.lock.collision); + else + ENV_BUG(); +} + + +static inline void ocf_metadata_unlock(struct ocf_cache *cache, int rw) +{ + if (rw == OCF_METADATA_WR) + env_rwsem_up_write(&cache->metadata.lock.collision); + else if (rw == OCF_METADATA_RD) + env_rwsem_up_read(&cache->metadata.lock.collision); + else + ENV_BUG(); +} + +static inline int ocf_metadata_try_lock(struct ocf_cache *cache, int rw) +{ + int result = -1; + + if (rw == OCF_METADATA_WR) { + result = env_rwsem_down_write_trylock( + &cache->metadata.lock.collision); + } else if (rw == OCF_METADATA_RD) { + result = env_rwsem_down_read_trylock( + &cache->metadata.lock.collision); + } else { + ENV_BUG(); + } + + if (!result) + return -1; + + return 0; +} + +static inline void ocf_metadata_status_bits_lock( + struct ocf_cache *cache, int rw) +{ + if (rw == OCF_METADATA_WR) + env_rwlock_write_lock(&cache->metadata.lock.status); + else if (rw == OCF_METADATA_RD) + env_rwlock_read_lock(&cache->metadata.lock.status); + else + ENV_BUG(); +} + +static inline void ocf_metadata_status_bits_unlock( + struct ocf_cache *cache, int rw) +{ + if (rw == OCF_METADATA_WR) + env_rwlock_write_unlock(&cache->metadata.lock.status); + else if (rw == OCF_METADATA_RD) + env_rwlock_read_unlock(&cache->metadata.lock.status); + else + ENV_BUG(); +} + +#define OCF_METADATA_LOCK_RD() \ + ocf_metadata_lock(cache, OCF_METADATA_RD) + +#define OCF_METADATA_UNLOCK_RD() \ + ocf_metadata_unlock(cache, OCF_METADATA_RD) + +#define OCF_METADATA_LOCK_RD_TRY() \ + ocf_metadata_try_lock(cache, OCF_METADATA_RD) + +#define OCF_METADATA_LOCK_WR() \ + ocf_metadata_lock(cache, OCF_METADATA_WR) + +#define OCF_METADATA_LOCK_WR_TRY() \ + ocf_metadata_try_lock(cache, OCF_METADATA_WR) + +#define OCF_METADATA_UNLOCK_WR() \ + ocf_metadata_unlock(cache, OCF_METADATA_WR) + +#define OCF_METADATA_BITS_LOCK_RD() \ + ocf_metadata_status_bits_lock(cache, OCF_METADATA_RD) + +#define OCF_METADATA_BITS_UNLOCK_RD() \ + ocf_metadata_status_bits_unlock(cache, OCF_METADATA_RD) + +#define OCF_METADATA_BITS_LOCK_WR() \ + ocf_metadata_status_bits_lock(cache, OCF_METADATA_WR) + +#define OCF_METADATA_BITS_UNLOCK_WR() \ + ocf_metadata_status_bits_unlock(cache, OCF_METADATA_WR) + +#define 
OCF_METADATA_FLUSH_LOCK() \ + ocf_metadata_flush_lock(cache) + +#define OCF_METADATA_FLUSH_UNLOCK() \ + ocf_metadata_flush_unlock(cache) + +#include "metadata_cleaning_policy.h" +#include "metadata_eviction_policy.h" +#include "metadata_partition.h" +#include "metadata_hash.h" +#include "metadata_superblock.h" +#include "metadata_status.h" +#include "metadata_collision.h" +#include "metadata_core.h" +#include "metadata_misc.h" + +#define INVALID 0 +#define VALID 1 +#define CLEAN 2 +#define DIRTY 3 + +/** + * @brief Initialize metadata + * + * @param cache - Cache instance + * @param cache_line_size Cache line size + * @return 0 - Operation success otherwise failure + */ +int ocf_metadata_init(struct ocf_cache *cache, + ocf_cache_line_size_t cache_line_size); + +/** + * @brief Initialize per-cacheline metadata + * + * @param cache - Cache instance + * @param device_size - Device size in bytes + * @param cache_line_size Cache line size + * @return 0 - Operation success otherwise failure + */ +int ocf_metadata_init_variable_size(struct ocf_cache *cache, + uint64_t device_size, ocf_cache_line_size_t cache_line_size, + ocf_metadata_layout_t layout); + +/** + * @brief Initialize collision table + * + * @param cache - Cache instance + */ +void ocf_metadata_init_freelist_partition(struct ocf_cache *cache); + +/** + * @brief Initialize hash table + * + * @param cache - Cache instance + */ +void ocf_metadata_init_hash_table(struct ocf_cache *cache); + +/** + * @brief De-Initialize metadata + * + * @param cache - Cache instance + */ +void ocf_metadata_deinit(struct ocf_cache *cache); + +/** + * @brief De-Initialize per-cacheline metadata + * + * @param cache - Cache instance + */ +void ocf_metadata_deinit_variable_size(struct ocf_cache *cache); + +/** + * @brief Get memory footprint + * + * @param cache - Cache instance + * @return 0 - memory footprint + */ +size_t ocf_metadata_size_of(struct ocf_cache *cache); + +/** + * @brief Handle metadata error + * + * @param cache - Cache instance + */ +void ocf_metadata_error(struct ocf_cache *cache); + +/** + * @brief Get amount of cache lines + * + * @param cache - Cache instance + * @return Amount of cache lines (cache device lines - metadata space) + */ +ocf_cache_line_t +ocf_metadata_get_cachelines_count(struct ocf_cache *cache); + +/** + * @brief Get amount of pages required for metadata + * + * @param cache - Cache instance + * @return Pages required for store metadata on cache device + */ +ocf_cache_line_t ocf_metadata_get_pages_count(struct ocf_cache *cache); + +/** + * @brief Flush metadata + * + * @param cache + * @return 0 - Operation success otherwise failure + */ +int ocf_metadata_flush_all(struct ocf_cache *cache); + + +/** + * @brief Flush metadata for specified cache line + * + * @param[in] cache - Cache instance + * @param[in] line - cache line which to be flushed + */ +void ocf_metadata_flush(struct ocf_cache *cache, ocf_cache_line_t line); + +/** + * @brief Mark specified cache line to be flushed + * + * @param[in] cache - Cache instance + * @param[in] line - cache line which to be flushed + */ +void ocf_metadata_flush_mark(struct ocf_cache *cache, struct ocf_request *rq, + uint32_t map_idx, int to_state, uint8_t start, uint8_t stop); + +/** + * @brief Flush marked cache lines asynchronously + * + * @param cache - Cache instance + * @param queue - I/O queue to which metadata flush should be submitted + * @param remaining - request remaining + * @param complete - flushing request callback + * @param context - context that will be passed 
into callback + */ +void ocf_metadata_flush_do_asynch(struct ocf_cache *cache, + struct ocf_request *rq, ocf_end_t complete); + +/** + * @brief Load metadata + * + * @param cache - Cache instance + * @return 0 - Operation success otherwise failure + */ +int ocf_metadata_load_all(struct ocf_cache *cache); + +/** + * @brief Load metadata required for recovery procedure + * + * @param cache Cache instance + * @return 0 - Operation success otherwise failure + */ +int ocf_metadata_load_recovery(struct ocf_cache *cache); + +/* + * NOTE Hash table is specific for hash table metadata service implementation + * and should be used internally by metadata service. + * At the moment there is no high level metadata interface because of that + * temporary defined in this file. + */ + +static inline ocf_cache_line_t +ocf_metadata_get_hash(struct ocf_cache *cache, ocf_cache_line_t index) +{ + return cache->metadata.iface.get_hash(cache, index); +} + +static inline void ocf_metadata_set_hash(struct ocf_cache *cache, + ocf_cache_line_t index, ocf_cache_line_t line) +{ + cache->metadata.iface.set_hash(cache, index, line); +} + +static inline void ocf_metadata_flush_hash(struct ocf_cache *cache, + ocf_cache_line_t index) +{ + cache->metadata.iface.flush_hash(cache, index); +} + +static inline ocf_cache_line_t ocf_metadata_entries_hash( + struct ocf_cache *cache) +{ + return cache->metadata.iface.entries_hash(cache); +} + +int ocf_metadata_load_properties(ocf_data_obj_t cache_obj, + ocf_cache_line_size_t *line_size, + ocf_metadata_layout_t *layout, + ocf_cache_mode_t *cache_mode, + enum ocf_metadata_shutdown_status *shutdown_status, + uint8_t *dirty_flushed); + +/** + * @brief Validate cache line size + * + * @param size Cache line size + * @return true - cache line size is valid, false - cache line is invalid + */ +static inline bool ocf_metadata_line_size_is_valid(uint32_t size) +{ + switch (size) { + case 4 * KiB: + case 8 * KiB: + case 16 * KiB: + case 32 * KiB: + case 64 * KiB: + return true; + default: + return false; + } +} + +#endif /* METADATA_H_ */ diff --git a/src/metadata/metadata_bit.h b/src/metadata/metadata_bit.h new file mode 100644 index 0000000..b54d040 --- /dev/null +++ b/src/metadata/metadata_bit.h @@ -0,0 +1,240 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +/******************************************************************************* + * Sector mask getter + ******************************************************************************/ + +static inline uint64_t _get_mask(uint8_t start, uint8_t stop) +{ + uint64_t mask = 0; + + ENV_BUG_ON(start >= 64); + ENV_BUG_ON(stop >= 64); + ENV_BUG_ON(stop < start); + + mask = ~mask; + mask >>= start + (63 - stop); + mask <<= start; + + return mask; +} + +#define _get_mask_u8(start, stop) _get_mask(start, stop) +#define _get_mask_u16(start, stop) _get_mask(start, stop) +#define _get_mask_u32(start, stop) _get_mask(start, stop) +#define _get_mask_u64(start, stop) _get_mask(start, stop) + +typedef __uint128_t u128; + +static inline u128 _get_mask_u128(uint8_t start, uint8_t stop) +{ + u128 mask = 0; + + ENV_BUG_ON(start >= 128); + ENV_BUG_ON(stop >= 128); + ENV_BUG_ON(stop < start); + + mask = ~mask; + mask >>= start + (127 - stop); + mask <<= start; + + return mask; +} + +#define ocf_metadata_bit_struct(type) \ +struct ocf_metadata_map_##type { \ + struct ocf_metadata_map map; \ + type valid; \ + type dirty; \ +} __attribute__((packed)) + +#define ocf_metadata_bit_func(what, type) \ 
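+/* Generates the per-word-size test, set and clear helpers for the given status field (valid or dirty) */ \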
+static bool _ocf_metadata_test_##what##_##type(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all) \ +{ \ + type mask = _get_mask_##type(start, stop); \ +\ + struct ocf_metadata_hash_ctrl *ctrl = \ + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + const struct ocf_metadata_map_##type *map = raw->mem_pool; \ +\ + _raw_bug_on(raw, line, sizeof(*map)); \ +\ + if (all) { \ + if (mask == (map[line].what & mask)) { \ + return true; \ + } else { \ + return false; \ + } \ + } else { \ + if (map[line].what & mask) { \ + return true; \ + } else { \ + return false; \ + } \ + } \ +} \ +\ +static bool _ocf_metadata_test_out_##what##_##type(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop) \ +{ \ + type mask = _get_mask_##type(start, stop); \ +\ + struct ocf_metadata_hash_ctrl *ctrl = \ + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + const struct ocf_metadata_map_##type *map = raw->mem_pool; \ +\ + _raw_bug_on(raw, line, sizeof(*map)); \ +\ + if (map[line].what & ~mask) { \ + return true; \ + } else { \ + return false; \ + } \ +} \ +\ +static bool _ocf_metadata_clear_##what##_##type(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop) \ +{ \ + type mask = _get_mask_##type(start, stop); \ +\ + struct ocf_metadata_hash_ctrl *ctrl = \ + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map_##type *map = raw->mem_pool; \ +\ + _raw_bug_on(raw, line, sizeof(*map)); \ +\ + map[line].what &= ~mask; \ +\ + if (map[line].what) { \ + return true; \ + } else { \ + return false; \ + } \ +} \ +\ +static bool _ocf_metadata_set_##what##_##type(struct ocf_cache *cache, \ + ocf_cache_line_t line, uint8_t start, uint8_t stop) \ +{ \ + bool result; \ + type mask = _get_mask_##type(start, stop); \ +\ + struct ocf_metadata_hash_ctrl *ctrl = \ + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map_##type *map = raw->mem_pool; \ +\ + _raw_bug_on(raw, line, sizeof(*map)); \ +\ + result = map[line].what ? 
true : false; \ +\ + map[line].what |= mask; \ +\ + return result; \ +} \ +\ +static bool _ocf_metadata_test_and_set_##what##_##type( \ + struct ocf_cache *cache, ocf_cache_line_t line, \ + uint8_t start, uint8_t stop, bool all) \ +{ \ + bool test; \ + type mask = _get_mask_##type(start, stop); \ +\ + struct ocf_metadata_hash_ctrl *ctrl = \ + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map_##type *map = raw->mem_pool; \ +\ + _raw_bug_on(raw, line, sizeof(*map)); \ +\ + if (all) { \ + if (mask == (map[line].what & mask)) { \ + test = true; \ + } else { \ + test = false; \ + } \ + } else { \ + if (map[line].what & mask) { \ + test = true; \ + } else { \ + test = false; \ + } \ + } \ +\ + map[line].what |= mask; \ + return test; \ +} \ +\ +static bool _ocf_metadata_test_and_clear_##what##_##type( \ + struct ocf_cache *cache, ocf_cache_line_t line, \ + uint8_t start, uint8_t stop, bool all) \ +{ \ + bool test; \ + type mask = _get_mask_##type(start, stop); \ +\ + struct ocf_metadata_hash_ctrl *ctrl = \ + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; \ +\ + struct ocf_metadata_raw *raw = \ + &ctrl->raw_desc[metadata_segment_collision]; \ +\ + struct ocf_metadata_map_##type *map = raw->mem_pool; \ +\ + _raw_bug_on(raw, line, sizeof(*map)); \ +\ + if (all) { \ + if (mask == (map[line].what & mask)) { \ + test = true; \ + } else { \ + test = false; \ + } \ + } else { \ + if (map[line].what & mask) { \ + test = true; \ + } else { \ + test = false; \ + } \ + } \ +\ + map[line].what &= ~mask; \ + return test; \ +} \ + +ocf_metadata_bit_struct(u8); +ocf_metadata_bit_struct(u16); +ocf_metadata_bit_struct(u32); +ocf_metadata_bit_struct(u64); +ocf_metadata_bit_struct(u128); + +ocf_metadata_bit_func(dirty, u8); +ocf_metadata_bit_func(dirty, u16); +ocf_metadata_bit_func(dirty, u32); +ocf_metadata_bit_func(dirty, u64); +ocf_metadata_bit_func(dirty, u128); + +ocf_metadata_bit_func(valid, u8); +ocf_metadata_bit_func(valid, u16); +ocf_metadata_bit_func(valid, u32); +ocf_metadata_bit_func(valid, u64); +ocf_metadata_bit_func(valid, u128); diff --git a/src/metadata/metadata_cleaning_policy.h b/src/metadata/metadata_cleaning_policy.h new file mode 100644 index 0000000..38098a1 --- /dev/null +++ b/src/metadata/metadata_cleaning_policy.h @@ -0,0 +1,39 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_CLEANING_POLICY_H__ +#define __METADATA_CLEANING_POLICY_H__ + +/* + * GET + */ +static inline void +ocf_metadata_get_cleaning_policy(struct ocf_cache *cache, + ocf_cache_line_t line, struct cleaning_policy_meta *policy) +{ + cache->metadata.iface.get_cleaning_policy(cache, line, policy); +} + +/* + * SET + */ +static inline void +ocf_metadata_set_cleaning_policy(struct ocf_cache *cache, + ocf_cache_line_t line, struct cleaning_policy_meta *policy) +{ + cache->metadata.iface.set_cleaning_policy(cache, line, policy); +} + +/* + * FLUSH + */ +static inline void +ocf_metadata_flush_cleaning_policy(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + cache->metadata.iface.flush_cleaning_policy(cache, line); +} + +#endif /* METADATA_CLEANING_POLICY_H_ */ diff --git a/src/metadata/metadata_collision.c b/src/metadata/metadata_collision.c new file mode 100644 index 0000000..cea5f4a --- /dev/null +++ b/src/metadata/metadata_collision.c @@ -0,0 +1,88 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * 
SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "metadata.h" +#include "../utils/utils_cache_line.h" + +/* + * + */ +void ocf_metadata_add_to_collision(struct ocf_cache *cache, + ocf_core_id_t core_id, uint64_t core_line, + ocf_cache_line_t hash, ocf_cache_line_t cache_line) +{ + ocf_cache_line_t prev_cache_line = ocf_metadata_get_hash(cache, hash); + ocf_cache_line_t line_entries = cache->device->collision_table_entries; + ocf_cache_line_t hash_entries = cache->device->hash_table_entries; + + ENV_BUG_ON(!(hash < hash_entries)); + ENV_BUG_ON(!(cache_line < line_entries)); + + /* Setup new node */ + ocf_metadata_set_core_info(cache, cache_line, core_id, + core_line); + + /* Update collision info: + * - next is set to value from hash table; + * - previous is set to collision table entries value + */ + ocf_metadata_set_collision_info(cache, cache_line, prev_cache_line, + line_entries); + + /* Update previous head */ + if (prev_cache_line != line_entries) { + ocf_metadata_set_collision_prev(cache, prev_cache_line, + cache_line); + } + + /* Update hash Table: hash table contains pointer to + * collision table so it contains indexes in collision table + */ + ocf_metadata_set_hash(cache, hash, cache_line); +} + +/* + * + */ +void ocf_metadata_remove_from_collision(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_part_id_t part_id) +{ + ocf_core_id_t core_id; + uint64_t core_sector; + ocf_cache_line_t hash_father; + ocf_cache_line_t prev_line, next_line; + ocf_cache_line_t line_entries = cache->device->collision_table_entries; + ocf_cache_line_t hash_entries = cache->device->hash_table_entries; + + ENV_BUG_ON(!(line < line_entries)); + + ocf_metadata_get_collision_info(cache, line, &next_line, &prev_line); + + /* Update previous node if any. */ + if (prev_line != line_entries) + ocf_metadata_set_collision_next(cache, prev_line, next_line); + + /* Update next node if any. 
*/ + if (next_line != line_entries) + ocf_metadata_set_collision_prev(cache, next_line, prev_line); + + ocf_metadata_get_core_info(cache, line, &core_id, &core_sector); + + /* Update hash table, because if it was pointing to the given node it + * must now point to the given's node next + */ + hash_father = ocf_metadata_hash_func(cache, core_sector, core_id); + ENV_BUG_ON(!(hash_father < hash_entries)); + + if (ocf_metadata_get_hash(cache, hash_father) == line) + ocf_metadata_set_hash(cache, hash_father, next_line); + + ocf_metadata_set_collision_info(cache, line, + line_entries, line_entries); + + ocf_metadata_set_core_info(cache, line, + OCF_CORE_MAX, ULLONG_MAX); +} diff --git a/src/metadata/metadata_collision.h b/src/metadata/metadata_collision.h new file mode 100644 index 0000000..7eb8bef --- /dev/null +++ b/src/metadata/metadata_collision.h @@ -0,0 +1,102 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_COLLISION_H__ +#define __METADATA_COLLISION_H__ + +/** + * @brief Metadata map structure + */ + +struct ocf_metadata_list_info { + ocf_cache_line_t prev_col; + /*!< Previous cache line in collision list */ + ocf_cache_line_t next_col; + /*!< Next cache line in collision list*/ + ocf_cache_line_t partition_prev; + /*!< Previous cache line in the same partition*/ + ocf_cache_line_t partition_next; + /*!< Next cache line in the same partition*/ + ocf_part_id_t partition_id : 8; + /*!< ID of partition where is assigned this cache line*/ +} __attribute__((packed)); + +/** + * @brief Metadata map structure + */ + +struct ocf_metadata_map { + uint64_t core_line; + /*!< Core line addres on cache mapped by this strcture */ + + uint16_t core_id; + /*!< ID of core where is assigned this cache line*/ + + uint8_t status[]; + /*!< Entry status structure e.g. 
valid, dirty...*/ +} __attribute__((packed)); + +static inline ocf_cache_line_t ocf_metadata_map_lg2phy( + struct ocf_cache *cache, ocf_cache_line_t coll_idx) +{ + return cache->metadata.iface.layout_iface->lg2phy(cache, + coll_idx); +} + +static inline ocf_cache_line_t ocf_metadata_map_phy2lg( + struct ocf_cache *cache, ocf_cache_line_t cache_line) +{ + return cache->metadata.iface.layout_iface->phy2lg(cache, + cache_line); +} + +static inline void ocf_metadata_set_collision_info( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t next, ocf_cache_line_t prev) +{ + cache->metadata.iface.set_collision_info(cache, line, next, prev); +} + +static inline void ocf_metadata_get_collision_info( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t *next, ocf_cache_line_t *prev) +{ + cache->metadata.iface.get_collision_info(cache, line, next, prev); +} + +static inline void ocf_metadata_set_collision_next( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t next) +{ + cache->metadata.iface.set_collision_next(cache, line, next); +} + +static inline void ocf_metadata_set_collision_prev( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t prev) +{ + cache->metadata.iface.set_collision_prev(cache, line, prev); +} + +static inline ocf_cache_line_t ocf_metadata_get_collision_next( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + return cache->metadata.iface.get_collision_next(cache, line); +} + +static inline ocf_cache_line_t ocf_metadata_get_collision_prev( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + return cache->metadata.iface.get_collision_prev(cache, line); +} + +void ocf_metadata_add_to_collision(struct ocf_cache *cache, + ocf_core_id_t core_id, uint64_t core_line, + ocf_cache_line_t hash, ocf_cache_line_t cache_line); + +void ocf_metadata_remove_from_collision(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_part_id_t part_id); + +#endif /* METADATA_COLLISION_H_ */ diff --git a/src/metadata/metadata_core.h b/src/metadata/metadata_core.h new file mode 100644 index 0000000..ca1dac2 --- /dev/null +++ b/src/metadata/metadata_core.h @@ -0,0 +1,51 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_CORE_H__ +#define __METADATA_CORE_H__ + +static inline void ocf_metadata_set_core_info(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_core_id_t core_id, + uint64_t core_sector) +{ + cache->metadata.iface.set_core_info(cache, line, core_id, + core_sector); +} + +static inline void ocf_metadata_get_core_info(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_core_id_t *core_id, + uint64_t *core_sector) +{ + cache->metadata.iface.get_core_info(cache, line, core_id, + core_sector); +} + +static inline void ocf_metadata_get_core_and_part_id( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_core_id_t *core_id, ocf_part_id_t *part_id) +{ + cache->metadata.iface.get_core_and_part_id(cache, line, core_id, + part_id); +} + +static inline ocf_core_id_t ocf_metadata_get_core_id( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + return cache->metadata.iface.get_core_id(cache, line); +} + +static inline uint64_t ocf_metadata_get_core_sector( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + return cache->metadata.iface.get_core_sector(cache, line); +} + +static inline struct ocf_metadata_uuid *ocf_metadata_get_core_uuid( + struct ocf_cache *cache, ocf_core_id_t core_id) +{ + return cache->metadata.iface.get_core_uuid(cache, core_id); 
+} + +#endif /* METADATA_CORE_H_ */ diff --git a/src/metadata/metadata_eviction_policy.h b/src/metadata/metadata_eviction_policy.h new file mode 100644 index 0000000..8797edb --- /dev/null +++ b/src/metadata/metadata_eviction_policy.h @@ -0,0 +1,35 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_EVICTION_H__ +#define __METADATA_EVICTION_H__ + +static inline void ocf_metadata_get_evicition_policy( + struct ocf_cache *cache, ocf_cache_line_t line, + union eviction_policy_meta *eviction) +{ + cache->metadata.iface.get_eviction_policy(cache, line, eviction); +} + +/* + * SET + */ +static inline void ocf_metadata_set_evicition_policy( + struct ocf_cache *cache, ocf_cache_line_t line, + union eviction_policy_meta *eviction) +{ + cache->metadata.iface.set_eviction_policy(cache, line, eviction); +} + +/* + * FLUSH + */ +static inline void ocf_metadata_flush_evicition_policy( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + cache->metadata.iface.flush_eviction_policy(cache, line); +} + +#endif /* METADATA_EVICTION_H_ */ diff --git a/src/metadata/metadata_hash.c b/src/metadata/metadata_hash.c new file mode 100644 index 0000000..acb14a8 --- /dev/null +++ b/src/metadata/metadata_hash.c @@ -0,0 +1,2462 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "metadata.h" +#include "metadata_hash.h" +#include "metadata_raw.h" +#include "metadata_io.h" +#include "metadata_status.h" +#include "../concurrency/ocf_concurrency.h" +#include "../utils/utils_cache_line.h" +#include "../ocf_def_priv.h" + +#define OCF_METADATA_HASH_DEBUG 0 + +#if 1 == OCF_METADATA_HASH_DEBUG +#define OCF_DEBUG_TRACE(cache) \ + ocf_cache_log(cache, log_info, "[Metadata][Hash] %s\n", __func__) + +#define OCF_DEBUG_PARAM(cache, format, ...) \ + ocf_cache_log(cache, log_info, "[Metadata][Hash] %s - "format"\n", \ + __func__, ##__VA_ARGS__) +#else +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_PARAM(cache, format, ...) +#endif + +#define METADATA_MEM_POOL(ctrl, section) ctrl->raw_desc[section].mem_pool + +static void ocf_metadata_hash_init_iface(struct ocf_cache *cache, + ocf_metadata_layout_t layout); + +#define OCF_METADATA_HASH_DIFF_MAX 1000 + +enum { + ocf_metadata_status_type_valid = 0, + ocf_metadata_status_type_dirty, + + ocf_metadata_status_type_max +}; + +static inline size_t ocf_metadata_status_sizeof( + const struct ocf_cache_line_settings *settings) { + /* Number of bytes required to mark cache line status */ + size_t size = settings->sector_count / 8; + + /* Number of types of status (valid, dirty, etc...) 
*/ + size *= ocf_metadata_status_type_max; + + /* At the end we have size */ + return size; +} + +/* + * Hash metadata control structure + */ +struct ocf_metadata_hash_ctrl { + ocf_cache_line_t cachelines; + ocf_cache_line_t start_page; + ocf_cache_line_t count_pages; + uint32_t device_lines; + size_t mapping_size; + struct ocf_metadata_raw raw_desc[metadata_segment_max]; +}; + +/* + * get entries for specified metadata hash type + */ +static ocf_cache_line_t ocf_metadata_hash_get_entires( + enum ocf_metadata_segment type, + ocf_cache_line_t cache_lines) +{ + ENV_BUG_ON(type >= metadata_segment_variable_size_start && cache_lines == 0); + + switch (type) { + case metadata_segment_collision: + case metadata_segment_cleaning: + case metadata_segment_eviction: + case metadata_segment_list_info: + return cache_lines; + + case metadata_segment_hash: + return cache_lines / 4; + + case metadata_segment_sb_config: + return DIV_ROUND_UP(sizeof(struct ocf_superblock_config), + PAGE_SIZE); + + case metadata_segment_sb_runtime: + return DIV_ROUND_UP(sizeof(struct ocf_superblock_runtime), + PAGE_SIZE); + + case metadata_segment_reserved: + return 32; + + case metadata_segment_core_config: + return OCF_CORE_MAX; + + case metadata_segment_core_runtime: + return OCF_CORE_MAX; + + case metadata_segment_core_uuid: + return OCF_CORE_MAX; + + default: + break; + } + + ENV_BUG(); + return 0; +} + +/* + * Get size of particular hash metadata type element + */ +static int64_t ocf_metadata_hash_get_element_size( + enum ocf_metadata_segment type, + const struct ocf_cache_line_settings *settings) +{ + int64_t size = 0; + + ENV_BUG_ON(type >= metadata_segment_variable_size_start && !settings); + + switch (type) { + case metadata_segment_eviction: + size = sizeof(union eviction_policy_meta); + break; + + case metadata_segment_cleaning: + size = sizeof(struct cleaning_policy_meta); + break; + + case metadata_segment_collision: + size = sizeof(struct ocf_metadata_map) + + ocf_metadata_status_sizeof(settings); + break; + + case metadata_segment_list_info: + size = sizeof(struct ocf_metadata_list_info); + break; + + case metadata_segment_sb_config: + size = PAGE_SIZE; + break; + + case metadata_segment_sb_runtime: + size = PAGE_SIZE; + break; + + case metadata_segment_reserved: + size = PAGE_SIZE; + break; + + case metadata_segment_hash: + size = sizeof(ocf_cache_line_t); + break; + + case metadata_segment_core_config: + size = sizeof(struct ocf_core_meta_config); + break; + + case metadata_segment_core_runtime: + size = sizeof(struct ocf_core_meta_runtime); + break; + + case metadata_segment_core_uuid: + size = sizeof(struct ocf_metadata_uuid); + break; + + default: + break; + + } + + ENV_BUG_ON(size > PAGE_SIZE); + + return size; +} + +/* + * Metadata calculation exception handling. 
+ * + * @param unused_lines - Unused pages + * @param device_lines - SSD Cache device pages amount + * + * @return true - Accept unused sapce + * @return false - unused space is not acceptable + */ +static bool ocf_metadata_hash_calculate_exception_hndl(ocf_cache_t cache, + int64_t unused_lines, int64_t device_lines) +{ + static bool warn; + int64_t utilization = 0; + + if (!warn) { + ocf_cache_log(cache, log_warn, + "Metadata size calculation problem\n"); + warn = true; + } + + if (unused_lines < 0) + return false; + + /* + * Accepted disk utilization is 90 % off SSD space + */ + utilization = (device_lines - unused_lines) * 100 / device_lines; + + if (utilization < 90) + return false; + + return true; +} + +/* + * Algorithm to calculate amount of cache lines taking into account required + * space for metadata + */ +static int ocf_metadata_hash_calculate_metadata_size( + struct ocf_cache *cache, + struct ocf_metadata_hash_ctrl *ctrl, + const struct ocf_cache_line_settings *settings) +{ + int64_t i_diff = 0, diff_lines = 0, cache_lines = ctrl->device_lines; + int64_t lowest_diff; + ocf_cache_line_t count_pages; + uint32_t i; + + OCF_DEBUG_PARAM(cache, "Cache lines = %lld", cache_lines); + + cache_lines = ctrl->device_lines; + lowest_diff = cache_lines; + + do { + count_pages = ctrl->count_pages; + for (i = metadata_segment_variable_size_start; + i < metadata_segment_max; i++) { + struct ocf_metadata_raw *raw = &ctrl->raw_desc[i]; + + /* Setup number of entries */ + raw->entries + = ocf_metadata_hash_get_entires(i, cache_lines); + + /* + * Setup SSD location and size + */ + raw->ssd_pages_offset = count_pages; + raw->ssd_pages = DIV_ROUND_UP(raw->entries, + raw->entries_in_page); + + /* Update offset for next container */ + count_pages += ocf_metadata_raw_size_on_ssd( + cache, raw); + } + + /* + * Check if max allowed iteration exceeded + */ + if (i_diff >= OCF_METADATA_HASH_DIFF_MAX) { + /* + * Never should be here but try handle this exception + */ + if (ocf_metadata_hash_calculate_exception_hndl(cache, + diff_lines, ctrl->device_lines)) { + break; + } + + if (i_diff > (2 * OCF_METADATA_HASH_DIFF_MAX)) { + /* + * We tried, but we fallen, have to return error + */ + ocf_cache_log(cache, log_err, + "Metadata size calculation ERROR\n"); + return -1; + } + } + + /* Calculate diff of cache lines */ + + /* Cache size in bytes */ + diff_lines = ctrl->device_lines * settings->size; + /* Sub metadata size which is in 4 kiB unit */ + diff_lines -= count_pages * PAGE_SIZE; + /* Convert back to cache lines */ + diff_lines /= settings->size; + /* Calculate difference */ + diff_lines -= cache_lines; + + if (diff_lines > 0) { + if (diff_lines < lowest_diff) + lowest_diff = diff_lines; + else if (diff_lines == lowest_diff) + break; + } + + /* Update new value of cache lines */ + cache_lines += diff_lines; + + OCF_DEBUG_PARAM(cache, "Diff pages = %lld", diff_lines); + OCF_DEBUG_PARAM(cache, "Cache lines = %lld", cache_lines); + + i_diff++; + + } while (diff_lines); + + ctrl->count_pages = count_pages; + ctrl->cachelines = cache_lines; + OCF_DEBUG_PARAM(cache, "Cache lines = %u", ctrl->cachelines); + + if (ctrl->device_lines < ctrl->cachelines) + return -1; + + return 0; +} + +static const char * const ocf_metadata_hash_raw_names[] = { + [metadata_segment_sb_config] = "Super block config", + [metadata_segment_sb_runtime] = "Super block runtime", + [metadata_segment_reserved] = "Reserved", + [metadata_segment_cleaning] = "Cleaning", + [metadata_segment_eviction] = "Eviction", + [metadata_segment_collision] 
= "Collision", + [metadata_segment_list_info] = "List info", + [metadata_segment_hash] = "Hash", + [metadata_segment_core_config] = "Core config", + [metadata_segment_core_runtime] = "Core runtime", + [metadata_segment_core_uuid] = "Core UUID", +}; +#if 1 == OCF_METADATA_HASH_DEBUG +/* + * Debug info functions prints metadata and raw containers information + */ +static void ocf_metadata_hash_raw_info(struct ocf_cache *cache, + struct ocf_metadata_hash_ctrl *ctrl) +{ + uint64_t capacity = 0; + uint64_t capacity_sum = 0; + uint32_t i = 0; + const char *unit; + + for (i = 0; i < metadata_segment_max; i++) { + struct ocf_metadata_raw *raw = &(ctrl->raw_desc[i]); + + OCF_DEBUG_PARAM(cache, "Raw : name = %s", + ocf_metadata_hash_raw_names[i]); + OCF_DEBUG_PARAM(cache, " : metadata type = %u", i); + OCF_DEBUG_PARAM(cache, " : raw type = %u", + raw->raw_type); + OCF_DEBUG_PARAM(cache, " : entry size = %u", + raw->entry_size); + OCF_DEBUG_PARAM(cache, " : entries = %llu", + raw->entries); + OCF_DEBUG_PARAM(cache, " : entries in page = %u", + raw->entries_in_page); + OCF_DEBUG_PARAM(cache, " : page offset = %llu", + raw->ssd_pages_offset); + OCF_DEBUG_PARAM(cache, " : pages = %llu", + raw->ssd_pages); + } + + /* Provide capacity info */ + for (i = 0; i < metadata_segment_max; i++) { + capacity = ocf_metadata_raw_size_of(cache, + &(ctrl->raw_desc[i])); + + capacity_sum += capacity; + + if (capacity / MiB) { + capacity = capacity / MiB; + unit = "MiB"; + } else { + unit = "KiB"; + capacity = capacity / KiB; + + } + + OCF_DEBUG_PARAM(cache, "%s capacity %llu %s", + ocf_metadata_hash_raw_names[i], capacity, unit); + } +} +#else +#define ocf_metadata_hash_raw_info(cache, ctrl) +#endif + +/* + * Deinitialize hash metadata interface + */ +static void ocf_metadata_hash_deinit_variable_size(struct ocf_cache *cache) +{ + + int result = 0; + uint32_t i = 0; + + struct ocf_metadata_hash_ctrl *ctrl = (struct ocf_metadata_hash_ctrl *) + cache->metadata.iface_priv; + + OCF_DEBUG_TRACE(cache); + + /* + * De initialize RAW types + */ + for (i = metadata_segment_variable_size_start; + i < metadata_segment_max; i++) { + result |= ocf_metadata_raw_deinit(cache, + &(ctrl->raw_desc[i])); + } +} + +static inline void ocf_metadata_config_init(struct ocf_cache *cache, + struct ocf_cache_line_settings *settings, size_t size) +{ + ENV_BUG_ON(!ocf_cache_line_size_is_valid(size)); + + ENV_BUG_ON(env_memset(settings, sizeof(*settings), 0)); + + settings->size = size; + settings->sector_count = BYTES_TO_SECTORS(settings->size); + settings->sector_start = 0; + settings->sector_end = settings->sector_count - 1; + + OCF_DEBUG_PARAM(cache, "Cache line size = %lu, bits count = %llu, " + "status size = %lu", + settings->size, settings->sector_count, + ocf_metadata_status_sizeof(settings)); +} + +static void ocf_metadata_hash_deinit(struct ocf_cache *cache) +{ + int result = 0; + uint32_t i; + + struct ocf_metadata_hash_ctrl *ctrl = (struct ocf_metadata_hash_ctrl *) + cache->metadata.iface_priv; + + for (i = 0; i < metadata_segment_fixed_size_max; i++) { + result |= ocf_metadata_raw_deinit(cache, + &(ctrl->raw_desc[i])); + } + + env_vfree(ctrl); + cache->metadata.iface_priv = NULL; + + if (result) + ENV_BUG(); +} + +static int ocf_metadata_hash_init(struct ocf_cache *cache, + ocf_cache_line_size_t cache_line_size) +{ + struct ocf_metadata_hash_ctrl *ctrl = NULL; + struct ocf_cache_line_settings *settings = + (struct ocf_cache_line_settings *)&cache->metadata.settings; + uint32_t i = 0; + uint32_t page = 0; + int result = 0; + + 
OCF_DEBUG_TRACE(cache); + + ENV_WARN_ON(cache->metadata.iface_priv); + + ctrl = env_vzalloc(sizeof(*ctrl)); + if (!ctrl) + return -ENOMEM; + + cache->metadata.iface_priv = ctrl; + + ocf_metadata_config_init(cache, settings, cache_line_size); + + /* Initial setup of RAW containers */ + for (i = 0; i < metadata_segment_fixed_size_max; i++) { + struct ocf_metadata_raw *raw = &ctrl->raw_desc[i]; + + raw->metadata_segment = i; + + /* Default type for metadata RAW container */ + raw->raw_type = metadata_raw_type_ram; + + if (cache->metadata.is_volatile) { + raw->raw_type = metadata_raw_type_volatile; + } else if (i == metadata_segment_core_uuid) { + raw->raw_type = metadata_raw_type_dynamic; + } + + /* Entry size configuration */ + raw->entry_size + = ocf_metadata_hash_get_element_size(i, NULL); + raw->entries_in_page = PAGE_SIZE / raw->entry_size; + + /* Setup number of entries */ + raw->entries = ocf_metadata_hash_get_entires(i, 0); + + /* + * Setup SSD location and size + */ + raw->ssd_pages_offset = page; + raw->ssd_pages = DIV_ROUND_UP(raw->entries, + raw->entries_in_page); + + /* Update offset for next container */ + page += ocf_metadata_raw_size_on_ssd(cache, raw); + } + + ctrl->count_pages = page; + + for (i = 0; i < metadata_segment_fixed_size_max; i++) { + result |= ocf_metadata_raw_init(cache, &(ctrl->raw_desc[i])); + if (result) + break; + } + + if (result) { + ocf_metadata_hash_deinit(cache); + } else { + cache->conf_meta = METADATA_MEM_POOL(ctrl, + metadata_segment_sb_config); + + /* Set core metadata */ + cache->core_conf_meta = METADATA_MEM_POOL(ctrl, + metadata_segment_core_config); + + cache->core_runtime_meta = METADATA_MEM_POOL(ctrl, + metadata_segment_core_runtime); + + env_spinlock_init(&cache->metadata.lock.eviction); + env_rwlock_init(&cache->metadata.lock.status); + env_rwsem_init(&cache->metadata.lock.collision); + } + + return result; +} + + + +/* + * Initialize hash metadata interface + */ +static int ocf_metadata_hash_init_variable_size(struct ocf_cache *cache, + uint64_t device_size, ocf_cache_line_size_t cache_line_size, + ocf_metadata_layout_t layout) +{ + int result = 0; + uint32_t i = 0; + ocf_cache_line_t line; + struct ocf_metadata_hash_ctrl *ctrl = NULL; + struct ocf_cache_line_settings *settings = + (struct ocf_cache_line_settings *)&cache->metadata.settings; + + OCF_DEBUG_TRACE(cache); + + ENV_WARN_ON(!cache->metadata.iface_priv); + + ctrl = cache->metadata.iface_priv; + + ctrl->device_lines = device_size / cache_line_size; + + if (settings->size != cache_line_size) + /* Re-initialize settings with different cache line size */ + ocf_metadata_config_init(cache, settings, cache_line_size); + + ctrl->mapping_size = ocf_metadata_status_sizeof(settings) + + sizeof(struct ocf_metadata_map); + + ocf_metadata_hash_init_iface(cache, layout); + + /* Initial setup of dynamic size RAW containers */ + for (i = metadata_segment_variable_size_start; + i < metadata_segment_max; i++) { + struct ocf_metadata_raw *raw = &ctrl->raw_desc[i]; + + raw->metadata_segment = i; + + /* Default type for metadata RAW container */ + raw->raw_type = metadata_raw_type_ram; + + if (cache->device->init_mode == ocf_init_mode_metadata_volatile) { + raw->raw_type = metadata_raw_type_volatile; + } else if (i == metadata_segment_collision && + ocf_data_obj_is_atomic(&cache->device->obj)) { + raw->raw_type = metadata_raw_type_atomic; + } + + /* Entry size configuration */ + raw->entry_size + = ocf_metadata_hash_get_element_size(i, settings); + raw->entries_in_page = PAGE_SIZE / raw->entry_size; + 
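+		/*
+		 * The number of entries and the SSD page layout for these
+		 * variable-size segments are not known yet; they are filled in
+		 * by ocf_metadata_hash_calculate_metadata_size() below.
+		 */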
} + + if (0 != ocf_metadata_hash_calculate_metadata_size(cache, ctrl, + settings)) { + return -1; + } + + OCF_DEBUG_PARAM(cache, "Metadata begin pages = %u", ctrl->start_page); + OCF_DEBUG_PARAM(cache, "Metadata count pages = %u", ctrl->count_pages); + OCF_DEBUG_PARAM(cache, "Metadata end pages = %u", ctrl->start_page + + ctrl->count_pages); + + /* + * Initialize all dynamic size RAW types + */ + for (i = metadata_segment_variable_size_start; + i < metadata_segment_max; i++) { + result |= ocf_metadata_raw_init(cache, &(ctrl->raw_desc[i])); + + if (result) + goto finalize; + } + + for (i = 0; i < metadata_segment_max; i++) { + ocf_cache_log(cache, log_info, "%s offset : %llu kiB\n", + ocf_metadata_hash_raw_names[i], + ctrl->raw_desc[i].ssd_pages_offset + * PAGE_SIZE / KiB); + if (i == metadata_segment_sb_config) { + ocf_cache_log(cache, log_info, "%s size : %lu B\n", + ocf_metadata_hash_raw_names[i], + offsetof(struct ocf_superblock_config, checksum) + + sizeof(((struct ocf_superblock_config *)0) + ->checksum)); + } else if (i == metadata_segment_sb_runtime) { + ocf_cache_log(cache, log_info, "%s size : %lu B\n", + ocf_metadata_hash_raw_names[i], + sizeof(struct ocf_superblock_runtime)); + } else { + ocf_cache_log(cache, log_info, "%s size : %llu kiB\n", + ocf_metadata_hash_raw_names[i], + ctrl->raw_desc[i].ssd_pages + * PAGE_SIZE / KiB); + } + } + +finalize: + if (result) { + /* + * Hash De-Init also contains RAW deinitialization + */ + ocf_metadata_hash_deinit_variable_size(cache); + } else { + cache->device->runtime_meta = METADATA_MEM_POOL(ctrl, + metadata_segment_sb_runtime); + + cache->device->collision_table_entries = ctrl->cachelines; + + cache->device->hash_table_entries = + ctrl->raw_desc[metadata_segment_hash].entries; + + cache->device->metadata_offset = ctrl->count_pages * PAGE_SIZE; + cache->device->metadata_offset_line = ctrl->count_pages; + + cache->conf_meta->cachelines = ctrl->cachelines; + cache->conf_meta->line_size = cache_line_size; + + ocf_metadata_hash_raw_info(cache, ctrl); + + ocf_cache_log(cache, log_info, "Cache line size: %llu kiB\n", + settings->size / KiB); + + ocf_cache_log(cache, log_info, "Metadata capacity: %llu MiB\n", + (uint64_t)ocf_metadata_size_of(cache) / MiB); + } + + /* + * Self test of metadata + */ + for (line = 0; line < cache->device->collision_table_entries; line++) { + ocf_cache_line_t phy, lg; + + phy = ocf_metadata_map_lg2phy(cache, line); + lg = ocf_metadata_map_phy2lg(cache, phy); + + if (line != lg) { + result = -EINVAL; + break; + } + } + + if (result == 0) { + ocf_cache_log(cache, log_info, + "OCF metadata self-test PASSED\n"); + } else { + ocf_cache_log(cache, log_err, + "OCF metadata self-test ERROR\n"); + } + + return result; +} + +static inline void _ocf_init_collision_entry(struct ocf_cache *cache, + ocf_cache_line_t idx, ocf_cache_line_t next, + ocf_cache_line_t prev) +{ + ocf_cache_line_t invalid_idx = cache->device->collision_table_entries; + ocf_part_id_t invalid_part_id = PARTITION_INVALID; + + ocf_metadata_set_partition_info(cache, idx, + invalid_part_id, next, prev); + ocf_metadata_set_collision_info(cache, idx, invalid_idx, invalid_idx); + ocf_metadata_set_core_info(cache, idx, + OCF_CORE_MAX, ULONG_MAX); + metadata_init_status_bits(cache, idx); +} + +/* + * Default initialization of freelist partition + */ +static void ocf_metadata_hash_init_freelist_seq(struct ocf_cache *cache) +{ + uint32_t step = 0; + unsigned int i = 0; + ocf_cache_line_t collision_table_entries = + cache->device->collision_table_entries; + + 
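+	/*
+	 * Build the free list as a doubly-linked chain over the whole
+	 * collision table: entry i points forward to i + 1 and back to i - 1,
+	 * and collision_table_entries (an out-of-range index) terminates the
+	 * list on both ends.
+	 */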
cache->device->freelist_part->head = 0; + cache->device->freelist_part->curr_size = cache->device->collision_table_entries; + + /* hash_father is an index in hash_table and it's limited + * to the hash_table_entries + * hash_table_entries is invalid index here. + */ + _ocf_init_collision_entry(cache, i, 1, collision_table_entries); + + for (i = 1; i < collision_table_entries - 1; i++) { + _ocf_init_collision_entry(cache, i, i + 1, i - 1); + OCF_COND_RESCHED_DEFAULT(step); + } + + cache->device->freelist_part->tail = i; + _ocf_init_collision_entry(cache, i, collision_table_entries, i - 1); +} + +/* + * Modified initialization of freelist partition + */ +static void ocf_metadata_hash_init_freelist_striping( + struct ocf_cache *cache) +{ + uint32_t step = 0; + unsigned int i, j; + ocf_cache_line_t prev, next; + ocf_cache_line_t idx, last_page; + ocf_cache_line_t collision_table_entries = + cache->device->collision_table_entries; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + unsigned int entries_in_page = + ctrl->raw_desc[metadata_segment_collision].entries_in_page; + unsigned int pages = + ctrl->raw_desc[metadata_segment_collision].ssd_pages; + + cache->device->freelist_part->head = 0; + cache->device->freelist_part->curr_size = cache->device->collision_table_entries; + + /* Modified initialization procedure */ + prev = next = collision_table_entries; + idx = 0; + last_page = pages; + + for (i = 0; i < pages; i++) { + idx = i * entries_in_page; + for (j = 0; j < entries_in_page && + idx < collision_table_entries; j++) { + next = idx + entries_in_page; + + if (next >= collision_table_entries) + next = j + 1; + + _ocf_init_collision_entry(cache, idx, next, prev); + + if (idx >= entries_in_page - 1) { + prev = idx - entries_in_page + 1; + } else { + prev = last_page * entries_in_page + j; + if (prev >= collision_table_entries) { + prev -= entries_in_page; + last_page = pages - 1; + } + } + + OCF_COND_RESCHED_DEFAULT(step); + idx++; + } + } + + if (collision_table_entries < entries_in_page) { + idx = collision_table_entries - 1; + } else { + idx = pages * entries_in_page - 1; + if (idx >= collision_table_entries) + idx -= entries_in_page; + + } + + /* Terminate free list */ + cache->device->freelist_part->tail = idx; + ocf_metadata_set_partition_next(cache, idx, collision_table_entries); +} + + +/* + * Initialize hash table + */ +static void ocf_metadata_hash_init_hash_table(struct ocf_cache *cache) +{ + unsigned int i; + unsigned int hash_table_entries = cache->device->hash_table_entries; + ocf_cache_line_t invalid_idx = cache->device->collision_table_entries; + + /* Init hash table */ + for (i = 0; i < hash_table_entries; i++) { + /* hash_table contains indexes from collision_table + * thus it shall be initialized in improper values + * from collision_table + **/ + ocf_metadata_set_hash(cache, i, invalid_idx); + } + +} + +/* + * Get count of pages that is dedicated for metadata + */ +static ocf_cache_line_t ocf_metadata_hash_pages(struct ocf_cache *cache) +{ + struct ocf_metadata_hash_ctrl *ctrl = NULL; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + return ctrl->count_pages; +} + +/* + * Get amount of cache lines + */ +static ocf_cache_line_t ocf_metadata_hash_cachelines( + struct ocf_cache *cache) +{ + struct ocf_metadata_hash_ctrl *ctrl = NULL; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + return ctrl->cachelines; +} + 
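The cache line count reported above by ocf_metadata_hash_cachelines() is the result of the fixed-point calculation in ocf_metadata_hash_calculate_metadata_size(): metadata occupies pages, which reduces the number of usable cache lines, which in turn shrinks the metadata, so both values have to be solved for together. The snippet below is a minimal, self-contained sketch of that loop; the device geometry and the per-line metadata overhead are made-up example numbers, and no OCF types or functions are used.

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096

int main(void)
{
	/* Assumed example geometry - not taken from OCF */
	const int64_t line_size = 64 * 1024;	/* 64 kiB cache line */
	const int64_t device_lines = 1000000;	/* device capacity in cache lines */
	const int64_t meta_per_line = 68;	/* assumed metadata bytes per line */

	int64_t cache_lines = device_lines;
	int64_t diff;
	int iter = 0;

	do {
		/* Pages needed to hold metadata for the current estimate */
		int64_t meta_pages = (cache_lines * meta_per_line
				+ PAGE_SIZE - 1) / PAGE_SIZE;

		/* Space left for data, converted back to whole cache lines */
		int64_t usable = (device_lines * line_size
				- meta_pages * PAGE_SIZE) / line_size;

		diff = usable - cache_lines;
		cache_lines = usable;
	} while (diff && ++iter < 1000);

	printf("usable cache lines: %lld\n", (long long)cache_lines);
	return 0;
}
```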
+static size_t ocf_metadata_hash_size_of(struct ocf_cache *cache) +{ + uint32_t i = 0; + size_t size = 0; + struct ocf_metadata_hash_ctrl *ctrl = NULL; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + /* + * Get size of all RAW metadata container + */ + for (i = 0; i < metadata_segment_max; i++) { + size += ocf_metadata_raw_size_of(cache, + &(ctrl->raw_desc[i])); + } + + /* Get additional part of memory footprint */ + + /* Cache concurrency mechnism */ + size += ocf_cache_concurrency_size_of(cache); + + return size; +} + +/******************************************************************************* + * Super Block + ******************************************************************************/ + +/* + * Super Block - Load, This function has to prevent to pointers overwrite + */ +static int ocf_metadata_hash_load_superblock(struct ocf_cache *cache) +{ + int result = 0; + uint32_t i = 0; + struct ocf_metadata_hash_ctrl *ctrl; + struct ocf_superblock_config *sb_config; + struct ocf_superblock_runtime *sb_runtime; + struct ocf_metadata_uuid *muuid; + struct ocf_data_obj_uuid uuid; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + ENV_BUG_ON(!ctrl); + + sb_config = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config); + ENV_BUG_ON(!sb_config); + + sb_runtime = METADATA_MEM_POOL(ctrl, metadata_segment_sb_runtime); + ENV_BUG_ON(!sb_runtime); + + /* Load super block main information */ + result |= ocf_metadata_raw_load_all(cache, + &(ctrl->raw_desc[metadata_segment_sb_config])); + + result |= ocf_metadata_raw_load_all(cache, + &(ctrl->raw_desc[metadata_segment_sb_runtime])); + + /* Load core information */ + result |= ocf_metadata_raw_load_all(cache, + &(ctrl->raw_desc[metadata_segment_core_config])); + result |= ocf_metadata_raw_load_all(cache, + &(ctrl->raw_desc[metadata_segment_core_uuid])); + + /* Do loading */ + if (result) { + /* Loading super block failure */ + ocf_cache_log(cache, log_err, + "Loading metadata of super block ERROR"); + goto ocf_metadata_hash_load_superblock_ERROR; + } + + result = env_crc32(0, (void *)sb_config, + offsetof(struct ocf_superblock_config, checksum)) != + sb_config->checksum[metadata_segment_sb_config]; + + if (result) { + /* Checksum does not match */ + ocf_cache_log(cache, log_err, + "Loading config super block ERROR, invalid checksum"); + goto ocf_metadata_hash_load_superblock_ERROR; + } + + result = ocf_metadata_raw_checksum(cache, + &(ctrl->raw_desc[metadata_segment_sb_runtime])) != + sb_config->checksum[metadata_segment_sb_runtime]; + + if (result) { + /* Checksum does not match */ + ocf_cache_log(cache, log_err, + "Loading runtime super block ERROR, invalid checksum"); + goto ocf_metadata_hash_load_superblock_ERROR; + } + + result = ocf_metadata_raw_checksum(cache, + &(ctrl->raw_desc[metadata_segment_core_config])) != + sb_config->checksum[metadata_segment_core_config]; + + if (result) { + /* Checksum does not match */ + ocf_cache_log(cache, log_err, + "Loading core config section ERROR, invalid checksum"); + goto ocf_metadata_hash_load_superblock_ERROR; + } + + result = ocf_metadata_raw_checksum(cache, + &(ctrl->raw_desc[metadata_segment_core_uuid])) != + sb_config->checksum[metadata_segment_core_uuid]; + + if (result) { + /* Checksum does not match */ + ocf_cache_log(cache, log_err, + "Loading uuid section ERROR, invalid checksum"); + goto ocf_metadata_hash_load_superblock_ERROR; + } + + for (i = 0; i < OCF_CORE_MAX; i++) { + if 
(!cache->core_conf_meta[i].added) + continue; + + muuid = ocf_metadata_get_core_uuid(cache, i); + uuid.data = muuid->data; + uuid.size = muuid->size; + + /* Initialize core data object */ + ocf_data_obj_init(&cache->core_obj[i].obj, + ocf_ctx_get_data_obj_type(cache->owner, + cache->core_conf_meta[i].type), + &uuid, false); + } + + /* Restore all dynamics items */ + + if (sb_config->core_obj_count > OCF_CORE_MAX) { + ocf_cache_log(cache, log_err, + "Loading cache state ERROR, invalid cores count\n"); + goto ocf_metadata_hash_load_superblock_ERROR; + } + + if (sb_config->valid_parts_no > OCF_IO_CLASS_MAX) { + ocf_cache_log(cache, log_err, + "Loading cache state ERROR, invalid partition count\n"); + goto ocf_metadata_hash_load_superblock_ERROR; + } + + return 0; + +ocf_metadata_hash_load_superblock_ERROR: + + ocf_cache_log(cache, log_err, "Metadata read FAILURE\n"); + ocf_metadata_error(cache); + return -1; + +} + +/* + * Super Block - FLUSH + */ +static int ocf_metadata_hash_flush_superblock(struct ocf_cache *cache) +{ + uint32_t i; + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl; + struct ocf_superblock_config *superblock; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + superblock = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config); + + /* Synchronize core objects types */ + for (i = 0; i < OCF_CORE_MAX; i++) { + cache->core_conf_meta[i].type = ocf_ctx_get_data_obj_type_id( + cache->owner, cache->core_obj[i].obj.type); + } + + /* Calculate checksum */ + superblock->checksum[metadata_segment_sb_config] = env_crc32(0, + (void *)superblock, + offsetof(struct ocf_superblock_config, checksum)); + + superblock->checksum[metadata_segment_core_config] = + ocf_metadata_raw_checksum(cache, + &(ctrl->raw_desc[metadata_segment_core_config])); + + superblock->checksum[metadata_segment_core_uuid] = + ocf_metadata_raw_checksum(cache, + &(ctrl->raw_desc[metadata_segment_core_uuid])); + + /** + * Flush RAW container that contains super block + */ + result = ocf_metadata_raw_flush_all(cache, + &(ctrl->raw_desc[metadata_segment_sb_config])); + + result |= ocf_metadata_raw_flush_all(cache, + &(ctrl->raw_desc[metadata_segment_core_config])); + + result |= ocf_metadata_raw_flush_all(cache, + &(ctrl->raw_desc[metadata_segment_core_uuid])); + if (result) + ocf_metadata_error(cache); + + return result; +} + +/** + * @brief Super Block - Set Shutdown Status + * + * to get shutdown status, one needs to call ocf_metadata_load_properties. + * @param shutdown_status - status to be assigned to cache. 
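+ * @param cache - cache instance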
+ * + * @return Operation status (0 success, otherwise error) + */ +static int ocf_metadata_hash_set_shutdown_status( + struct ocf_cache *cache, + enum ocf_metadata_shutdown_status shutdown_status) +{ + struct ocf_metadata_hash_ctrl *ctrl; + struct ocf_superblock_config *superblock; + + OCF_DEBUG_TRACE(cache); + + /* + * Get metadata hash service control structure + */ + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + /* + * Get super block + */ + superblock = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config); + + /* Set shutdown status */ + superblock->clean_shutdown = shutdown_status; + superblock->magic_number = CACHE_MAGIC_NUMBER; + + /* Flush superblock */ + return ocf_metadata_hash_flush_superblock(cache); +} + +/******************************************************************************* + * RESERVED AREA + ******************************************************************************/ + +static uint64_t ocf_metadata_hash_get_reserved_lba( + struct ocf_cache *cache) +{ + struct ocf_metadata_hash_ctrl *ctrl; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + return ctrl->raw_desc[metadata_segment_reserved].ssd_pages_offset * + PAGE_SIZE; +} + +/******************************************************************************* + * FLUSH AND LOAD ALL + ******************************************************************************/ + +/* + * Flush all metadata + */ +static int ocf_metadata_hash_flush_all(struct ocf_cache *cache) +{ + struct ocf_metadata_hash_ctrl *ctrl; + struct ocf_superblock_config *superblock; + int result = 0; + uint32_t i = 0; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + superblock = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config); + + ocf_metadata_hash_set_shutdown_status(cache, + ocf_metadata_dirty_shutdown); + + /* + * Flush all RAW metadata container + */ + for (i = 0; i < metadata_segment_max; i++) { + if ((metadata_segment_sb_config == i) || + (metadata_segment_core_config == i) || + (metadata_segment_core_uuid == i)) { + continue; + } + + result |= ocf_metadata_raw_flush_all(cache, + &(ctrl->raw_desc[i])); + + } + + if (result == 0) { + for (i = 0; i < metadata_segment_max; i++) { + if ((metadata_segment_sb_config == i) || + (metadata_segment_core_config == i) || + (metadata_segment_core_uuid == i)) { + continue; + } + + superblock->checksum[i] = ocf_metadata_raw_checksum( + cache, &(ctrl->raw_desc[i])); + } + + /* Set clean shutdown status (it flushes entire superblock) */ + result = ocf_metadata_hash_set_shutdown_status(cache, + ocf_metadata_clean_shutdown); + } + + if (result) { + ocf_metadata_error(cache); + ocf_cache_log(cache, log_err, "Metadata Flush ERROR\n"); + return result; + } + + ocf_cache_log(cache, log_info, "Done saving cache state!\n"); + return result; +} + +/* + * Flush specified cache line + */ +static void ocf_metadata_hash_flush(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl = NULL; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + /* + * Flush all required metadata elements to make given metadata cache + * line persistent in case of recovery + */ + + /* Collision table to get mapping cache line to HDD sector*/ + result |= ocf_metadata_raw_flush(cache, + &(ctrl->raw_desc[metadata_segment_collision]), + line); + + if (result) { + ocf_metadata_error(cache); + ocf_cache_log(cache, 
log_err, + "Metadata Flush ERROR for cache line %u\n", line); + } +} + +/* + * Flush specified cache line + */ +static void ocf_metadata_hash_flush_mark(struct ocf_cache *cache, + struct ocf_request *rq, uint32_t map_idx, int to_state, + uint8_t start, uint8_t stop) +{ + struct ocf_metadata_hash_ctrl *ctrl = NULL; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + /* + * Mark all required metadata elements to make given metadata cache + * line persistent in case of recovery + */ + + /* Collision table to get mapping cache line to HDD sector*/ + ocf_metadata_raw_flush_mark(cache, + &(ctrl->raw_desc[metadata_segment_collision]), + rq, map_idx, to_state, start, stop); +} + +/* + * Flush specified cache lines asynchronously + */ +static void ocf_metadata_hash_flush_do_asynch(struct ocf_cache *cache, + struct ocf_request *rq, ocf_metadata_asynch_flush_hndl complete) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl = NULL; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + /* + * Flush all required metadata elements to make given metadata cache + * line persistent in case of recovery + */ + + env_atomic_inc(&rq->req_remaining); /* Core device IO */ + + result |= ocf_metadata_raw_flush_do_asynch(cache, rq, + &(ctrl->raw_desc[metadata_segment_collision]), + complete); + + if (result) { + ocf_metadata_error(cache); + ocf_cache_log(cache, log_err, "Metadata Flush ERROR\n"); + } +} + +/* + * Load all metadata + */ +static int ocf_metadata_hash_load_all(struct ocf_cache *cache) +{ + struct ocf_metadata_hash_ctrl *ctrl; + struct ocf_superblock_config *superblock; + int result = 0, i = 0; + uint32_t checksum; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + superblock = METADATA_MEM_POOL(ctrl, metadata_segment_sb_config); + + /* + * Load all RAW metadata container + */ + for (i = 0; i < metadata_segment_max; i++) { + if ((metadata_segment_sb_config == i) || + (metadata_segment_sb_runtime == i) || + (metadata_segment_core_config == i) || + (metadata_segment_core_uuid == i)) { + /* Super block and core metadata are loaded separately */ + continue; + } + + result = ocf_metadata_raw_load_all(cache, + &(ctrl->raw_desc[i])); + if (result) + break; + + if (i == metadata_segment_reserved) { + /* Don't check checksum for reserved area */ + continue; + } + + checksum = ocf_metadata_raw_checksum(cache, + &(ctrl->raw_desc[i])); + + if (checksum != superblock->checksum[i]) { + result = -EINVAL; + break; + } + } + + if (result) { + ocf_metadata_error(cache); + ocf_cache_log(cache, log_err, "Metadata read FAILURE\n"); + return -1; + } + + /* + * TODO(rbaldyga): Is that related to metadata at all? If not, then it + * should be moved to some better place. + */ + /* Final error checking */ + if (!env_bit_test(ocf_cache_state_running, &cache->cache_state) + && !env_bit_test(ocf_cache_state_initializing, + &cache->cache_state)) { + ocf_cache_log(cache, log_err, + "Metadata Read failed! 
OCF Stopped!\n"); + return -1; + } + + ocf_cache_log(cache, log_info, "Done loading cache state\n"); + return 0; +} + +static void _recovery_rebuild_cline_metadata(struct ocf_cache *cache, + ocf_core_id_t core_id, uint64_t core_line, + ocf_cache_line_t cache_line) +{ + ocf_part_id_t part_id; + ocf_cache_line_t hash_index; + + part_id = PARTITION_DEFAULT; + + ocf_metadata_remove_from_free_list(cache, cache_line); + + ocf_metadata_add_to_partition(cache, part_id, cache_line); + + hash_index = ocf_metadata_hash_func(cache, core_line, core_id); + ocf_metadata_add_to_collision(cache, core_id, core_line, hash_index, + cache_line); + + ocf_eviction_init_cache_line(cache, cache_line, part_id); + + ocf_eviction_set_hot_cache_line(cache, cache_line); + + env_atomic_inc(&cache->core_runtime_meta[core_id].cached_clines); + env_atomic_inc(&cache->core_runtime_meta[core_id]. + part_counters[part_id].cached_clines); + + if (metadata_test_dirty(cache, cache_line)) { + env_atomic_inc(&cache->core_runtime_meta[core_id]. + dirty_clines); + env_atomic_inc(&cache->core_runtime_meta[core_id]. + part_counters[part_id].dirty_clines); + env_atomic64_cmpxchg(&cache->core_runtime_meta[core_id]. + dirty_since, 0, env_get_tick_count()); + } +} + +static void _recovery_invalidate_clean_sec(struct ocf_cache *cache, + ocf_cache_line_t cline) +{ + uint8_t i; + + for (i = ocf_line_start_sector(cache); + i <= ocf_line_end_sector(cache); i++) { + if (!metadata_test_dirty_one(cache, cline, i)) { + /* Invalidate clear sectors */ + metadata_clear_valid_sec_one(cache, cline, i); + } + } +} + +static void _recovery_reset_cline_metadata(struct ocf_cache *cache, + ocf_cache_line_t cline) +{ + ocf_cleaning_t clean_policy_type; + + ocf_metadata_set_core_info(cache, cline, OCF_CORE_MAX, ULLONG_MAX); + + metadata_clear_valid(cache, cline); + + clean_policy_type = cache->conf_meta->cleaning_policy_type; + + ENV_BUG_ON(clean_policy_type >= ocf_cleaning_max); + + if (cleaning_policy_ops[clean_policy_type].init_cache_block != NULL) + cleaning_policy_ops[clean_policy_type]. 
+ init_cache_block(cache, cline); +} + +static void _recovery_rebuild_metadata(struct ocf_cache *cache, + bool dirty_only) +{ + ocf_cache_line_t cline; + ocf_core_id_t core_id; + uint64_t core_line; + unsigned char step = 0; + + OCF_METADATA_LOCK_WR(); + + for (cline = 0; cline < cache->device->collision_table_entries; cline++) { + ocf_metadata_get_core_info(cache, cline, &core_id, &core_line); + if (core_id != OCF_CORE_MAX && + (!dirty_only || metadata_test_dirty(cache, + cline))) { + /* Rebuild metadata for mapped cache line */ + _recovery_rebuild_cline_metadata(cache, core_id, + core_line, cline); + if (dirty_only) + _recovery_invalidate_clean_sec(cache, cline); + } else { + /* Reset metadata for not mapped or clean cache line */ + _recovery_reset_cline_metadata(cache, cline); + } + + OCF_COND_RESCHED(step, 128); + } + + OCF_METADATA_UNLOCK_WR(); +} + +static int _ocf_metadata_hash_load_recovery_legacy( + struct ocf_cache *cache) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl = NULL; + + OCF_DEBUG_TRACE(cache); + + ctrl = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + /* Collision table to get mapping cache line to HDD sector*/ + result |= ocf_metadata_raw_load_all(cache, + &(ctrl->raw_desc[metadata_segment_collision])); + + if (result) { + ocf_metadata_error(cache); + ocf_cache_log(cache, log_err, + "Metadata read for recovery FAILURE\n"); + return result; + } + + return result; +} + +static ocf_core_id_t _ocf_metadata_hash_find_core_by_seq( + struct ocf_cache *cache, ocf_seq_no_t seq_no) +{ + ocf_core_id_t i; + + if (seq_no == OCF_SEQ_NO_INVALID) + return OCF_CORE_ID_INVALID; + + for (i = OCF_CORE_ID_MIN; i <= OCF_CORE_ID_MAX; i++) + if (cache->core_conf_meta[i].seq_no == seq_no) + break; + + return i; +} +static int _ocf_metadata_hash_load_atomic(struct ocf_cache *cache, + uint64_t sector_addr, uint32_t sector_no, + ctx_data_t *data) +{ + uint32_t i; + struct ocf_atomic_metadata meta; + ocf_cache_line_t line = 0; + uint8_t pos = 0; + ocf_seq_no_t core_seq_no = OCF_SEQ_NO_INVALID; + ocf_core_id_t core_id = OCF_CORE_ID_INVALID; + uint64_t core_line = 0; + bool core_line_ok = false; + + for (i = 0; i < sector_no; i++) { + ctx_data_rd_check(cache->owner, &meta, data, sizeof(meta)); + + line = (sector_addr + i) / ocf_line_sectors(cache); + line = ocf_metadata_map_phy2lg(cache, line); + pos = (sector_addr + i) % ocf_line_sectors(cache); + core_seq_no = meta.core_seq_no; + core_line = meta.core_line; + + /* Look for core with sequence number same as cache line */ + core_id = _ocf_metadata_hash_find_core_by_seq( + cache, core_seq_no); + + if (pos == 0) + core_line_ok = false; + + if (meta.valid && core_id != OCF_CORE_ID_INVALID) { + if (!core_line_ok) { + ocf_metadata_set_core_info(cache, line, + core_id, core_line); + core_line_ok = true; + } + + metadata_set_valid_sec_one(cache, line, pos); + meta.dirty ? 
+ metadata_set_dirty_sec_one(cache, line, pos) : + metadata_clear_dirty_sec_one(cache, line, pos); + } + } + + return 0; +} + +/* + * RAM Implementation - Load all metadata elements from SSD + */ +static int _ocf_metadata_hash_load_recovery_atomic( + struct ocf_cache *cache) +{ + int result = 0; + + OCF_DEBUG_TRACE(cache); + + /* Collision table to get mapping cache line to HDD sector*/ + result |= metadata_io_read_i_atomic(cache, + _ocf_metadata_hash_load_atomic); + + if (result) { + ocf_metadata_error(cache); + ocf_cache_log(cache, log_err, + "Metadata read for recovery FAILURE\n"); + return result; + } + + return result; +} + +/* + * Load for recovery - Load only data that is required for recovery procedure + */ +static int ocf_metadata_hash_load_recovery(struct ocf_cache *cache) +{ + int result = 0; + bool rebuild_dirty_only; + + OCF_DEBUG_TRACE(cache); + + + if (ocf_data_obj_is_atomic(&cache->device->obj)) { + result = _ocf_metadata_hash_load_recovery_atomic(cache); + rebuild_dirty_only = false; + } else { + result = _ocf_metadata_hash_load_recovery_legacy(cache); + rebuild_dirty_only = true; + } + + if (!result) + _recovery_rebuild_metadata(cache, rebuild_dirty_only); + + return result; +} + +/******************************************************************************* + * Core Info + ******************************************************************************/ +static void ocf_metadata_hash_get_core_info(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_core_id_t *core_id, + uint64_t *core_sector) +{ + const struct ocf_metadata_map *collision; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + collision = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_collision]), line, + ctrl->mapping_size); + if (collision) { + if (core_id) + *core_id = collision->core_id; + if (core_sector) + *core_sector = collision->core_line; + } else { + ocf_metadata_error(cache); + + if (core_id) + *core_id = OCF_CORE_MAX; + if (core_sector) + *core_sector = ULLONG_MAX; + } +} + +static void ocf_metadata_hash_set_core_info(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_core_id_t core_id, + uint64_t core_sector) +{ + struct ocf_metadata_map *collisioin; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + collisioin = ocf_metadata_raw_wr_access(cache, + &(ctrl->raw_desc[metadata_segment_collision]), line, + ctrl->mapping_size); + + if (collisioin) { + collisioin->core_id = core_id; + collisioin->core_line = core_sector; + } else { + ocf_metadata_error(cache); + } +} + +static ocf_core_id_t ocf_metadata_hash_get_core_id( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + const struct ocf_metadata_map *collision; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + collision = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_collision]), line, + ctrl->mapping_size); + + if (collision) + return collision->core_id; + + ocf_metadata_error(cache); + return OCF_CORE_MAX; +} + +static uint64_t ocf_metadata_hash_get_core_sector( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + const struct ocf_metadata_map *collision; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + collision = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_collision]), line, + ctrl->mapping_size); + + if (collision) + return 
collision->core_line; + + ocf_metadata_error(cache); + return ULLONG_MAX; +} + +static struct ocf_metadata_uuid *ocf_metadata_hash_get_core_uuid( + struct ocf_cache *cache, ocf_core_id_t core_id) +{ + struct ocf_metadata_uuid *muuid; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + muuid = ocf_metadata_raw_wr_access(cache, + &(ctrl->raw_desc[metadata_segment_core_uuid]), + core_id, sizeof(struct ocf_metadata_uuid)); + + if (!muuid) + ocf_metadata_error(cache); + + return muuid; +} + +/******************************************************************************* + * Core and part id + ******************************************************************************/ + +static void ocf_metadata_hash_get_core_and_part_id( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_core_id_t *core_id, ocf_part_id_t *part_id) +{ + const struct ocf_metadata_map *collision; + const struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + collision = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_collision]), line, + ctrl->mapping_size); + + info = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (collision && info) { + if (core_id) + *core_id = collision->core_id; + if (part_id) + *part_id = info->partition_id; + } else { + ocf_metadata_error(cache); + if (core_id) + *core_id = OCF_CORE_MAX; + if (part_id) + *part_id = PARTITION_DEFAULT; + } +} +/******************************************************************************* + * Hash Table + ******************************************************************************/ + +/* + * Hash Table - Get + */ +static ocf_cache_line_t ocf_metadata_hash_get_hash( + struct ocf_cache *cache, ocf_cache_line_t index) +{ + ocf_cache_line_t line = cache->device->collision_table_entries; + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl + = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + result = ocf_metadata_raw_get(cache, + &(ctrl->raw_desc[metadata_segment_hash]), index, + &line, sizeof(line)); + + if (result) + ocf_metadata_error(cache); + + return line; +} + +/* + * Hash Table - Set + */ +static void ocf_metadata_hash_set_hash(struct ocf_cache *cache, + ocf_cache_line_t index, ocf_cache_line_t line) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl + = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + result = ocf_metadata_raw_set(cache, + &(ctrl->raw_desc[metadata_segment_hash]), index, + &line, sizeof(line)); + + if (result) + ocf_metadata_error(cache); +} + +/* + * Hash Table - Flush + */ +static void ocf_metadata_hash_flush_hash(struct ocf_cache *cache, + ocf_cache_line_t index) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl + = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + result = ocf_metadata_raw_flush(cache, + &(ctrl->raw_desc[metadata_segment_hash]), index); + + if (result) + ocf_metadata_error(cache); +} + +/* + * Hash Table - Get Entries + */ +static ocf_cache_line_t ocf_metadata_hash_entries_hash( + struct ocf_cache *cache) +{ + struct ocf_metadata_hash_ctrl *ctrl + = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + return ctrl->raw_desc[metadata_segment_hash].entries; +} + +/******************************************************************************* + * Cleaning Policy + 
******************************************************************************/ + +/* + * Cleaning policy - Get + */ +static void ocf_metadata_hash_get_cleaning_policy( + struct ocf_cache *cache, ocf_cache_line_t line, + struct cleaning_policy_meta *cleaning_policy) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl + = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + result = ocf_metadata_raw_get(cache, + &(ctrl->raw_desc[metadata_segment_cleaning]), line, + cleaning_policy, sizeof(*cleaning_policy)); + + if (result) + ocf_metadata_error(cache); +} + +/* + * Cleaning policy - Set + */ +static void ocf_metadata_hash_set_cleaning_policy( + struct ocf_cache *cache, ocf_cache_line_t line, + struct cleaning_policy_meta *cleaning_policy) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl + = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + result = ocf_metadata_raw_set(cache, + &(ctrl->raw_desc[metadata_segment_cleaning]), line, + cleaning_policy, sizeof(*cleaning_policy)); + + if (result) + ocf_metadata_error(cache); +} + +/* + * Cleaning policy - Flush + */ +static void ocf_metadata_hash_flush_cleaning_policy( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl + = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + result = ocf_metadata_raw_flush(cache, + &(ctrl->raw_desc[metadata_segment_cleaning]), line); + + if (result) + ocf_metadata_error(cache); +} + +/******************************************************************************* + * Eviction policy + ******************************************************************************/ + +/* + * Eviction policy - Get + */ +static void ocf_metadata_hash_get_eviction_policy( + struct ocf_cache *cache, ocf_cache_line_t line, + union eviction_policy_meta *eviction_policy) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl + = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + result = ocf_metadata_raw_get(cache, + &(ctrl->raw_desc[metadata_segment_eviction]), line, + eviction_policy, sizeof(*eviction_policy)); + + if (result) + ocf_metadata_error(cache); +} + +/* + * Cleaning policy - Set + */ +static void ocf_metadata_hash_set_eviction_policy( + struct ocf_cache *cache, ocf_cache_line_t line, + union eviction_policy_meta *eviction_policy) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl + = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + result = ocf_metadata_raw_set(cache, + &(ctrl->raw_desc[metadata_segment_eviction]), line, + eviction_policy, sizeof(*eviction_policy)); + + if (result) + ocf_metadata_error(cache); +} + +/* + * Cleaning policy - Flush + */ +static void ocf_metadata_hash_flush_eviction_policy( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + int result = 0; + struct ocf_metadata_hash_ctrl *ctrl + = (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + result = ocf_metadata_raw_flush(cache, + &(ctrl->raw_desc[metadata_segment_eviction]), line); + + if (result) + ocf_metadata_error(cache); +} + +/******************************************************************************* + * Collision + ******************************************************************************/ +static ocf_cache_line_t ocf_metadata_hash_map_lg2phy_seq( + struct ocf_cache *cache, ocf_cache_line_t coll_idx) +{ + return coll_idx; +} + +static ocf_cache_line_t ocf_metadata_hash_map_phy2lg_seq( + struct ocf_cache *cache, ocf_cache_line_t cache_line) +{ + return cache_line; 
+} + +static ocf_cache_line_t ocf_metadata_hash_map_lg2phy_striping( + struct ocf_cache *cache, ocf_cache_line_t coll_idx) +{ + ocf_cache_line_t cache_line = 0, offset = 0; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + unsigned int entries_in_page = + ctrl->raw_desc[metadata_segment_collision].entries_in_page; + unsigned int pages = + ctrl->raw_desc[metadata_segment_collision].ssd_pages; + ocf_cache_line_t collision_table_entries = + cache->device->collision_table_entries; + ocf_cache_line_t delta = + (entries_in_page * pages) - collision_table_entries; + unsigned int row = coll_idx % entries_in_page; + + if (row > entries_in_page - delta) + offset = row - (entries_in_page - delta); + else + offset = 0; + + cache_line = (row * pages) + (coll_idx / entries_in_page) - offset; + return cache_line; +} + +static ocf_cache_line_t ocf_metadata_hash_map_phy2lg_striping( + struct ocf_cache *cache, ocf_cache_line_t cache_line) +{ + ocf_cache_line_t coll_idx = 0; + + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + struct ocf_metadata_raw *raw = + &ctrl->raw_desc[metadata_segment_collision]; + + unsigned int pages = raw->ssd_pages; + unsigned int entries_in_page = raw->entries_in_page; + unsigned int entries_in_last_page = raw->entries % entries_in_page ?: + entries_in_page; + + unsigned int row = 0, coll = 0; + + unsigned int last = entries_in_last_page * pages; + + if (cache_line < last) { + row = cache_line % pages; + coll = cache_line / pages; + } else { + cache_line -= last; + row = cache_line % (pages - 1); + coll = cache_line / (pages - 1) + entries_in_last_page; + } + + coll_idx = (row * entries_in_page) + coll; + + return coll_idx; +} + +static void ocf_metadata_hash_set_collision_info( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t next, ocf_cache_line_t prev) +{ + struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_wr_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (info) { + info->next_col = next; + info->prev_col = prev; + } else { + ocf_metadata_error(cache); + } +} + +static void ocf_metadata_hash_set_collision_next( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t next) +{ + struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_wr_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (info) + info->next_col = next; + else + ocf_metadata_error(cache); +} + +static void ocf_metadata_hash_set_collision_prev( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t prev) +{ + struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_wr_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (info) + info->prev_col = prev; + else + ocf_metadata_error(cache); +} + +static void ocf_metadata_hash_get_collision_info( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t *next, ocf_cache_line_t *prev) +{ + const struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + 
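+	/* next_col/prev_col link entries that hash to the same bucket;
+	 * the caller must ask for at least one of the two neighbours.
+	 */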
ENV_BUG_ON(NULL == next && NULL == prev); + + info = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + if (info) { + if (next) + *next = info->next_col; + if (prev) + *prev = info->prev_col; + } else { + ocf_metadata_error(cache); + + if (next) + *next = cache->device->collision_table_entries; + if (prev) + *prev = cache->device->collision_table_entries; + } +} + +static ocf_cache_line_t ocf_metadata_hash_get_collision_next( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + const struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + if (info) + return info->next_col; + + ocf_metadata_error(cache); + return cache->device->collision_table_entries; +} + +static ocf_cache_line_t ocf_metadata_hash_get_collision_prev( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + const struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + if (info) + return info->prev_col; + + ocf_metadata_error(cache); + return cache->device->collision_table_entries; +} + +/******************************************************************************* + * Partition + ******************************************************************************/ + +static ocf_part_id_t ocf_metadata_hash_get_partition_id( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + const struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (info) + return info->partition_id; + + ocf_metadata_error(cache); + return PARTITION_DEFAULT; +} + +static ocf_cache_line_t ocf_metadata_hash_get_partition_next( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + const struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (info) + return info->partition_next; + + ocf_metadata_error(cache); + return PARTITION_DEFAULT; +} + +static ocf_cache_line_t ocf_metadata_hash_get_partition_prev( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + const struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (info) + return info->partition_prev; + + ocf_metadata_error(cache); + return PARTITION_DEFAULT; +} + +static void ocf_metadata_hash_get_partition_info( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_part_id_t *part_id, ocf_cache_line_t *next_line, + ocf_cache_line_t *prev_line) +{ + const struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_rd_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (info) 
{ + if (part_id) + *part_id = info->partition_id; + if (next_line) + *next_line = info->partition_next; + if (prev_line) + *prev_line = info->partition_prev; + } else { + ocf_metadata_error(cache); + if (part_id) + *part_id = PARTITION_DEFAULT; + if (next_line) + *next_line = cache->device->collision_table_entries; + if (prev_line) + *prev_line = cache->device->collision_table_entries; + } +} + +static void ocf_metadata_hash_set_partition_next( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t next_line) +{ + struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_wr_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (info) + info->partition_next = next_line; + else + ocf_metadata_error(cache); +} + +static void ocf_metadata_hash_set_partition_prev( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t prev_line) +{ + struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_wr_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (info) + info->partition_prev = prev_line; + else + ocf_metadata_error(cache); +} + +static void ocf_metadata_hash_set_partition_info( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_part_id_t part_id, ocf_cache_line_t next_line, + ocf_cache_line_t prev_line) +{ + struct ocf_metadata_list_info *info; + struct ocf_metadata_hash_ctrl *ctrl = + (struct ocf_metadata_hash_ctrl *) cache->metadata.iface_priv; + + info = ocf_metadata_raw_wr_access(cache, + &(ctrl->raw_desc[metadata_segment_list_info]), line, + sizeof(*info)); + + if (info) { + info->partition_id = part_id; + info->partition_next = next_line; + info->partition_prev = prev_line; + } else { + ocf_metadata_error(cache); + } +} + +/******************************************************************************* + * Hash Metadata interface definition + ******************************************************************************/ + +static const struct ocf_metadata_iface metadata_hash_iface = { + .init = ocf_metadata_hash_init, + .deinit = ocf_metadata_hash_deinit, + .init_variable_size = ocf_metadata_hash_init_variable_size, + .deinit_variable_size = ocf_metadata_hash_deinit_variable_size, + .init_hash_table = ocf_metadata_hash_init_hash_table, + + .layout_iface = NULL, + .pages = ocf_metadata_hash_pages, + .cachelines = ocf_metadata_hash_cachelines, + .size_of = ocf_metadata_hash_size_of, + + /* + * Load all, flushing all, etc... 
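+	 * (bulk load and flush of the whole metadata as well as
+	 * per-request asynchronous flush of modified pages)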
+ */ + .flush_all = ocf_metadata_hash_flush_all, + .flush = ocf_metadata_hash_flush, + .flush_mark = ocf_metadata_hash_flush_mark, + .flush_do_asynch = ocf_metadata_hash_flush_do_asynch, + .load_all = ocf_metadata_hash_load_all, + .load_recovery = ocf_metadata_hash_load_recovery, + + /* + * Super Block + */ + .set_shutdown_status = ocf_metadata_hash_set_shutdown_status, + .flush_superblock = ocf_metadata_hash_flush_superblock, + .load_superblock = ocf_metadata_hash_load_superblock, + + /* + * Reserved area + */ + .get_reserved_lba = ocf_metadata_hash_get_reserved_lba, + + /* + * Core Info + */ + .set_core_info = ocf_metadata_hash_set_core_info, + .get_core_info = ocf_metadata_hash_get_core_info, + .get_core_id = ocf_metadata_hash_get_core_id, + .get_core_sector = ocf_metadata_hash_get_core_sector, + .get_core_uuid = ocf_metadata_hash_get_core_uuid, + + /* + * Core and part id + */ + + .get_core_and_part_id = ocf_metadata_hash_get_core_and_part_id, + + /* + * Collision Info + */ + .get_collision_info = ocf_metadata_hash_get_collision_info, + .set_collision_info = ocf_metadata_hash_set_collision_info, + .set_collision_next = ocf_metadata_hash_set_collision_next, + .set_collision_prev = ocf_metadata_hash_set_collision_prev, + .get_collision_next = ocf_metadata_hash_get_collision_next, + .get_collision_prev = ocf_metadata_hash_get_collision_prev, + + /* + * Partition Info + */ + .get_partition_id = ocf_metadata_hash_get_partition_id, + .get_partition_next = ocf_metadata_hash_get_partition_next, + .get_partition_prev = ocf_metadata_hash_get_partition_prev, + .get_partition_info = ocf_metadata_hash_get_partition_info, + .set_partition_next = ocf_metadata_hash_set_partition_next, + .set_partition_prev = ocf_metadata_hash_set_partition_prev, + .set_partition_info = ocf_metadata_hash_set_partition_info, + + /* + * Hash Table + */ + .get_hash = ocf_metadata_hash_get_hash, + .set_hash = ocf_metadata_hash_set_hash, + .flush_hash = ocf_metadata_hash_flush_hash, + .entries_hash = ocf_metadata_hash_entries_hash, + + /* + * Cleaning Policy + */ + .get_cleaning_policy = ocf_metadata_hash_get_cleaning_policy, + .set_cleaning_policy = ocf_metadata_hash_set_cleaning_policy, + .flush_cleaning_policy = ocf_metadata_hash_flush_cleaning_policy, + + /* + * Eviction Policy + */ + .get_eviction_policy = ocf_metadata_hash_get_eviction_policy, + .set_eviction_policy = ocf_metadata_hash_set_eviction_policy, + .flush_eviction_policy = ocf_metadata_hash_flush_eviction_policy, +}; + +/******************************************************************************* + * Bitmap status + ******************************************************************************/ + +#include "metadata_bit.h" + +static const struct ocf_metadata_layout_iface layout_ifaces[ocf_metadata_layout_max] = { + [ocf_metadata_layout_striping] = { + .init_freelist = ocf_metadata_hash_init_freelist_striping, + .lg2phy = ocf_metadata_hash_map_lg2phy_striping, + .phy2lg = ocf_metadata_hash_map_phy2lg_striping + }, + [ocf_metadata_layout_seq] = { + .init_freelist = ocf_metadata_hash_init_freelist_seq, + .lg2phy = ocf_metadata_hash_map_lg2phy_seq, + .phy2lg = ocf_metadata_hash_map_phy2lg_seq + } +}; + + +static void ocf_metadata_hash_init_iface(struct ocf_cache *cache, + ocf_metadata_layout_t layout) +{ + struct ocf_metadata_iface *iface = (struct ocf_metadata_iface *) + &cache->metadata.iface; + + ENV_BUG_ON(layout >= ocf_metadata_layout_max || layout < 0); + + /* Initialize metadata location interface*/ + if (cache->device->init_mode == 
ocf_init_mode_metadata_volatile) + layout = ocf_metadata_layout_seq; + iface->layout_iface = &layout_ifaces[layout]; + + /* Initialize bit status function */ + + switch (cache->metadata.settings.size) { + case ocf_cache_line_size_4: + iface->test_dirty = _ocf_metadata_test_dirty_u8; + iface->test_out_dirty = _ocf_metadata_test_out_dirty_u8; + iface->clear_dirty = _ocf_metadata_clear_dirty_u8; + iface->set_dirty = _ocf_metadata_set_dirty_u8; + iface->test_and_set_dirty = _ocf_metadata_test_and_set_dirty_u8; + iface->test_and_clear_dirty = + _ocf_metadata_test_and_clear_dirty_u8; + iface->test_valid = _ocf_metadata_test_valid_u8; + iface->test_out_valid = _ocf_metadata_test_out_valid_u8; + iface->clear_valid = _ocf_metadata_clear_valid_u8; + iface->set_valid = _ocf_metadata_set_valid_u8; + iface->test_and_set_valid = _ocf_metadata_test_and_set_valid_u8; + iface->test_and_clear_valid = + _ocf_metadata_test_and_clear_valid_u8; + break; + + case ocf_cache_line_size_8: + iface->test_dirty = _ocf_metadata_test_dirty_u16; + iface->test_out_dirty = _ocf_metadata_test_out_dirty_u16; + iface->clear_dirty = _ocf_metadata_clear_dirty_u16; + iface->set_dirty = _ocf_metadata_set_dirty_u16; + iface->test_and_set_dirty = + _ocf_metadata_test_and_set_dirty_u16; + iface->test_and_clear_dirty = + _ocf_metadata_test_and_clear_dirty_u16; + iface->test_valid = _ocf_metadata_test_valid_u16; + iface->test_out_valid = _ocf_metadata_test_out_valid_u16; + iface->clear_valid = _ocf_metadata_clear_valid_u16; + iface->set_valid = _ocf_metadata_set_valid_u16; + iface->test_and_set_valid = + _ocf_metadata_test_and_set_valid_u16; + iface->test_and_clear_valid = + _ocf_metadata_test_and_clear_valid_u16; + break; + + case ocf_cache_line_size_16: + iface->test_dirty = _ocf_metadata_test_dirty_u32; + iface->test_out_dirty = _ocf_metadata_test_out_dirty_u32; + iface->clear_dirty = _ocf_metadata_clear_dirty_u32; + iface->set_dirty = _ocf_metadata_set_dirty_u32; + iface->test_and_set_dirty = + _ocf_metadata_test_and_set_dirty_u32; + iface->test_and_clear_dirty = + _ocf_metadata_test_and_clear_dirty_u32; + iface->test_valid = _ocf_metadata_test_valid_u32; + iface->test_out_valid = _ocf_metadata_test_out_valid_u32; + iface->clear_valid = _ocf_metadata_clear_valid_u32; + iface->set_valid = _ocf_metadata_set_valid_u32; + iface->test_and_set_valid = + _ocf_metadata_test_and_set_valid_u32; + iface->test_and_clear_valid = + _ocf_metadata_test_and_clear_valid_u32; + break; + case ocf_cache_line_size_32: + iface->test_dirty = _ocf_metadata_test_dirty_u64; + iface->test_out_dirty = _ocf_metadata_test_out_dirty_u64; + iface->clear_dirty = _ocf_metadata_clear_dirty_u64; + iface->set_dirty = _ocf_metadata_set_dirty_u64; + iface->test_and_set_dirty = + _ocf_metadata_test_and_set_dirty_u64; + iface->test_and_clear_dirty = + _ocf_metadata_test_and_clear_dirty_u64; + iface->test_valid = _ocf_metadata_test_valid_u64; + iface->test_out_valid = _ocf_metadata_test_out_valid_u64; + iface->clear_valid = _ocf_metadata_clear_valid_u64; + iface->set_valid = _ocf_metadata_set_valid_u64; + iface->test_and_set_valid = + _ocf_metadata_test_and_set_valid_u64; + iface->test_and_clear_valid = + _ocf_metadata_test_and_clear_valid_u64; + break; + + case ocf_cache_line_size_64: + iface->test_dirty = _ocf_metadata_test_dirty_u128; + iface->test_out_dirty = _ocf_metadata_test_out_dirty_u128; + iface->clear_dirty = _ocf_metadata_clear_dirty_u128; + iface->set_dirty = _ocf_metadata_set_dirty_u128; + iface->test_and_set_dirty = + _ocf_metadata_test_and_set_dirty_u128; + 
iface->test_and_clear_dirty = + _ocf_metadata_test_and_clear_dirty_u128; + iface->test_valid = _ocf_metadata_test_valid_u128; + iface->test_out_valid = _ocf_metadata_test_out_valid_u128; + iface->clear_valid = _ocf_metadata_clear_valid_u128; + iface->set_valid = _ocf_metadata_set_valid_u128; + iface->test_and_set_valid = + _ocf_metadata_test_and_set_valid_u128; + iface->test_and_clear_valid = + _ocf_metadata_test_and_clear_valid_u128; + break; + + default: + ENV_BUG(); + break; + } +} + +/* + * Get metadata hash interface + */ +const struct ocf_metadata_iface *metadata_hash_get_iface(void) +{ + return &metadata_hash_iface; +} diff --git a/src/metadata/metadata_hash.h b/src/metadata/metadata_hash.h new file mode 100644 index 0000000..82cdf5e --- /dev/null +++ b/src/metadata/metadata_hash.h @@ -0,0 +1,49 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_HASH_H__ +#define __METADATA_HASH_H__ + +/** + * @file metadata_.h + * @brief Metadata Service - Hash Implementation + */ + +#include "../ocf_request.h" +/** + * @brief Metada hash elements type + */ +enum ocf_metadata_segment { + metadata_segment_sb_config = 0, /*!< Super block conf */ + metadata_segment_sb_runtime, /*!< Super block runtime */ + metadata_segment_reserved, /*!< Reserved space on disk */ + metadata_segment_core_config, /*!< Core Config Metadata */ + metadata_segment_core_runtime, /*!< Core Runtime Metadata */ + metadata_segment_core_uuid, /*!< Core UUID */ + /* .... new fixed size sections go here */ + + metadata_segment_fixed_size_max, + metadata_segment_variable_size_start = metadata_segment_fixed_size_max, + + /* sections with size dependent on cache device size go here: */ + metadata_segment_cleaning = /*!< Cleaning policy */ + metadata_segment_variable_size_start, + metadata_segment_eviction, /*!< Eviction policy */ + metadata_segment_collision, /*!< Collision */ + metadata_segment_list_info, /*!< Collision */ + metadata_segment_hash, /*!< Hash */ + /* .... new variable size sections go here */ + + metadata_segment_max, /*!< MAX */ +}; + +/** + * @brief Get metadata interface implementation + * + * @return metadata interface + */ +const struct ocf_metadata_iface *metadata_hash_get_iface(void); + +#endif /* METADATA_HASH_H_ */ diff --git a/src/metadata/metadata_io.c b/src/metadata/metadata_io.c new file mode 100644 index 0000000..0c0585f --- /dev/null +++ b/src/metadata/metadata_io.c @@ -0,0 +1,629 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#include "metadata.h" +#include "metadata_io.h" +#include "../ocf_priv.h" +#include "../engine/cache_engine.h" +#include "../engine/engine_common.h" +#include "../engine/engine_bf.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_allocator.h" +#include "../utils/utils_io.h" +#include "../ocf_def_priv.h" + +#define OCF_METADATA_IO_DEBUG 0 + +#if 1 == OCF_METADATA_IO_DEBUG +#define OCF_DEBUG_TRACE(cache) \ + ocf_cache_log(cache, log_info, "[Metadata][IO] %s\n", __func__) + +#define OCF_DEBUG_MSG(cache, msg) \ + ocf_cache_log(cache, log_info, "[Metadata][IO] %s - %s\n", \ + __func__, msg) + +#define OCF_DEBUG_PARAM(cache, format, ...) \ + ocf_cache_log(cache, log_info, "[Metadata][IO] %s - "format"\n", \ + __func__, ##__VA_ARGS__) +#else +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_MSG(cache, msg) +#define OCF_DEBUG_PARAM(cache, format, ...) 
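+/* Debug logging is compiled out unless OCF_METADATA_IO_DEBUG is set to 1 above */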
+#endif + +static void metadata_io_write_i_end_asynch(void *private_data, int error); +static int ocf_restart_meta_io(struct ocf_request *req); + +static struct ocf_io_if meta_restart_if = { + .read = ocf_restart_meta_io, + .write = ocf_restart_meta_io +}; + +/* + * Get max pages for IO + */ +static uint32_t metadata_io_max_page(struct ocf_cache *cache) +{ + return ocf_data_obj_get_max_io_size(&cache->device->obj) / + BYTES_TO_SECTORS(PAGE_SIZE); +} + +/* + * Iterative read end callback + */ +static void metadata_io_read_i_atomic_end(struct ocf_io *io, int error) +{ + struct metadata_io_request_atomic *meta_atom_req = io->priv1; + + OCF_DEBUG_TRACE(ocf_data_obj_get_cache(io->obj)); + + meta_atom_req->error |= error; + env_completion_complete(&meta_atom_req->complete); +} + +/* + * Iterative read request + */ +int metadata_io_read_i_atomic(struct ocf_cache *cache, + ocf_metadata_atomic_io_event_t hndl) +{ + uint64_t i; + uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE; + uint64_t io_sectors_count = cache->device->collision_table_entries * + ocf_line_sectors(cache); + uint64_t count, curr_count; + int result = 0; + struct ocf_io *io; + ctx_data_t *data; + struct metadata_io_request_atomic meta_atom_req; + unsigned char step = 0; + + OCF_DEBUG_TRACE(cache); + + /* Allocate one 4k page for metadata*/ + data = ctx_data_alloc(cache->owner, 1); + if (!data) + return -ENOMEM; + + count = io_sectors_count; + for (i = 0; i < io_sectors_count; i += curr_count) { + /* Get sectors count of this IO iteration */ + curr_count = MIN(max_sectors_count, count); + + env_completion_init(&meta_atom_req.complete); + meta_atom_req.error = 0; + + /* Reset position in data buffer */ + ctx_data_seek(cache->owner, data, ctx_data_seek_begin, 0); + + /* Allocate new IO */ + io = ocf_new_cache_io(cache); + if (!io) { + result = -ENOMEM; + break; + } + + /* Setup IO */ + ocf_io_configure(io, + cache->device->metadata_offset + + SECTORS_TO_BYTES(i), + SECTORS_TO_BYTES(curr_count), + OCF_READ, 0, 0); + ocf_io_set_cmpl(io, &meta_atom_req, NULL, + metadata_io_read_i_atomic_end); + result = ocf_io_set_data(io, data, 0); + if (result) { + ocf_io_put(io); + break; + } + + /* Submit IO */ + ocf_dobj_submit_metadata(io); + ocf_io_put(io); + + /* Wait for completion of IO */ + env_completion_wait(&meta_atom_req.complete); + + /* Check for error */ + if (meta_atom_req.error) { + result = meta_atom_req.error; + break; + } + + result |= hndl(cache, i, curr_count, data); + if (result) + break; + + count -= curr_count; + + OCF_COND_RESCHED(step, 128); + } + + /* Memory free */ + ctx_data_free(cache->owner, data); + + return result; +} + +static int ocf_restart_meta_io(struct ocf_request *req) +{ + struct ocf_io *io; + struct metadata_io_request *meta_io_req; + struct ocf_cache *cache; + int i; + int ret; + + cache = req->cache; + meta_io_req = req->priv; + + /* Fill with the latest metadata. 
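+	 * A request deferred here is re-filled under the metadata read lock
+	 * so that it writes the most recent values when it is re-issued.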
*/ + OCF_METADATA_LOCK_RD(); + for (i = 0; i < meta_io_req->count; i++) { + meta_io_req->on_meta_fill(cache, meta_io_req->data, + meta_io_req->page + i, meta_io_req->context); + + } + OCF_METADATA_UNLOCK_RD(); + + io = ocf_new_cache_io(cache); + if (!io) { + metadata_io_write_i_end_asynch(meta_io_req, -ENOMEM); + return 0; + } + + /* Setup IO */ + ocf_io_configure(io, + PAGES_TO_BYTES(meta_io_req->page), + PAGES_TO_BYTES(meta_io_req->count), + OCF_WRITE, 0, 0); + + ocf_io_set_default_cmpl(io, meta_io_req, + metadata_io_write_i_end_asynch); + ret = ocf_io_set_data(io, meta_io_req->data, 0); + if (ret) { + ocf_io_put(io); + metadata_io_write_i_end_asynch(meta_io_req, ret); + return ret; + } + ocf_dobj_submit_io(io); + return 0; +} + +/* + * Iterative asynchronous write callback + */ +static void metadata_io_write_i_end_asynch(void *private_data, int error) +{ + struct metadata_io_request *request = (private_data); + struct metadata_io_request_asynch *a_req; + struct ocf_cache *cache; + + OCF_CHECK_NULL(request); + + cache = request->cache; + + a_req = request->asynch; + OCF_CHECK_NULL(a_req); + OCF_CHECK_NULL(a_req->on_complete); + + if (error) { + request->error |= error; + request->asynch->error |= error; + } + + if (env_atomic_dec_return(&request->req_remaining)) + return; + + OCF_DEBUG_PARAM(cache, "Page = %u", request->page); + + ctx_data_free(cache->owner, request->data); + request->data = NULL; + + if (env_atomic_dec_return(&a_req->req_remaining)) { + env_atomic_set(&request->finished, 1); + ocf_metadata_updater_kick(cache); + return; + } + + OCF_DEBUG_MSG(cache, "Asynchronous IO completed"); + + /* All IOs have been finished, call IO end callback */ + a_req->on_complete(request->cache, a_req->context, request->error); + + /* + * If it's last request, we mark is as finished + * after calling IO end callback + */ + env_atomic_set(&request->finished, 1); + ocf_metadata_updater_kick(cache); +} + +static void metadata_io_req_error(struct ocf_cache *cache, + struct metadata_io_request_asynch *a_req, + uint32_t i, int error) +{ + a_req->error |= error; + a_req->reqs[i].error |= error; + a_req->reqs[i].count = 0; + if (a_req->reqs[i].data) + ctx_data_free(cache->owner, a_req->reqs[i].data); + a_req->reqs[i].data = NULL; +} + +/* + * Iterative write request asynchronously + */ +int metadata_io_write_i_asynch(struct ocf_cache *cache, uint32_t queue, + void *context, uint32_t page, uint32_t count, + ocf_metadata_io_event_t fill_hndl, + ocf_metadata_io_hndl_on_write_t compl_hndl) +{ + uint32_t curr_count, written; + uint32_t max_count = metadata_io_max_page(cache); + uint32_t io_count = DIV_ROUND_UP(count, max_count); + uint32_t i, i_fill; + int error = 0, ret; + struct ocf_io *io; + + /* Allocation and initialization of asynchronous metadata IO request */ + struct metadata_io_request_asynch *a_req; + + if (count == 0) + return 0; + + a_req = env_zalloc(sizeof(*a_req), ENV_MEM_NOIO); + if (!a_req) + return -OCF_ERR_NO_MEM; + + env_atomic_set(&a_req->req_remaining, io_count); + env_atomic_set(&a_req->req_active, io_count); + a_req->on_complete = compl_hndl; + a_req->context = context; + a_req->page = page; + + /* Allocate particular requests and initialize them */ + OCF_REALLOC_CP(&a_req->reqs, sizeof(a_req->reqs[0]), + io_count, &a_req->reqs_limit); + if (!a_req->reqs) { + env_free(a_req); + ocf_cache_log(cache, log_warn, + "No memory during metadata IO\n"); + return -OCF_ERR_NO_MEM; + } + /* IO Requests initialization */ + for (i = 0; i < io_count; i++) { + 
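+		/* Each sub-request starts with one outstanding reference;
+		 * it is dropped by its completion callback.
+		 */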
env_atomic_set(&(a_req->reqs[i].req_remaining), 1); + env_atomic_set(&(a_req->reqs[i].finished), 0); + a_req->reqs[i].asynch = a_req; + } + + OCF_DEBUG_PARAM(cache, "IO count = %u", io_count); + + i = 0; + written = 0; + while (count) { + /* Get pages count of this IO iteration */ + if (count > max_count) + curr_count = max_count; + else + curr_count = count; + + /* Fill request */ + a_req->reqs[i].cache = cache; + a_req->reqs[i].context = context; + a_req->reqs[i].page = page + written; + a_req->reqs[i].count = curr_count; + a_req->reqs[i].on_meta_fill = fill_hndl; + a_req->reqs[i].fl_req.io_if = &meta_restart_if; + a_req->reqs[i].fl_req.io_queue = queue; + a_req->reqs[i].fl_req.cache = cache; + a_req->reqs[i].fl_req.priv = &a_req->reqs[i]; + a_req->reqs[i].fl_req.info.internal = true; + + /* + * We don't want allocate map for this request in + * threads. + */ + a_req->reqs[i].fl_req.map = LIST_POISON1; + + INIT_LIST_HEAD(&a_req->reqs[i].list); + + a_req->reqs[i].data = ctx_data_alloc(cache->owner, curr_count); + if (!a_req->reqs[i].data) { + error = -OCF_ERR_NO_MEM; + metadata_io_req_error(cache, a_req, i, error); + break; + } + + /* Issue IO if it is not overlapping with anything else */ + ret = metadata_updater_check_overlaps(cache, &a_req->reqs[i]); + if (ret == 0) { + /* Allocate new IO */ + io = ocf_new_cache_io(cache); + if (!io) { + error = -OCF_ERR_NO_MEM; + metadata_io_req_error(cache, a_req, i, error); + break; + } + + for (i_fill = 0; i_fill < curr_count; i_fill++) { + fill_hndl(cache, a_req->reqs[i].data, + page + written + i_fill, + context); + } + + /* Setup IO */ + ocf_io_configure(io, + PAGES_TO_BYTES(a_req->reqs[i].page), + PAGES_TO_BYTES(a_req->reqs[i].count), + OCF_WRITE, 0, 0); + + ocf_io_set_default_cmpl(io, &a_req->reqs[i], + metadata_io_write_i_end_asynch); + error = ocf_io_set_data(io, a_req->reqs[i].data, 0); + if (error) { + ocf_io_put(io); + metadata_io_req_error(cache, a_req, i, error); + break; + } + + ocf_dobj_submit_io(io); + } + + count -= curr_count; + written += curr_count; + i++; + } + + if (error == 0) { + /* No error, return 0 that indicates operation successful */ + return 0; + } + + OCF_DEBUG_MSG(cache, "ERROR"); + + if (i == 0) { + /* + * If no requests were submitted, we just call completion + * callback, free memory and return error. + */ + compl_hndl(cache, context, error); + + OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit); + env_free(a_req); + + return error; + } + + /* + * Decrement total reaming requests with IO that were not triggered. + * If we reached zero, we need to call completion callback. + */ + if (env_atomic_sub_return(io_count - i, &a_req->req_remaining) == 0) + compl_hndl(cache, context, error); + + /* + * Decrement total active requests with IO that were not triggered. + * If we reached zero, we need to free memory. 
+ */ + if (env_atomic_sub_return(io_count - i, &a_req->req_active) == 0) { + OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit); + env_free(a_req); + } + + return error; +} + +int ocf_metadata_io_init(ocf_cache_t cache) +{ + return ocf_metadata_updater_init(cache); +} + +void ocf_metadata_io_deinit(ocf_cache_t cache) +{ + ocf_metadata_updater_stop(cache); +} + +static void metadata_io_end(struct ocf_io *io, int error) +{ + struct metadata_io *mio = io->priv1; + ctx_data_t *data = ocf_io_get_data(io); + uint32_t page = BYTES_TO_PAGES(io->addr); + uint32_t count = BYTES_TO_PAGES(io->bytes); + struct ocf_cache *cache = mio->cache; + uint32_t i = 0; + + if (error) { + mio->error |= error; + goto out; + } + + for (i = 0; mio->dir == OCF_READ && i < count; i++) { + mio->error |= mio->hndl_fn(cache, data, page + i, + mio->hndl_cntx); + } + +out: + ctx_data_free(cache->owner, data); + ocf_io_put(io); + + if (env_atomic_dec_return(&mio->rq_remaining)) + return; + + env_completion_complete(&mio->completion); +} + +static int metadata_submit_io( + struct ocf_cache *cache, + struct metadata_io *mio, + uint32_t count, + uint32_t written) +{ + ctx_data_t *data; + struct ocf_io *io; + int err; + int i; + + /* Allocate IO */ + io = ocf_new_cache_io(cache); + if (!io) { + err = -ENOMEM; + goto error; + } + + /* Allocate data buffer for this IO */ + data = ctx_data_alloc(cache->owner, count); + if (!data) { + err = -ENOMEM; + goto put_io; + } + + /* Fill data */ + for (i = 0; mio->dir == OCF_WRITE && i < count; i++) { + err = mio->hndl_fn(cache, data, + mio->page + written + i, mio->hndl_cntx); + if (err) + goto free_data; + } + + /* Setup IO */ + ocf_io_configure(io, + PAGES_TO_BYTES(mio->page + written), + PAGES_TO_BYTES(count), + mio->dir, 0, 0); + ocf_io_set_cmpl(io, mio, NULL, metadata_io_end); + err = ocf_io_set_data(io, data, 0); + if (err) + goto free_data; + + /* Submit IO */ + env_atomic_inc(&mio->rq_remaining); + ocf_dobj_submit_io(io); + + return 0; + +free_data: + ctx_data_free(cache->owner, data); +put_io: + ocf_io_put(io); +error: + mio->error = err; + return err; +} + + +/* + * + */ +static int metadata_io(struct metadata_io *mio) +{ + uint32_t max_count = metadata_io_max_page(mio->cache); + uint32_t this_count, written = 0; + uint32_t count = mio->count; + unsigned char step = 0; + int err; + + struct ocf_cache *cache = mio->cache; + + /* Check direction value correctness */ + switch (mio->dir) { + case OCF_WRITE: + case OCF_READ: + break; + default: + return -EINVAL; + } + + env_atomic_set(&mio->rq_remaining, 1); + env_completion_init(&mio->completion); + + while (count) { + this_count = MIN(count, max_count); + + err = metadata_submit_io(cache, mio, this_count, written); + if (err) + break; + + /* Update counters */ + count -= this_count; + written += this_count; + + OCF_COND_RESCHED(step, 128); + } + + if (env_atomic_dec_return(&mio->rq_remaining) == 0) + env_completion_complete(&mio->completion); + + /* Wait for all IO to be finished */ + env_completion_wait(&mio->completion); + + return mio->error; +} + +/* + * + */ +int metadata_io_write_i(struct ocf_cache *cache, + uint32_t page, uint32_t count, + ocf_metadata_io_event_t hndl_fn, void *hndl_cntx) +{ + struct metadata_io mio = { + .dir = OCF_WRITE, + .cache = cache, + .page = page, + .count = count, + .hndl_fn = hndl_fn, + .hndl_cntx = hndl_cntx, + }; + + return metadata_io(&mio); +} + +/* + * + */ +int metadata_io_read_i(struct ocf_cache *cache, + uint32_t page, uint32_t count, + ocf_metadata_io_event_t hndl_fn, void *hndl_cntx) +{ 
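+	/* Synchronous iterative read - reuses the common metadata_io()
+	 * path with the direction set to OCF_READ.
+	 */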
+ struct metadata_io mio = { + .dir = OCF_READ, + .cache = cache, + .page = page, + .count = count, + .hndl_fn = hndl_fn, + .hndl_cntx = hndl_cntx, + }; + + return metadata_io(&mio); +} + +/* + * + */ +static int metadata_io_write_fill(struct ocf_cache *cache, + ctx_data_t *data, uint32_t page, void *context) +{ + ctx_data_wr_check(cache->owner, data, context, PAGE_SIZE); + return 0; +} + +/* + * Write request + */ +int metadata_io_write(struct ocf_cache *cache, + void *data, uint32_t page) +{ + struct metadata_io mio = { + .dir = OCF_WRITE, + .cache = cache, + .page = page, + .count = 1, + .hndl_fn = metadata_io_write_fill, + .hndl_cntx = data, + }; + + + return metadata_io(&mio); +} diff --git a/src/metadata/metadata_io.h b/src/metadata/metadata_io.h new file mode 100644 index 0000000..40d4e4e --- /dev/null +++ b/src/metadata/metadata_io.h @@ -0,0 +1,188 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_IO_H__ +#define __METADATA_IO_H__ + +/** + * @file metadata_io.h + * @brief Metadata IO utilities + */ + +/** + * @brief Metadata IO event + * + * The client of metadata IO service if informed trough this event: + * - on completion of read from cache device + * - on fill data which will be written into cache device + * + * @param data[in,out] Environment data for read ot write IO + * @param page[in] Page which is issued + * @param context[in] context caller + * + * @retval 0 Success + * @retval Non-zero Error which will bee finally returned to the caller + */ +typedef int (*ocf_metadata_io_event_t)(struct ocf_cache *cache, + ctx_data_t *data, uint32_t page, void *context); + +/** + * @brief Metadata write end callback + * + * @param cache - Cache instance + * @param context - Read context + * @param error - error + * @param page - page that was written + */ +typedef void (*ocf_metadata_io_hndl_on_write_t)(struct ocf_cache *cache, + void *context, int error); + +struct metadata_io_request_asynch; + +/* + * IO request context + */ +struct metadata_io_request { + struct ocf_cache *cache; + void *context; + uint32_t page; + uint32_t count; + ocf_metadata_io_event_t on_meta_fill; + env_atomic req_remaining; + ctx_data_t *data; + env_completion completion; + int error; + struct metadata_io_request_asynch *asynch; + env_atomic finished; + + struct ocf_request fl_req; + struct list_head list; +}; + +/* + * IO request context + */ +struct metadata_io_request_atomic { + env_completion complete; + int error; +}; + +/* + * + */ +struct metadata_io { + int error; + int dir; + struct ocf_cache *cache; + uint32_t page; + uint32_t count; + env_completion completion; + env_atomic rq_remaining; + ocf_metadata_io_event_t hndl_fn; + void *hndl_cntx; +}; + +/* + * Asynchronous IO request context + */ +struct metadata_io_request_asynch { + struct ocf_cache *cache; + struct metadata_io_request *reqs; + void *context; + int error; + size_t reqs_limit; + env_atomic req_remaining; + env_atomic req_active; + uint32_t page; + ocf_metadata_io_hndl_on_write_t on_complete; +}; + +/** + * @brief Metadata read end callback + * + * @param cache Cache instance + * @param sector_addr Begin sector of metadata + * @param sector_no Number of sectors + * @param data Data environment buffer with atomic metadata + * + * @retval 0 Success + * @retval Non-zero Error which will bee finally returned to the caller + */ +typedef int (*ocf_metadata_atomic_io_event_t)( + struct ocf_cache *cache, uint64_t sector_addr, + uint32_t sector_no, ctx_data_t *data); + 
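+/*
+ * Example (illustrative sketch, not part of the API): a minimal fill
+ * callback matching ocf_metadata_io_event_t. The name my_page_fill is
+ * hypothetical; the body mirrors metadata_io_write_fill() from
+ * metadata_io.c and copies one page of caller-provided data into the
+ * IO buffer:
+ *
+ *	static int my_page_fill(struct ocf_cache *cache, ctx_data_t *data,
+ *			uint32_t page, void *context)
+ *	{
+ *		ctx_data_wr_check(cache->owner, data, context, PAGE_SIZE);
+ *		return 0;
+ *	}
+ *
+ * Such a callback is passed as hndl_fn to the iterative write helpers
+ * declared below.
+ */
+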
+/** + * @brief Write page request + * + * @param cache - Cache instance + * @param data - Data to be written for specified page + * @param page - Page of SSD (cache device) where data has to be placed + * @return 0 - No errors, otherwise error occurred + */ +int metadata_io_write(struct ocf_cache *cache, + void *data, uint32_t page); + +int metadata_io_read_i_atomic(struct ocf_cache *cache, + ocf_metadata_atomic_io_event_t hndl); + +/** + * @brief Iterative pages write + * + * @param cache - Cache instance + * @param page - Start page of SSD (cache device) where data will be written + * @param count - Count of pages to be processed + * @param hndl_fn - Fill callback called to fill each page with data + * @param hndl_cntx - Caller context which is passed on fill callback request + * + * @return 0 - No errors, otherwise error occurred + */ +int metadata_io_write_i(struct ocf_cache *cache, + uint32_t page, uint32_t count, + ocf_metadata_io_event_t hndl_fn, void *hndl_cntx); + +/** + * @brief Iterative pages read + * + * @param cache - Cache instance + * @param page - Start page of SSD (cache device) from which data will be read + * @param count - Count of pages to be processed + * @param hndl_fn - Callback function is called on each page read completion + * @param hndl_cntx - Caller context passed during handle function call + * + * @return 0 - No errors, otherwise error occurred + */ +int metadata_io_read_i(struct ocf_cache *cache, + uint32_t page, uint32_t count, + ocf_metadata_io_event_t hndl_fn, void *hndl_cntx); + +/** + * @brief Iterative asynchronous pages write + * + * @param cache - Cache instance + * @param context - Caller context passed to the fill and completion callbacks + * @param page - Start page of SSD (cache device) where data will be written + * @param count - Count of pages to be processed + * @param fill - Fill callback + * @param complete - All IOs completed callback + * + * @return 0 - No errors, otherwise error occurred + */ +int metadata_io_write_i_asynch(struct ocf_cache *cache, uint32_t queue, + void *context, uint32_t page, uint32_t count, + ocf_metadata_io_event_t fill_hndl, + ocf_metadata_io_hndl_on_write_t compl_hndl); + +/** + * Function for initializing metadata io. + */ +int ocf_metadata_io_init(ocf_cache_t cache); + +/** + * Function for deinitializing metadata io. 
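+ * Stops the metadata updater started by ocf_metadata_io_init().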
+ */ +void ocf_metadata_io_deinit(ocf_cache_t cache); + +#endif /* METADATA_IO_UTILS_H_ */ diff --git a/src/metadata/metadata_misc.c b/src/metadata/metadata_misc.c new file mode 100644 index 0000000..04b0672 --- /dev/null +++ b/src/metadata/metadata_misc.c @@ -0,0 +1,126 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "metadata.h" +#include "../utils/utils_cache_line.h" + +static bool _is_cache_line_acting(struct ocf_cache *cache, + uint32_t cache_line, ocf_core_id_t core_id, + uint64_t start_line, uint64_t end_line) +{ + ocf_core_id_t tmp_core_id; + uint64_t core_line; + + ocf_metadata_get_core_info(cache, cache_line, + &tmp_core_id, &core_line); + + if (core_id != OCF_CORE_ID_INVALID) { + if (core_id != tmp_core_id) + return false; + + if (core_line < start_line || core_line > end_line) + return false; + + } else if (tmp_core_id == OCF_CORE_ID_INVALID) { + return false; + } + + return true; +} + +/* + * Iterates over cache lines that belong to the core device with + * core ID = core_id whose core byte addresses are in the range + * [start_byte, end_byte] and applies actor(cache, cache_line) to all + * matching cache lines + * + * set partition_id to PARTITION_INVALID to not care about partition_id + * + * METADATA lock must be held before calling this function + */ +int ocf_metadata_actor(struct ocf_cache *cache, + ocf_part_id_t part_id, ocf_core_id_t core_id, + uint64_t start_byte, uint64_t end_byte, + ocf_metadata_actor_t actor) +{ + uint32_t step = 0; + ocf_cache_line_t i, next_i; + uint64_t start_line, end_line; + int ret = 0; + + start_line = ocf_bytes_2_lines(cache, start_byte); + end_line = ocf_bytes_2_lines(cache, end_byte); + + if (part_id != PARTITION_INVALID) { + for (i = cache->user_parts[part_id].runtime->head; + i != cache->device->collision_table_entries; + i = next_i) { + next_i = ocf_metadata_get_partition_next(cache, i); + + if (_is_cache_line_acting(cache, i, core_id, + start_line, end_line)) { + if (ocf_cache_line_is_used(cache, i)) + ret = -EAGAIN; + else + actor(cache, i); + } + + OCF_COND_RESCHED_DEFAULT(step); + } + } else { + for (i = 0; i < cache->device->collision_table_entries; ++i) { + if (_is_cache_line_acting(cache, i, core_id, + start_line, end_line)) { + if (ocf_cache_line_is_used(cache, i)) + ret = -EAGAIN; + else + actor(cache, i); + } + + OCF_COND_RESCHED_DEFAULT(step); + } + } + + return ret; +} + +/* the caller must hold the relevant cache block concurrency reader lock + * and the metadata lock + */ +void ocf_metadata_sparse_cache_line(struct ocf_cache *cache, + uint32_t cache_line) +{ + ocf_part_id_t partition_id = + ocf_metadata_get_partition_id(cache, cache_line); + + ocf_metadata_remove_from_collision(cache, cache_line, partition_id); + + ocf_metadata_remove_from_partition(cache, partition_id, cache_line); + + ocf_metadata_add_to_free_list(cache, cache_line); +} + +static void _ocf_metadata_sparse_cache_line(struct ocf_cache *cache, + uint32_t cache_line) +{ + set_cache_line_invalid_no_flush(cache, 0, ocf_line_end_sector(cache), + cache_line); + + /* + * This is especially for removing inactive core + */ + metadata_clear_dirty(cache, cache_line); +} + +/* caller must hold metadata lock + * set core_id to -1 to clean the whole cache device + */ +int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id, + uint64_t start_byte, uint64_t end_byte) +{ + return ocf_metadata_actor(cache, PARTITION_INVALID, core_id, + start_byte, end_byte, 
_ocf_metadata_sparse_cache_line); +} diff --git a/src/metadata/metadata_misc.h b/src/metadata/metadata_misc.h new file mode 100644 index 0000000..2d2c8b1 --- /dev/null +++ b/src/metadata/metadata_misc.h @@ -0,0 +1,30 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_MISC_H__ +#define __METADATA_MISC_H__ + +static inline ocf_cache_line_t ocf_metadata_hash_func(ocf_cache_t cache, + uint64_t cache_line_num, ocf_core_id_t core_id) +{ + return (ocf_cache_line_t) ((cache_line_num * (core_id + 1)) % + cache->device->hash_table_entries); +} + +void ocf_metadata_sparse_cache_line(struct ocf_cache *cache, + ocf_cache_line_t cache_line); + +int ocf_metadata_sparse_range(struct ocf_cache *cache, int core_id, + uint64_t start_byte, uint64_t end_byte); + +typedef void (*ocf_metadata_actor_t)(struct ocf_cache *cache, + ocf_cache_line_t cache_line); + +int ocf_metadata_actor(struct ocf_cache *cache, + ocf_part_id_t part_id, ocf_core_id_t core_id, + uint64_t start_byte, uint64_t end_byte, + ocf_metadata_actor_t actor); + +#endif /* __METADATA_MISC_H__ */ diff --git a/src/metadata/metadata_partition.c b/src/metadata/metadata_partition.c new file mode 100644 index 0000000..de02dcc --- /dev/null +++ b/src/metadata/metadata_partition.c @@ -0,0 +1,227 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "metadata.h" +#include "../utils/utils_part.h" + +/* Sets the given collision_index as the new _head_ of the Partition list. */ +static void update_partition_head(struct ocf_cache *cache, + ocf_part_id_t part_id, ocf_cache_line_t line) +{ + struct ocf_user_part *part = &cache->user_parts[part_id]; + + part->runtime->head = line; +} + +void ocf_metadata_remove_from_free_list(struct ocf_cache *cache, + ocf_cache_line_t cline) +{ + struct ocf_part *free_list = cache->device->freelist_part; + int is_head, is_tail; + ocf_part_id_t invalid_part_id = PARTITION_INVALID; + ocf_cache_line_t prev, next; + ocf_cache_line_t line_entries = cache->device->collision_table_entries; + + ENV_BUG_ON(cline >= line_entries); + + /* Get Partition info */ + ocf_metadata_get_partition_info(cache, cline, NULL, &next, &prev); + + /* Find out if this node is Partition _head_ */ + is_head = (prev == line_entries); + is_tail = (next == line_entries); + + /* Case 1: If we are head and there is only one node. So unlink node + * and set that there is no node left in the list. + */ + if (is_head && (free_list->curr_size == 1)) { + ocf_metadata_set_partition_info(cache, cline, invalid_part_id, + line_entries, line_entries); + free_list->head = line_entries; + free_list->tail = line_entries; + } else if (is_head) { + /* Case 2: else if this collision_index is partition list head, + * but many nodes, update head and return + */ + ENV_BUG_ON(next >= line_entries); + + free_list->head = next; + ocf_metadata_set_partition_prev(cache, next, line_entries); + ocf_metadata_set_partition_next(cache, cline, line_entries); + } else if (is_tail) { + /* Case 3: else if this cline is partition list tail */ + ENV_BUG_ON(prev >= line_entries); + + free_list->tail = prev; + ocf_metadata_set_partition_prev(cache, cline, line_entries); + ocf_metadata_set_partition_next(cache, prev, line_entries); + } else { + /* Case 4: else this collision_index is a middle node. + * There is no change to the head and the tail pointers. 
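+	 * Only the two neighbours' links are updated; the removed entry
+	 * itself is reset to an unlinked state below.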
+ */ + + ENV_BUG_ON(next >= line_entries || prev >= line_entries); + + /* Update prev and next nodes */ + ocf_metadata_set_partition_prev(cache, next, prev); + ocf_metadata_set_partition_next(cache, prev, next); + + /* Update the given node */ + ocf_metadata_set_partition_info(cache, cline, invalid_part_id, + line_entries, line_entries); + } + + free_list->curr_size--; +} + +void ocf_metadata_add_to_free_list(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + struct ocf_part *free_list = cache->device->freelist_part; + ocf_cache_line_t tail; + ocf_cache_line_t line_entries = cache->device->collision_table_entries; + ocf_part_id_t invalid_part_id = PARTITION_INVALID; + + ENV_BUG_ON(line >= line_entries); + + if (free_list->curr_size == 0) { + free_list->head = line; + free_list->tail = line; + + ocf_metadata_set_partition_info(cache, line, invalid_part_id, + line_entries, line_entries); + } else { + tail = free_list->tail; + + ENV_BUG_ON(tail >= line_entries); + + ocf_metadata_set_partition_info(cache, line, invalid_part_id, + line_entries, tail); + ocf_metadata_set_partition_next(cache, tail, line); + + free_list->tail = line; + } + + free_list->curr_size++; +} + + +/* Adds the given collision_index to the _head_ of the Partition list */ +void ocf_metadata_add_to_partition(struct ocf_cache *cache, + ocf_part_id_t part_id, ocf_cache_line_t line) +{ + ocf_cache_line_t line_head; + ocf_cache_line_t line_entries = cache->device->collision_table_entries; + struct ocf_user_part *part = &cache->user_parts[part_id]; + + ENV_BUG_ON(!(line < line_entries)); + + /* First node to be added/ */ + if (!part->runtime->curr_size) { + + update_partition_head(cache, part_id, line); + ocf_metadata_set_partition_info(cache, line, part_id, + line_entries, line_entries); + + if (!ocf_part_is_valid(part)) { + /* Partition becomes empty, and is not valid + * update list of partitions + */ + ocf_part_sort(cache); + } + + } else { + /* Not the first node to be added. */ + line_head = part->runtime->head; + + ENV_BUG_ON(!(line_head < line_entries)); + + ocf_metadata_set_partition_info(cache, line, part_id, + line_head, line_entries); + + ocf_metadata_set_partition_prev(cache, line_head, line); + + update_partition_head(cache, part_id, line); + } + + part->runtime->curr_size++; +} + +/* Deletes the node with the given collision_index from the Partition list */ +void ocf_metadata_remove_from_partition(struct ocf_cache *cache, + ocf_part_id_t part_id, ocf_cache_line_t line) +{ + int is_head, is_tail; + ocf_cache_line_t prev_line, next_line; + uint32_t line_entries = cache->device->collision_table_entries; + struct ocf_user_part *part = &cache->user_parts[part_id]; + + ENV_BUG_ON(!(line < line_entries)); + + /* Get Partition info */ + ocf_metadata_get_partition_info(cache, line, NULL, + &next_line, &prev_line); + + /* Find out if this node is Partition _head_ */ + is_head = (prev_line == line_entries); + is_tail = (next_line == line_entries); + + /* Case 1: If we are head and there is only one node. So unlink node + * and set that there is no node left in the list. 
+ */ + if (is_head && (part->runtime->curr_size == 1)) { + ocf_metadata_set_partition_info(cache, line, + part_id, line_entries, line_entries); + + update_partition_head(cache, part_id, line_entries); + + if (!ocf_part_is_valid(part)) { + /* Partition becomes not empty, and is not valid + * update list of partitions + */ + ocf_part_sort(cache); + } + + } else if (is_head) { + /* Case 2: else if this collision_index is partition list head, + * but many nodes, update head and return + */ + ENV_BUG_ON(!(next_line < line_entries)); + update_partition_head(cache, part_id, next_line); + + ocf_metadata_set_partition_next(cache, line, line_entries); + + ocf_metadata_set_partition_prev(cache, next_line, + line_entries); + } else if (is_tail) { + /* Case 3: else if this collision_index is partition list tail + */ + ENV_BUG_ON(!(prev_line < line_entries)); + + ocf_metadata_set_partition_prev(cache, line, line_entries); + + ocf_metadata_set_partition_next(cache, prev_line, + line_entries); + } else { + /* Case 4: else this collision_index is a middle node. + * There is no change to the head and the tail pointers. + */ + + ENV_BUG_ON(!(next_line < line_entries)); + ENV_BUG_ON(!(prev_line < line_entries)); + + /* Update prev and next nodes */ + ocf_metadata_set_partition_next(cache, prev_line, next_line); + + ocf_metadata_set_partition_prev(cache, next_line, prev_line); + + /* Update the given node */ + ocf_metadata_set_partition_info(cache, line, part_id, + line_entries, line_entries); + } + + part->runtime->curr_size--; +} diff --git a/src/metadata/metadata_partition.h b/src/metadata/metadata_partition.h new file mode 100644 index 0000000..73d1c26 --- /dev/null +++ b/src/metadata/metadata_partition.h @@ -0,0 +1,78 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_PARTITION_H__ +#define __METADATA_PARTITION_H__ + +#include "metadata_partition_structs.h" +#include "../ocf_cache_priv.h" + +#define PARTITION_DEFAULT 0 +#define PARTITION_INVALID ((ocf_part_id_t)-1) +#define PARTITION_SIZE_MAX ((ocf_cache_line_t)-1) + +static inline ocf_part_id_t ocf_metadata_get_partition_id( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + return cache->metadata.iface.get_partition_id(cache, line); +} + +static inline ocf_cache_line_t ocf_metadata_get_partition_next( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + return cache->metadata.iface.get_partition_next(cache, line); +} + +static inline ocf_cache_line_t ocf_metadata_get_partition_prev( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + return cache->metadata.iface.get_partition_prev(cache, line); +} + +static inline void ocf_metadata_get_partition_info( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_part_id_t *part_id, ocf_cache_line_t *next_line, + ocf_cache_line_t *prev_line) +{ + cache->metadata.iface.get_partition_info(cache, line, part_id, + next_line, prev_line); +} + +static inline void ocf_metadata_set_partition_next( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t next_line) +{ + cache->metadata.iface.set_partition_next(cache, line, next_line); +} + +static inline void ocf_metadata_set_partition_prev( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_cache_line_t prev_line) +{ + cache->metadata.iface.set_partition_prev(cache, line, prev_line); +} + +static inline void ocf_metadata_set_partition_info( + struct ocf_cache *cache, ocf_cache_line_t line, + ocf_part_id_t part_id, ocf_cache_line_t next_line, + ocf_cache_line_t 
prev_line) +{ + cache->metadata.iface.set_partition_info(cache, line, part_id, + next_line, prev_line); +} + +void ocf_metadata_add_to_free_list(struct ocf_cache *cache, + ocf_cache_line_t cline); + +void ocf_metadata_remove_from_free_list(struct ocf_cache *cache, + ocf_cache_line_t cline); + +void ocf_metadata_add_to_partition(struct ocf_cache *cache, + ocf_part_id_t part_id, ocf_cache_line_t line); + +void ocf_metadata_remove_from_partition(struct ocf_cache *cache, + ocf_part_id_t part_id, ocf_cache_line_t line); + +#endif /* __METADATA_PARTITION_H__ */ diff --git a/src/metadata/metadata_partition_structs.h b/src/metadata/metadata_partition_structs.h new file mode 100644 index 0000000..c8ed593 --- /dev/null +++ b/src/metadata/metadata_partition_structs.h @@ -0,0 +1,50 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_PARTITION_STRUCTS_H__ +#define __METADATA_PARTITION_STRUCTS_H__ + +#include "../utils/utils_list.h" +#include "../cleaning/cleaning.h" +#include "../eviction/eviction.h" + +struct ocf_part { + ocf_cache_line_t head; + ocf_cache_line_t tail; + uint32_t curr_size; +}; + +struct ocf_user_part_config { + char name[OCF_IO_CLASS_NAME_MAX]; + uint32_t min_size; + uint32_t max_size; + int16_t priority; + ocf_cache_mode_t cache_mode; + struct { + uint8_t valid : 1; + uint8_t added : 1; + uint8_t eviction : 1; + /*!< This bits is setting during partition sorting, + * and means that can evict from this partition + */ + } flags; +}; + +struct ocf_user_part_runtime { + uint32_t curr_size; + uint32_t head; + struct eviction_policy eviction; + struct cleaning_policy cleaning; +}; + +struct ocf_user_part { + struct ocf_user_part_config *config; + struct ocf_user_part_runtime *runtime; + + struct ocf_lst_entry lst_valid; +}; + + +#endif /* __METADATA_PARTITION_STRUCTS_H__ */ diff --git a/src/metadata/metadata_raw.c b/src/metadata/metadata_raw.c new file mode 100644 index 0000000..2e16ecd --- /dev/null +++ b/src/metadata/metadata_raw.c @@ -0,0 +1,609 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "metadata.h" +#include "metadata_hash.h" +#include "metadata_raw.h" +#include "metadata_io.h" +#include "metadata_raw_atomic.h" +#include "../ocf_def_priv.h" + +#define OCF_METADATA_RAW_DEBUG 0 + +#if 1 == OCF_METADATA_RAW_DEBUG +#define OCF_DEBUG_TRACE(cache) \ + ocf_cache_log(log_info, "[Metadata][Raw] %s\n", __func__) + +#define OCF_DEBUG_MSG(cache, msg) \ + ocf_cache_log(cache, log_info, "[Metadata][Raw] %s - %s\n", \ + __func__, msg) + +#define OCF_DEBUG_PARAM(cache, format, ...) \ + ocf_cache_log(cache, log_info, "[Metadata][Raw] %s - "format"\n", \ + __func__, ##__VA_ARGS__) +#else +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_MSG(cache, msg) +#define OCF_DEBUG_PARAM(cache, format, ...) 
+#endif + +/******************************************************************************* + * Common RAW Implementation + ******************************************************************************/ +/* + * Check if page is valid for specified RAW descriptor + */ +static bool _raw_ssd_page_is_valid(struct ocf_metadata_raw *raw, uint32_t page) +{ + ENV_BUG_ON(page < raw->ssd_pages_offset); + ENV_BUG_ON(page >= (raw->ssd_pages_offset + raw->ssd_pages)); + + return true; +} + +/******************************************************************************* + * RAW RAM Implementation + ******************************************************************************/ +#define _RAW_RAM_ADDR(raw, line) \ + (raw->mem_pool + (((uint64_t)raw->entry_size * (line)))) + +#define _RAW_RAM_PAGE(raw, line) \ + ((line) / raw->entries_in_page) + +#define _RAW_RAM_PAGE_SSD(raw, line) \ + (raw->ssd_pages_offset + _RAW_RAM_PAGE(raw, line)) + +#define _RAW_RAM_ADDR_PAGE(raw, line) \ + (_RAW_RAM_ADDR(raw, \ + _RAW_RAM_PAGE(raw, line) * raw->entries_in_page)) + +#define _RAW_RAM_GET(raw, line, data) \ + env_memcpy(data, raw->entry_size, _RAW_RAM_ADDR(raw, (line)), \ + raw->entry_size) + +#define _RAW_RAM_SET(raw, line, data) \ + env_memcpy(_RAW_RAM_ADDR(raw, line), raw->entry_size, \ + data, raw->entry_size) + + + +/* + * RAM Implementation - De-Initialize + */ +static int _raw_ram_deinit(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + OCF_DEBUG_TRACE(cache); + + if (raw->mem_pool) { + env_vfree(raw->mem_pool); + raw->mem_pool = NULL; + } + + return 0; +} + +/* + * RAM Implementation - Initialize + */ +static int _raw_ram_init(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + size_t mem_pool_size; + + OCF_DEBUG_TRACE(cache); + + /* Allocate memory pool for entries */ + mem_pool_size = raw->ssd_pages; + mem_pool_size *= PAGE_SIZE; + raw->mem_pool_limit = mem_pool_size; + raw->mem_pool = env_vzalloc(mem_pool_size); + if (!raw->mem_pool) + return -ENOMEM; + + return 0; +} + +/* + * RAM Implementation - Size of + */ +static size_t _raw_ram_size_of(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + size_t size; + + size = raw->ssd_pages; + size *= PAGE_SIZE; + + return size; +} + +/* + * RAM Implementation - Size on SSD + */ +static uint32_t _raw_ram_size_on_ssd(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + const size_t alignment = 128 * KiB / PAGE_SIZE; + + return DIV_ROUND_UP(raw->ssd_pages, alignment) * alignment; +} + +/* + * RAM Implementation - Checksum + */ +static uint32_t _raw_ram_checksum(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + uint64_t i; + uint32_t step = 0; + uint32_t crc = 0; + + for (i = 0; i < raw->ssd_pages; i++) { + crc = env_crc32(crc, raw->mem_pool + PAGE_SIZE * i, PAGE_SIZE); + OCF_COND_RESCHED(step, 10000); + } + + return crc; +} + +/* + * RAM Implementation - Get entry + */ +static int _raw_ram_get(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + void *data, uint32_t size) +{ + ENV_BUG_ON(!_raw_is_valid(raw, line, size)); + + return _RAW_RAM_GET(raw, line, data); +} + +/* + * RAM Implementation - Read only entry access + */ +static const void *_raw_ram_rd_access(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + uint32_t size) +{ + ENV_BUG_ON(!_raw_is_valid(raw, line, size)); + + return _RAW_RAM_ADDR(raw, line); +} + +/* + * RAM Implementation - Read only entry access + */ +static void *_raw_ram_wr_access(struct ocf_cache *cache, + struct ocf_metadata_raw 
*raw, ocf_cache_line_t line, + uint32_t size) +{ + ENV_BUG_ON(!_raw_is_valid(raw, line, size)); + + return _RAW_RAM_ADDR(raw, line); +} + +/* + * RAM Implementation - Set Entry + */ +static int _raw_ram_set(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + void *data, uint32_t size) +{ + ENV_BUG_ON(!_raw_is_valid(raw, line, size)); + + return _RAW_RAM_SET(raw, line, data); +} + +/* + * RAM Implementation - Flush specified element from SSD + */ +static int _raw_ram_flush(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line) +{ + OCF_DEBUG_PARAM(cache, "Line = %u", line); + OCF_DEBUG_PARAM(cache, "Page = %llu", _RAW_RAM_PAGE(raw, line)); + + ENV_BUG_ON(!_raw_is_valid(raw, line, raw->entry_size)); + + return metadata_io_write(cache, _RAW_RAM_ADDR_PAGE(raw, line), + _RAW_RAM_PAGE_SSD(raw, line)); +} + +/* + * RAM Implementation - Load all IO callback + */ +static int _raw_ram_load_all_io(struct ocf_cache *cache, + ctx_data_t *data, uint32_t page, void *context) +{ + ocf_cache_line_t line; + uint32_t raw_page; + struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *) context; + uint32_t size = raw->entry_size * raw->entries_in_page; + + ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page)); + ENV_BUG_ON(size > PAGE_SIZE); + + raw_page = page - raw->ssd_pages_offset; + line = raw_page * raw->entries_in_page; + + OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page); + + ctx_data_rd_check(cache->owner, _RAW_RAM_ADDR(raw, line), data, size); + ctx_data_seek(cache->owner, data, ctx_data_seek_current, + PAGE_SIZE - size); + + return 0; +} + +/* + * RAM Implementation - Load all metadata elements from SSD + */ +static int _raw_ram_load_all(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + OCF_DEBUG_TRACE(cache); + + return metadata_io_read_i(cache, raw->ssd_pages_offset, + raw->ssd_pages, _raw_ram_load_all_io, raw); +} + +/* + * RAM Implementation - Flush IO callback - Fill page + */ +static int _raw_ram_flush_all_fill(struct ocf_cache *cache, + ctx_data_t *data, uint32_t page, void *context) +{ + ocf_cache_line_t line; + uint32_t raw_page; + struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *)context; + uint32_t size = raw->entry_size * raw->entries_in_page; + + ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page)); + ENV_BUG_ON(size > PAGE_SIZE); + + raw_page = page - raw->ssd_pages_offset; + line = raw_page * raw->entries_in_page; + + OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page); + + ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, line), size); + ctx_data_zero_check(cache->owner, data, PAGE_SIZE - size); + + return 0; +} + +/* + * RAM Implementation - Flush all elements + */ +static int _raw_ram_flush_all(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + OCF_DEBUG_TRACE(cache); + + return metadata_io_write_i(cache, raw->ssd_pages_offset, + raw->ssd_pages, _raw_ram_flush_all_fill, raw); +} + +/* + * RAM RAM Implementation - Mark to Flush + */ +static void _raw_ram_flush_mark(struct ocf_cache *cache, + struct ocf_request *rq, uint32_t map_idx, int to_state, + uint8_t start, uint8_t stop) +{ + if (to_state == DIRTY || to_state == CLEAN) { + rq->map[map_idx].flush = true; + rq->info.flush_metadata = true; + } +} + +/******************************************************************************* + * RAM RAM Implementation - Do Flush Asynchronously + ******************************************************************************/ +struct _raw_ram_flush_ctx { + struct ocf_metadata_raw *raw; + 
struct ocf_request *rq; + ocf_metadata_asynch_flush_hndl complete; + env_atomic flush_req_cnt; + int error; +}; + +static void _raw_ram_flush_do_asynch_io_complete(struct ocf_cache *cache, + void *context, int error) +{ + struct _raw_ram_flush_ctx *ctx = context; + + if (error) { + ctx->error = error; + ocf_metadata_error(cache); + } + + if (env_atomic_dec_return(&ctx->flush_req_cnt)) + return; + + OCF_DEBUG_MSG(cache, "Asynchronous flushing complete"); + + /* Call metadata flush completed call back */ + ctx->rq->error |= ctx->error; + ctx->complete(ctx->rq, ctx->error); + + env_free(ctx); +} + +/* + * RAM Implementation - Flush IO callback - Fill page + */ +static int _raw_ram_flush_do_asynch_fill(struct ocf_cache *cache, + ctx_data_t *data, uint32_t page, void *context) +{ + ocf_cache_line_t line; + uint32_t raw_page; + struct _raw_ram_flush_ctx *ctx = context; + struct ocf_metadata_raw *raw = NULL; + uint64_t size; + + ENV_BUG_ON(!ctx); + + raw = ctx->raw; + ENV_BUG_ON(!raw); + + size = raw->entry_size * raw->entries_in_page; + ENV_BUG_ON(size > PAGE_SIZE); + + raw_page = page - raw->ssd_pages_offset; + line = raw_page * raw->entries_in_page; + + OCF_DEBUG_PARAM(cache, "Line = %u, Page = %u", line, raw_page); + + ctx_data_wr_check(cache->owner, data, _RAW_RAM_ADDR(raw, line), size); + ctx_data_zero_check(cache->owner, data, PAGE_SIZE - size); + + return 0; +} + +/* + * RAM RAM Implementation - Do Flush + */ + +int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2) +{ + uint32_t *page1 = (uint32_t *)item1; + uint32_t *page2 = (uint32_t *)item2; + + if (*page1 > *page2) + return 1; + + if (*page1 < *page2) + return -1; + + return 0; +} + +static void __raw_ram_flush_do_asynch_add_pages(struct ocf_request *rq, + uint32_t *pages_tab, struct ocf_metadata_raw *raw, + int *pages_to_flush) { + int i, j = 0; + int line_no = rq->core_line_count; + struct ocf_map_info *map; + + for (i = 0; i < line_no; i++) { + map = &rq->map[i]; + if (map->flush) { + pages_tab[j] = _RAW_RAM_PAGE(raw, map->coll_idx); + j++; + } + } + + *pages_to_flush = j; +} + +static int _raw_ram_flush_do_asynch(struct ocf_cache *cache, + struct ocf_request *rq, struct ocf_metadata_raw *raw, + ocf_end_t complete) +{ + int result = 0, i; + uint32_t __pages_tab[MAX_STACK_TAB_SIZE]; + uint32_t *pages_tab; + int line_no = rq->core_line_count; + int pages_to_flush; + uint32_t start_page = 0; + uint32_t count = 0; + struct _raw_ram_flush_ctx *ctx; + + ENV_BUG_ON(!complete); + + OCF_DEBUG_TRACE(cache); + + if (!rq->info.flush_metadata) { + /* Nothing to flush call flush callback */ + complete(rq, 0); + return 0; + } + + ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO); + if (!ctx) { + complete(rq, -ENOMEM); + return -ENOMEM; + } + + ctx->rq = rq; + ctx->complete = complete; + ctx->raw = raw; + env_atomic_set(&ctx->flush_req_cnt, 1); + + if (line_no <= MAX_STACK_TAB_SIZE) { + pages_tab = __pages_tab; + } else { + pages_tab = env_zalloc(sizeof(*pages_tab) * line_no, ENV_MEM_NOIO); + if (!pages_tab) { + env_free(ctx); + complete(rq, -ENOMEM); + return -ENOMEM; + } + } + + /* While sorting in progress keep request remaining equal to 1, + * to prevent freeing of asynchronous context + */ + + __raw_ram_flush_do_asynch_add_pages(rq, pages_tab, raw, + &pages_to_flush); + + env_sort(pages_tab, pages_to_flush, sizeof(*pages_tab), + _raw_ram_flush_do_page_cmp, NULL); + + i = 0; + while (i < pages_to_flush) { + start_page = pages_tab[i]; + count = 1; + + while (true) { + if ((i + 1) >= pages_to_flush) + break; + + if (pages_tab[i] == 
pages_tab[i + 1]) { + i++; + continue; + } + + if ((pages_tab[i] + 1) != pages_tab[i + 1]) + break; + + i++; + count++; + } + + + env_atomic_inc(&ctx->flush_req_cnt); + + result |= metadata_io_write_i_asynch(cache, rq->io_queue, ctx, + raw->ssd_pages_offset + start_page, count, + _raw_ram_flush_do_asynch_fill, + _raw_ram_flush_do_asynch_io_complete); + + if (result) + break; + + i++; + } + + _raw_ram_flush_do_asynch_io_complete(cache, ctx, result); + + if (line_no > MAX_STACK_TAB_SIZE) + env_free(pages_tab); + + return result; +} + +/******************************************************************************* + * RAW Interfaces definitions + ******************************************************************************/ +#include "metadata_raw_dynamic.h" +#include "metadata_raw_volatile.h" + +static const struct raw_iface IRAW[metadata_raw_type_max] = { + [metadata_raw_type_ram] = { + .init = _raw_ram_init, + .deinit = _raw_ram_deinit, + .size_of = _raw_ram_size_of, + .size_on_ssd = _raw_ram_size_on_ssd, + .checksum = _raw_ram_checksum, + .get = _raw_ram_get, + .set = _raw_ram_set, + .rd_access = _raw_ram_rd_access, + .wr_access = _raw_ram_wr_access, + .flush = _raw_ram_flush, + .load_all = _raw_ram_load_all, + .flush_all = _raw_ram_flush_all, + .flush_mark = _raw_ram_flush_mark, + .flush_do_asynch = _raw_ram_flush_do_asynch, + }, + [metadata_raw_type_dynamic] = { + .init = raw_dynamic_init, + .deinit = raw_dynamic_deinit, + .size_of = raw_dynamic_size_of, + .size_on_ssd = raw_dynamic_size_on_ssd, + .checksum = raw_dynamic_checksum, + .get = raw_dynamic_get, + .set = raw_dynamic_set, + .rd_access = raw_dynamic_rd_access, + .wr_access = raw_dynamic_wr_access, + .flush = raw_dynamic_flush, + .load_all = raw_dynamic_load_all, + .flush_all = raw_dynamic_flush_all, + .flush_mark = raw_dynamic_flush_mark, + .flush_do_asynch = raw_dynamic_flush_do_asynch, + }, + [metadata_raw_type_volatile] = { + .init = _raw_ram_init, + .deinit = _raw_ram_deinit, + .size_of = _raw_ram_size_of, + .size_on_ssd = raw_volatile_size_on_ssd, + .checksum = raw_volatile_checksum, + .get = _raw_ram_get, + .set = _raw_ram_set, + .rd_access = _raw_ram_rd_access, + .wr_access = _raw_ram_wr_access, + .flush = raw_volatile_flush, + .load_all = raw_volatile_load_all, + .flush_all = raw_volatile_flush_all, + .flush_mark = raw_volatile_flush_mark, + .flush_do_asynch = raw_volatile_flush_do_asynch, + }, + [metadata_raw_type_atomic] = { + .init = _raw_ram_init, + .deinit = _raw_ram_deinit, + .size_of = _raw_ram_size_of, + .size_on_ssd = _raw_ram_size_on_ssd, + .checksum = _raw_ram_checksum, + .get = _raw_ram_get, + .set = _raw_ram_set, + .rd_access = _raw_ram_rd_access, + .wr_access = _raw_ram_wr_access, + .flush = _raw_ram_flush, + .load_all = _raw_ram_load_all, + .flush_all = _raw_ram_flush_all, + .flush_mark = raw_atomic_flush_mark, + .flush_do_asynch = raw_atomic_flush_do_asynch, + }, +}; + +/******************************************************************************* + * RAW Top interface implementation + ******************************************************************************/ + +int ocf_metadata_raw_init(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + ENV_BUG_ON(raw->raw_type < metadata_raw_type_min); + ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max); + + raw->iface = &(IRAW[raw->raw_type]); + return raw->iface->init(cache, raw); +} + +int ocf_metadata_raw_deinit(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + int result; + + if (!raw->iface) + return 0; + + result = 
raw->iface->deinit(cache, raw);
+	raw->iface = NULL;
+
+	return result;
+}
+
+size_t ocf_metadata_raw_size_on_ssd(struct ocf_cache* cache,
+		struct ocf_metadata_raw* raw)
+{
+	ENV_BUG_ON(raw->raw_type < metadata_raw_type_min);
+	ENV_BUG_ON(raw->raw_type >= metadata_raw_type_max);
+
+	return IRAW[raw->raw_type].size_on_ssd(cache, raw);
+}
diff --git a/src/metadata/metadata_raw.h b/src/metadata/metadata_raw.h
new file mode 100644
index 0000000..dbb9391
--- /dev/null
+++ b/src/metadata/metadata_raw.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright(c) 2012-2018 Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause-Clear
+ */
+
+#ifndef __METADATA_RAW_H__
+#define __METADATA_RAW_H__
+
+/**
+ * @file metadata_raw.h
+ * @brief Metadata RAW container implementation
+ */
+
+/**
+ * @brief Metadata raw type
+ */
+enum ocf_metadata_raw_type {
+	/**
+	 * @brief Default implementation with support for
+	 * flushing to/loading from SSD
+	 */
+	metadata_raw_type_ram = 0,
+
+	/**
+	 * @brief Dynamic implementation, elements are allocated on first
+	 * access
+	 */
+	metadata_raw_type_dynamic,
+
+	/**
+	 * @brief This container does not flush metadata to SSD and does not
+	 * support loading from SSD
+	 */
+	metadata_raw_type_volatile,
+
+	/**
+	 * @brief Implementation for atomic device used as cache
+	 */
+	metadata_raw_type_atomic,
+
+	metadata_raw_type_max, /*!< MAX */
+	metadata_raw_type_min = metadata_raw_type_ram /*!< MIN */
+};
+
+/**
+ * @brief RAW instance descriptor
+ */
+struct ocf_metadata_raw {
+	/**
+	 * @name Metadata and RAW types
+	 */
+	enum ocf_metadata_segment metadata_segment; /*!< Metadata segment */
+	enum ocf_metadata_raw_type raw_type; /*!< RAW implementation type */
+
+	/**
+	 * @name Metadata elements description
+	 */
+	uint32_t entry_size; /*!< Size of particular entry */
+	uint32_t entries_in_page; /*!< Number of entries in one page */
+	uint64_t entries; /*!< Number of entries */
+
+	/**
+	 * @name Location on cache device description
+	 */
+	uint64_t ssd_pages_offset; /*!< SSD (Cache device) page offset */
+	uint64_t ssd_pages; /*!< Number of pages that are required */
+
+	const struct raw_iface *iface; /*!< RAW container interface */
+
+	/**
+	 * @name Private RAW elements
+	 */
+	void *mem_pool; /*!< Private memory pool */
+
+	size_t mem_pool_limit; /*!< 
Current memory pool size (limit) */ + + void *priv; /*!< Private data - context */ +}; + +/** + * RAW container interface + */ +struct raw_iface { + int (*init)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + + int (*deinit)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + + size_t (*size_of)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + + /** + * @brief Return size which metadata take on cache device + * + * @param cache Cache instance + * @param raw RAW container of metadata + * + * @return Number of pages (4 kiB) on cache device + */ + uint32_t (*size_on_ssd)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + + uint32_t (*checksum)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + + + int (*get)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + void *data, uint32_t size); + + int (*set)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + void *data, uint32_t size); + + const void* (*rd_access)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + uint32_t size); + + void* (*wr_access)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, + ocf_cache_line_t line, uint32_t size); + + int (*flush)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line); + + int (*load_all)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + + int (*flush_all)(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + + void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *rq, + uint32_t map_idx, int to_state, uint8_t start, + uint8_t stop); + + int (*flush_do_asynch)(struct ocf_cache *cache, struct ocf_request *rq, + struct ocf_metadata_raw *raw, + ocf_metadata_asynch_flush_hndl complete); +}; + +/** + * @brief Initialize RAW instance + * + * @param cache - Cache instance + * @param raw - RAW descriptor + * @return 0 - Operation success, otherwise error + */ +int ocf_metadata_raw_init(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/** + * @brief De-Initialize RAW instance + * + * @param cache - Cache instance + * @param raw - RAW descriptor + * @return 0 - Operation success, otherwise error + */ +int ocf_metadata_raw_deinit(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/** + * @brief Get memory footprint + * + * @param cache Cache instance + * @param raw RAW descriptor + * @return Memory footprint + */ +static inline size_t ocf_metadata_raw_size_of(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + if (!raw->iface) + return 0; + + return raw->iface->size_of(cache, raw); +} + +/** + * @brief Get SSD footprint + * + * @param cache - Cache instance + * @param raw - RAW descriptor + * @return Size on SSD + */ +size_t ocf_metadata_raw_size_on_ssd(struct ocf_cache* cache, + struct ocf_metadata_raw* raw); + +/** + * @brief Calculate metadata checksum + * + * @param cache - Cache instance + * @param raw - RAW descriptor + * @return Checksum + */ +static inline uint32_t ocf_metadata_raw_checksum(struct ocf_cache* cache, + struct ocf_metadata_raw* raw) +{ + return raw->iface->checksum(cache, raw); +} + +/** + * @brief Get specified element of metadata + * + * @param cache - Cache instance + * @param raw - RAW descriptor + * @param line - Cache line to be get + * @param data - Data where metadata entry will be copied into + * @param size - Size of data + * @return 0 - Operation success, otherwise error + */ +static inline int ocf_metadata_raw_get(struct ocf_cache *cache, + struct 
ocf_metadata_raw *raw, ocf_cache_line_t line, void *data,
+		uint32_t size)
+{
+	return raw->iface->get(cache, raw, line, data, size);
+}
+
+/**
+ * @brief Access specified element of metadata directly for writing
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @param line - Cache line to be accessed
+ * @param size - Size of data
+ * @return Pointer to accessed data, or NULL in case of error
+ */
+static inline void *ocf_metadata_raw_wr_access(struct ocf_cache *cache,
+		struct ocf_metadata_raw *raw, ocf_cache_line_t line,
+		uint32_t size)
+{
+	return raw->iface->wr_access(cache, raw, line, size);
+}
+
+/**
+ * @brief Access specified element of metadata directly for reading
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @param line - Cache line to be accessed
+ * @param size - Size of data
+ * @return Pointer to accessed data, or NULL in case of error
+ */
+static inline const void *ocf_metadata_raw_rd_access(
+		struct ocf_cache *cache, struct ocf_metadata_raw *raw,
+		ocf_cache_line_t line, uint32_t size)
+{
+	return raw->iface->rd_access(cache, raw, line, size);
+}
+
+/**
+ * @brief Set specified element of metadata
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @param line - Cache line to be set
+ * @param data - Data that will be copied into the metadata entry
+ * @param size - Size of data
+ * @return 0 - Operation success, otherwise error
+ */
+static inline int ocf_metadata_raw_set(struct ocf_cache *cache,
+		struct ocf_metadata_raw *raw, ocf_cache_line_t line, void *data,
+		uint32_t size)
+{
+	return raw->iface->set(cache, raw, line, data, size);
+}
+
+/**
+ * @brief Flush specified element of metadata to SSD
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @param line - Cache line to be flushed
+ * @return 0 - Operation success, otherwise error
+ */
+static inline int ocf_metadata_raw_flush(struct ocf_cache *cache,
+		struct ocf_metadata_raw *raw, ocf_cache_line_t line)
+{
+	return raw->iface->flush(cache, raw, line);
+}
+
+/**
+ * @brief Load all entries from the cache device
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @return 0 - Operation success, otherwise error
+ */
+static inline int ocf_metadata_raw_load_all(struct ocf_cache *cache,
+		struct ocf_metadata_raw *raw)
+{
+	return raw->iface->load_all(cache, raw);
+}
+
+/**
+ * @brief Flush all entries to the cache device
+ *
+ * @param cache - Cache instance
+ * @param raw - RAW descriptor
+ * @return 0 - Operation success, otherwise error
+ */
+static inline int ocf_metadata_raw_flush_all(struct ocf_cache *cache,
+		struct ocf_metadata_raw *raw)
+{
+	return raw->iface->flush_all(cache, raw);
+}
+
+
+static inline void ocf_metadata_raw_flush_mark(struct ocf_cache *cache,
+		struct ocf_metadata_raw *raw, struct ocf_request *rq,
+		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
+{
+	raw->iface->flush_mark(cache, rq, map_idx, to_state, start, stop);
+}
+
+static inline int ocf_metadata_raw_flush_do_asynch(struct ocf_cache *cache,
+		struct ocf_request *rq, struct ocf_metadata_raw *raw,
+		ocf_metadata_asynch_flush_hndl complete)
+{
+	return raw->iface->flush_do_asynch(cache, rq, raw, complete);
+}
+
+/*
+ * Check if line is valid for specified RAW descriptor
+ */
+static inline bool _raw_is_valid(struct ocf_metadata_raw *raw,
+		ocf_cache_line_t line, uint32_t size)
+{
+	if (!raw)
+		
return false; + + if (size != raw->entry_size) + return false; + + if (line >= raw->entries) + return false; + + return true; +} + +static inline void _raw_bug_on(struct ocf_metadata_raw *raw, + ocf_cache_line_t line, uint32_t size) +{ + ENV_BUG_ON(!_raw_is_valid(raw, line, size)); +} + +#define MAX_STACK_TAB_SIZE 32 + +int _raw_ram_flush_do_page_cmp(const void *item1, const void *item2); + +#endif /* METADATA_RAW_H_ */ diff --git a/src/metadata/metadata_raw_atomic.c b/src/metadata/metadata_raw_atomic.c new file mode 100644 index 0000000..a48ac9b --- /dev/null +++ b/src/metadata/metadata_raw_atomic.c @@ -0,0 +1,260 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "metadata.h" +#include "metadata_io.h" +#include "metadata_hash.h" +#include "metadata_raw.h" +#include "metadata_raw_atomic.h" +#include "../utils/utils_io.h" +#include "../utils/utils_cache_line.h" +#include "../ocf_def_priv.h" + +#define OCF_METADATA_RAW_ATOMIC_DEBUG 0 + +#if 1 == OCF_METADATA_RAW_ATOMIC_DEBUG +#define OCF_DEBUG_TRACE(cache) \ + ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s\n", __func__) + +#define OCF_DEBUG_MSG(cache, msg) \ + ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s - %s\n", \ + __func__, msg) + +#define OCF_DEBUG_PARAM(cache, format, ...) \ + ocf_cache_log(cache, log_info, "[Metadata][Raw][Atomic] %s - "format"\n", \ + __func__, ##__VA_ARGS__) +#else +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_MSG(cache, msg) +#define OCF_DEBUG_PARAM(cache, format, ...) +#endif + +struct _raw_atomic_flush_ctx { + struct ocf_request *rq; + ocf_metadata_asynch_flush_hndl complete; + env_atomic flush_req_cnt; +}; + +static void _raw_atomic_io_discard_cmpl(struct _raw_atomic_flush_ctx *ctx, + int error) +{ + if (error) + ctx->rq->error = error; + + if (env_atomic_dec_return(&ctx->flush_req_cnt)) + return; + + if (ctx->rq->error) + ocf_metadata_error(ctx->rq->cache); + + /* Call metadata flush completed call back */ + OCF_DEBUG_MSG(cache, "Asynchronous flushing complete"); + + ctx->complete(ctx->rq, ctx->rq->error); + + env_free(ctx); +} + +static void _raw_atomic_io_discard_end(struct ocf_io *io, int error) +{ + struct _raw_atomic_flush_ctx *ctx = io->priv1; + + ocf_io_put(io); /* Release IO */ + + _raw_atomic_io_discard_cmpl(ctx, error); +} + +static int _raw_atomic_io_discard_do(struct ocf_cache *cache, void *context, + uint64_t start_addr, uint32_t len, struct _raw_atomic_flush_ctx *ctx) +{ + struct ocf_request *rq = context; + struct ocf_io *io = ocf_new_cache_io(cache); + + if (!io) { + rq->error = -ENOMEM; + return rq->error; + } + + OCF_DEBUG_PARAM(cache, "Page to flushing = %u, count of pages = %u", + start_line, len); + + env_atomic_inc(&ctx->flush_req_cnt); + + ocf_io_configure(io, start_addr, len, OCF_WRITE, 0, 0); + ocf_io_set_cmpl(io, ctx, NULL, _raw_atomic_io_discard_end); + + if (cache->device->obj.features.discard_zeroes) + ocf_dobj_submit_discard(io); + else + ocf_dobj_submit_write_zeroes(io); + + return rq->error; +} + +void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq, + uint32_t map_idx, int to_state, uint8_t start, uint8_t stop) +{ + if (to_state == INVALID) { + rq->map[map_idx].flush = true; + rq->map[map_idx].start_flush = start; + rq->map[map_idx].stop_flush = stop; + rq->info.flush_metadata = true; + } +} + +#define MAX_STACK_TAB_SIZE 32 + +static inline void _raw_atomic_add_page(struct ocf_cache *cache, + uint32_t *clines_tab, uint64_t line, int *idx) +{ + clines_tab[*idx] = 
ocf_metadata_map_lg2phy(cache, line); + (*idx)++; +} + +static int _raw_atomic_flush_do_asynch_sec(struct ocf_cache *cache, + struct ocf_request *rq, int map_idx, + struct _raw_atomic_flush_ctx *ctx) +{ + struct ocf_map_info *map = &rq->map[map_idx]; + uint32_t len = 0; + uint64_t start_addr; + int result = 0; + + start_addr = ocf_metadata_map_lg2phy(cache, map->coll_idx); + start_addr *= ocf_line_size(cache); + start_addr += cache->device->metadata_offset; + + start_addr += SECTORS_TO_BYTES(map->start_flush); + len = SECTORS_TO_BYTES(map->stop_flush - map->start_flush); + len += SECTORS_TO_BYTES(1); + + result = _raw_atomic_io_discard_do(cache, rq, start_addr, len, ctx); + + return result; +} + +int raw_atomic_flush_do_asynch(struct ocf_cache *cache, + struct ocf_request *rq, struct ocf_metadata_raw *raw, + ocf_end_t complete) +{ + int result = 0, i; + uint32_t __clines_tab[MAX_STACK_TAB_SIZE]; + uint32_t *clines_tab; + int clines_to_flush = 0; + uint32_t len = 0; + int line_no = rq->core_line_count; + struct ocf_map_info *map; + uint64_t start_addr; + struct _raw_atomic_flush_ctx *ctx; + + ENV_BUG_ON(!complete); + + if (!rq->info.flush_metadata) { + /* Nothing to flush call flush callback */ + complete(rq, 0); + return 0; + } + + ctx = env_zalloc(sizeof(*ctx), ENV_MEM_NOIO); + if (!ctx) { + complete(rq, -ENOMEM); + return -ENOMEM; + } + + ctx->rq = rq; + ctx->complete = complete; + env_atomic_set(&ctx->flush_req_cnt, 1); + + if (line_no == 1) { + map = &rq->map[0]; + if (map->flush && map->status != LOOKUP_MISS) { + result = _raw_atomic_flush_do_asynch_sec(cache, rq, + 0, ctx); + } + _raw_atomic_io_discard_cmpl(ctx, result); + return result; + } + + if (line_no <= MAX_STACK_TAB_SIZE) { + clines_tab = __clines_tab; + } else { + clines_tab = env_zalloc(sizeof(*clines_tab) * line_no, + ENV_MEM_NOIO); + if (!clines_tab) { + complete(rq, -ENOMEM); + env_free(ctx); + return -ENOMEM; + } + } + + for (i = 0; i < line_no; i++) { + map = &rq->map[i]; + + if (!map->flush || map->status == LOOKUP_MISS) + continue; + + if (i == 0) { + /* First */ + if (map->start_flush) { + _raw_atomic_flush_do_asynch_sec(cache, rq, i, + ctx); + } else { + _raw_atomic_add_page(cache, clines_tab, + map->coll_idx, &clines_to_flush); + } + } else if (i == (line_no - 1)) { + /* Last */ + if (map->stop_flush != ocf_line_end_sector(cache)) { + _raw_atomic_flush_do_asynch_sec(cache, rq, + i, ctx); + } else { + _raw_atomic_add_page(cache, clines_tab, + map->coll_idx, &clines_to_flush); + } + } else { + /* Middle */ + _raw_atomic_add_page(cache, clines_tab, map->coll_idx, + &clines_to_flush); + } + + } + + env_sort(clines_tab, clines_to_flush, sizeof(*clines_tab), + _raw_ram_flush_do_page_cmp, NULL); + + i = 0; + while (i < clines_to_flush) { + start_addr = clines_tab[i]; + start_addr *= ocf_line_size(cache); + start_addr += cache->device->metadata_offset; + len = ocf_line_size(cache); + + while (true) { + if ((i + 1) >= clines_to_flush) + break; + + if ((clines_tab[i] + 1) != clines_tab[i + 1]) + break; + + i++; + len += ocf_line_size(cache); + } + + result |= _raw_atomic_io_discard_do(cache, rq, start_addr, + len, ctx); + + if (result) + break; + + i++; + } + + _raw_atomic_io_discard_cmpl(ctx, result); + + if (line_no > MAX_STACK_TAB_SIZE) + env_free(clines_tab); + + return result; +} diff --git a/src/metadata/metadata_raw_atomic.h b/src/metadata/metadata_raw_atomic.h new file mode 100644 index 0000000..35c252c --- /dev/null +++ b/src/metadata/metadata_raw_atomic.h @@ -0,0 +1,16 @@ +/* + * Copyright(c) 2012-2018 Intel 
Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_RAW_ATOMIC_H__ +#define __METADATA_RAW_ATOMIC_H__ + +void raw_atomic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq, + uint32_t map_idx, int to_state, uint8_t start, uint8_t stop); + +int raw_atomic_flush_do_asynch(struct ocf_cache *cache, + struct ocf_request *rq, struct ocf_metadata_raw *raw, + ocf_end_t complete); + +#endif /* __METADATA_RAW_ATOMIC_H__ */ \ No newline at end of file diff --git a/src/metadata/metadata_raw_dynamic.c b/src/metadata/metadata_raw_dynamic.c new file mode 100644 index 0000000..305966b --- /dev/null +++ b/src/metadata/metadata_raw_dynamic.c @@ -0,0 +1,446 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "metadata.h" +#include "metadata_hash.h" +#include "metadata_raw.h" +#include "metadata_raw_dynamic.h" +#include "metadata_io.h" +#include "../utils/utils_io.h" +#include "../ocf_def_priv.h" + +#define OCF_METADATA_RAW_DEBUG 0 + +#if 1 == OCF_METADATA_RAW_DEBUG +#define OCF_DEBUG_TRACE(cache) \ + ocf_cache_log(cache, log_info, "[Metadata][Volatile] %s\n", __func__) + +#define OCF_DEBUG_PARAM(cache, format, ...) \ + ocf_cache_log(cache, log_info, "[Metadata][Volatile] %s - "format"\n", \ + __func__, ##__VA_ARGS__) +#else +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_PARAM(cache, format, ...) +#endif + +/******************************************************************************* + * Common RAW Implementation + ******************************************************************************/ + +/* + * Check if page is valid for specified RAW descriptor + */ +static bool _raw_ssd_page_is_valid(struct ocf_metadata_raw *raw, uint32_t page) +{ + ENV_BUG_ON(page < raw->ssd_pages_offset); + ENV_BUG_ON(page >= (raw->ssd_pages_offset + raw->ssd_pages)); + + return true; +} + +/******************************************************************************* + * RAW dynamic Implementation + ******************************************************************************/ + +#define _RAW_DYNAMIC_PAGE(raw, line) \ + ((line) / raw->entries_in_page) + +#define _RAW_DYNAMIC_PAGE_OFFSET(raw, line) \ + ((line % raw->entries_in_page) * raw->entry_size) + +/* + * RAW DYNAMIC control structure + */ +struct _raw_ctrl { + env_mutex lock; + env_atomic count; + void *pages[]; +}; + +static void *_raw_dynamic_get_item(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, uint32_t size) +{ + void *new = NULL; + struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; + uint32_t page = _RAW_DYNAMIC_PAGE(raw, line); + + ENV_BUG_ON(!_raw_is_valid(raw, line, size)); + + OCF_DEBUG_PARAM(cache, "Accessing item %u on page %u", line, page); + + if (!ctrl->pages[page]) { + /* No page, allocate one, and set*/ + + /* This RAW container has some restrictions and need to check + * this limitation: + * 1. no atomic context when allocation + * 2. 
Only one allocator in time + */ + + ENV_BUG_ON(env_in_interrupt()); + + env_mutex_lock(&ctrl->lock); + + if (ctrl->pages[page]) { + /* Page has been already allocated, skip allocation */ + goto _raw_dynamic_get_item_SKIP; + } + + OCF_DEBUG_PARAM(cache, "New page allocation - %u", page); + + new = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL); + if (new) { + ctrl->pages[page] = new; + env_atomic_inc(&ctrl->count); + } + +_raw_dynamic_get_item_SKIP: + + env_mutex_unlock(&ctrl->lock); + } + + if (ctrl->pages[page]) + return ctrl->pages[page] + _RAW_DYNAMIC_PAGE_OFFSET(raw, line); + + return NULL; +} + +/* +* RAM DYNAMIC Implementation - De-Initialize +*/ +int raw_dynamic_deinit(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + uint32_t i; + struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; + + if (!ctrl) + return 0; + + OCF_DEBUG_TRACE(cache); + + for (i = 0; i < raw->ssd_pages; i++) + env_free(ctrl->pages[i]); + + env_vfree(ctrl); + raw->priv = NULL; + + return 0; +} + +/* + * RAM DYNAMIC Implementation - Initialize + */ +int raw_dynamic_init(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + struct _raw_ctrl *ctrl; + size_t size = sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages); + + OCF_DEBUG_TRACE(cache); + + if (raw->entry_size > PAGE_SIZE) + return -1; + + ctrl = env_vmalloc(size); + if (!ctrl) + return -1; + + ENV_BUG_ON(env_memset(ctrl, size, 0)); + + if (env_mutex_init(&ctrl->lock)) { + env_vfree(ctrl); + return -1; + } + + raw->priv = ctrl; + + return 0; +} + +/* + * RAW DYNAMIC Implementation - Size of + */ +size_t raw_dynamic_size_of(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; + size_t size; + + /* Size of allocated items */ + size = env_atomic_read(&ctrl->count); + size *= PAGE_SIZE; + + /* Size of control structure */ + size += sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages); + + OCF_DEBUG_PARAM(cache, "Count = %d, Size = %lu", + env_atomic_read(&ctrl->count), size); + + return size; +} + +/* + * RAW DYNAMIC Implementation - Size on SSD + */ +uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + const size_t alignment = 128 * KiB / PAGE_SIZE; + + return DIV_ROUND_UP(raw->ssd_pages, alignment) * alignment; +} + +/* + * RAM DYNAMIC Implementation - Checksum + */ +uint32_t raw_dynamic_checksum(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; + uint64_t i; + uint32_t step = 0; + uint32_t crc = 0; + + for (i = 0; i < raw->ssd_pages; i++) { + if (ctrl->pages[i]) + crc = env_crc32(crc, ctrl->pages[i], PAGE_SIZE); + OCF_COND_RESCHED(step, 10000); + } + + return crc; +} + +/* +* RAM DYNAMIC Implementation - Get +*/ +int raw_dynamic_get(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + void *data, uint32_t size) +{ + void *item = _raw_dynamic_get_item(cache, raw, line, size); + + if (!item) { + ENV_BUG_ON(env_memset(data, size, 0)); + ocf_metadata_error(cache); + return -1; + } + + return env_memcpy(data, size, item, size); +} + +/* +* RAM DYNAMIC Implementation - Set +*/ +int raw_dynamic_set(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + void *data, uint32_t size) +{ + void *item = _raw_dynamic_get_item(cache, raw, line, size); + + if (!item) { + ocf_metadata_error(cache); + return -1; + } + + return env_memcpy(item, size, data, size); +} + +/* +* RAM DYNAMIC Implementation - access +*/ +const void 
*raw_dynamic_rd_access(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + uint32_t size) +{ + return _raw_dynamic_get_item(cache, raw, line, size); +} + +/* +* RAM DYNAMIC Implementation - access +*/ +void *raw_dynamic_wr_access(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + uint32_t size) +{ + return _raw_dynamic_get_item(cache, raw, line, size); +} + +int raw_dynamic_flush(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line) +{ + uint32_t page = _RAW_DYNAMIC_PAGE(raw, line); + struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; + + OCF_DEBUG_PARAM(cache, "Line %u, page = %u", line, page); + + ENV_BUG_ON(!ctrl->pages[page]); + + return metadata_io_write(cache, ctrl->pages[page], + raw->ssd_pages_offset + page); +} + +/* +* RAM DYNAMIC Implementation - Load all +*/ +#define RAW_DYNAMIC_LOAD_PAGES 128 + +int raw_dynamic_load_all(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; + uint64_t i = 0, i_page = 0; + uint64_t count = RAW_DYNAMIC_LOAD_PAGES; + int error = 0, cmp; + + struct ocf_io *io; + ctx_data_t *data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES); + char *page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL); + char *zpage = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL); + + if (!data || !page || !zpage) { + ctx_data_free(cache->owner, data); + env_free(page); + env_free(zpage); + return -ENOMEM; + } + + OCF_DEBUG_TRACE(cache); + + /* Loading, need to load all metadata, when page is zero set, no need + * to allocate space for it + */ + + while (i < raw->ssd_pages) { + if (i + count > raw->ssd_pages) + count = raw->ssd_pages - i; + + /* Allocate IO */ + io = ocf_new_cache_io(cache); + if (!io) { + error = -ENOMEM; + break; + } + + /* Setup IO */ + error = ocf_io_set_data(io, data, 0); + if (error) { + ocf_io_put(io); + break; + } + ocf_io_configure(io, + PAGES_TO_BYTES(raw->ssd_pages_offset + i), + PAGES_TO_BYTES(count), OCF_READ, 0, 0); + + /* Submit IO */ + error = ocf_submit_io_wait(io); + ocf_io_put(io); + io = NULL; + + if (error) + break; + + /* Reset head of data buffer */ + ctx_data_seek_check(cache->owner, data, + ctx_data_seek_begin, 0); + + for (i_page = 0; i_page < count; i_page++, i++) { + if (!page) { + page = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL); + if (!page) { + /* Allocation error */ + error = -ENOMEM; + break; + } + } + + ctx_data_rd_check(cache->owner, page, data, PAGE_SIZE); + + error = env_memcmp(zpage, PAGE_SIZE, page, + PAGE_SIZE, &cmp); + if (error) + break; + + if (cmp == 0) { + OCF_DEBUG_PARAM(cache, "Zero loaded %llu", i); + continue; + } + + OCF_DEBUG_PARAM(cache, "Non-zero loaded %llu", i); + + ctrl->pages[i] = page; + page = NULL; + + env_atomic_inc(&ctrl->count); + } + + if (error) + break; + } + + env_free(zpage); + env_free(page); + ctx_data_free(cache->owner, data); + + return error; +} + +/* +* RAM DYNAMIC Implementation - Flush all +*/ +/* + * RAM Implementation - Flush IO callback - Fill page + */ +static int _raw_dynamic_flush_all_fill(struct ocf_cache *cache, + ctx_data_t *data, uint32_t page, void *context) +{ + uint32_t raw_page; + struct ocf_metadata_raw *raw = (struct ocf_metadata_raw *)context; + struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv; + + ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page)); + + raw_page = page - raw->ssd_pages_offset; + + if (ctrl->pages[raw_page]) { + OCF_DEBUG_PARAM(cache, "Page = %u", raw_page); + ctx_data_wr_check(cache->owner, data, 
ctrl->pages[raw_page], + PAGE_SIZE); + } else { + OCF_DEBUG_PARAM(cache, "Zero fill, Page = %u", raw_page); + /* Page was not allocated before set only zeros */ + ctx_data_zero_check(cache->owner, data, PAGE_SIZE); + } + + return 0; +} + +int raw_dynamic_flush_all(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + OCF_DEBUG_TRACE(cache); + return metadata_io_write_i(cache, raw->ssd_pages_offset, + raw->ssd_pages, _raw_dynamic_flush_all_fill, raw); +} + +/* + * RAM DYNAMIC Implementation - Mark to Flush + */ +void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request *rq, + uint32_t map_idx, int to_state, uint8_t start, uint8_t stop) +{ + ENV_BUG(); +} + +/* + * RAM DYNAMIC Implementation - Do flushing asynchronously + */ +int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, + struct ocf_request *rq, struct ocf_metadata_raw *raw, + ocf_end_t complete) +{ + ENV_BUG(); + return -ENOSYS; +} diff --git a/src/metadata/metadata_raw_dynamic.h b/src/metadata/metadata_raw_dynamic.h new file mode 100644 index 0000000..be51bb0 --- /dev/null +++ b/src/metadata/metadata_raw_dynamic.h @@ -0,0 +1,106 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_RAW_DYNAMIC_H__ +#define __METADATA_RAW_DYNAMIC_H__ + +/** + * @file metadata_raw_dynamic.h + * @brief Metadata RAW container implementation for dynamic numbers of elements + */ + +/* + * RAW DYNAMIC - Initialize + */ +int raw_dynamic_init(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAW DYNAMIC - De-Initialize + */ +int raw_dynamic_deinit(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAW DYNAMIC - Get size of memory footprint of this RAW metadata container + */ +size_t raw_dynamic_size_of(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAW DYNAMIC Implementation - Size on SSD + */ +uint32_t raw_dynamic_size_on_ssd(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAW DYNAMIC Implementation - Checksum + */ +uint32_t raw_dynamic_checksum(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAW DYNAMIC - Get specified entry + */ +int raw_dynamic_get(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + void *data, uint32_t size); + +/* + * RAW DYNAMIC - Set specified entry + */ +int raw_dynamic_set(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + void *data, uint32_t size); + +/* + * RAW DYNAMIC - Read only access for specified entry + */ +const void *raw_dynamic_rd_access(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + uint32_t size); + +/* + * RAW DYNAMIC - Write access for specified entry + */ +void *raw_dynamic_wr_access(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line, + uint32_t size); + +/* + * RAW DYNAMIC - Flush specified entry + */ +int raw_dynamic_flush(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line); + +/* + * RAW DYNAMIC - Load all metadata of this RAW metadata container + * from cache device + */ +int raw_dynamic_load_all(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAW DYNAMIC - Flush all metadata of this RAW metadata container + * to cache device + */ +int raw_dynamic_flush_all(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAW DYNAMIC - Mark specified entry to be flushed + */ +void raw_dynamic_flush_mark(struct ocf_cache *cache, struct ocf_request 
*rq, + uint32_t map_idx, int to_state, uint8_t start, uint8_t stop); + +/* + * DYNAMIC Implementation - Do Flush Asynchronously + */ +int raw_dynamic_flush_do_asynch(struct ocf_cache *cache, + struct ocf_request *rq, struct ocf_metadata_raw *raw, + ocf_end_t complete); + + +#endif /* METADATA_RAW_H_ */ diff --git a/src/metadata/metadata_raw_volatile.c b/src/metadata/metadata_raw_volatile.c new file mode 100644 index 0000000..64dcc3c --- /dev/null +++ b/src/metadata/metadata_raw_volatile.c @@ -0,0 +1,74 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "metadata.h" +#include "metadata_hash.h" +#include "metadata_raw.h" +#include "metadata_io.h" +#include "metadata_raw_volatile.h" + +/* + * RAW volatile Implementation - Size on SSD + */ +uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + return 0; +} + +/* + * RAW volatile Implementation - Checksum + */ +uint32_t raw_volatile_checksum(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + return 0; +} + +/* + * RAW volatile Implementation - Flush specified element to SSD + */ +int raw_volatile_flush(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line) +{ + return 0; +} + +/* + * RAW volatile Implementation - Load all metadata elements from SSD + */ +int raw_volatile_load_all(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + return -ENOTSUP; +} + +/* + * RAM Implementation - Flush all elements + */ +int raw_volatile_flush_all(struct ocf_cache *cache, + struct ocf_metadata_raw *raw) +{ + return 0; +} + +/* + * RAM RAM Implementation - Mark to Flush + */ +void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq, + uint32_t map_idx, int to_state, uint8_t start, uint8_t stop) +{ +} + +/* + * RAM RAM Implementation - Do Flush asynchronously + */ +int raw_volatile_flush_do_asynch(struct ocf_cache *cache, + struct ocf_request *rq, struct ocf_metadata_raw *raw, + ocf_end_t complete) +{ + complete(rq, 0); + return 0; +} diff --git a/src/metadata/metadata_raw_volatile.h b/src/metadata/metadata_raw_volatile.h new file mode 100644 index 0000000..ac93950 --- /dev/null +++ b/src/metadata/metadata_raw_volatile.h @@ -0,0 +1,52 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_RAW_VOLATILE_H__ +#define __METADATA_RAW_VOLATILE_H__ + +/* + * RAW volatile Implementation - Size on SSD + */ +uint32_t raw_volatile_size_on_ssd(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAW volatile Implementation - Checksum + */ +uint32_t raw_volatile_checksum(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAW volatile Implementation - Flush specified element to SSD + */ +int raw_volatile_flush(struct ocf_cache *cache, + struct ocf_metadata_raw *raw, ocf_cache_line_t line); + +/* + * RAW volatile Implementation - Load all metadata elements from SSD + */ +int raw_volatile_load_all(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAW volatile Implementation - Flush all elements + */ +int raw_volatile_flush_all(struct ocf_cache *cache, + struct ocf_metadata_raw *raw); + +/* + * RAM RAW volatile Implementation - Mark to Flush + */ +void raw_volatile_flush_mark(struct ocf_cache *cache, struct ocf_request *rq, + uint32_t map_idx, int to_state, uint8_t start, uint8_t stop); + +/* + * RAM RAW volatile Implementation - Do Flush asynchronously + */ +int 
raw_volatile_flush_do_asynch(struct ocf_cache *cache, + struct ocf_request *rq, struct ocf_metadata_raw *raw, + ocf_end_t complete); + +#endif /* __METADATA_RAW_VOLATILE_H__ */ diff --git a/src/metadata/metadata_status.h b/src/metadata/metadata_status.h new file mode 100644 index 0000000..021e12a --- /dev/null +++ b/src/metadata/metadata_status.h @@ -0,0 +1,435 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_STATUS_H__ +#define __METADATA_STATUS_H__ + +#include "../ocf_request.h" +/******************************************************************************* + * Dirty + ******************************************************************************/ + +static inline void metadata_init_status_bits(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + OCF_METADATA_BITS_LOCK_WR(); + + cache->metadata.iface.clear_dirty(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end); + cache->metadata.iface.clear_valid(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end); + + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline bool metadata_test_dirty_all(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + bool test; + + OCF_METADATA_BITS_LOCK_RD(); + test = cache->metadata.iface.test_dirty(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end, true); + OCF_METADATA_BITS_UNLOCK_RD(); + + return test; +} + +static inline bool metadata_test_dirty(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + bool test; + + OCF_METADATA_BITS_LOCK_RD(); + test = cache->metadata.iface.test_dirty(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end, false); + OCF_METADATA_BITS_UNLOCK_RD(); + + return test; +} + +static inline void metadata_set_dirty(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.set_dirty(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end); + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline void metadata_clear_dirty(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.clear_dirty(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end); + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline bool metadata_test_and_clear_dirty( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + bool test; + + OCF_METADATA_BITS_LOCK_WR(); + test = cache->metadata.iface.test_and_clear_dirty(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end, false); + OCF_METADATA_BITS_UNLOCK_WR(); + + return test; +} + +static inline bool metadata_test_and_set_dirty(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + bool test; + + OCF_METADATA_BITS_LOCK_WR(); + test = cache->metadata.iface.test_and_set_dirty(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end, false); + OCF_METADATA_BITS_UNLOCK_WR(); + + return test; +} + +/******************************************************************************* + * Dirty - Sector Implementation + ******************************************************************************/ + +static inline bool metadata_test_dirty_sec(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop) +{ + bool test; + + OCF_METADATA_BITS_LOCK_RD(); + test = 
cache->metadata.iface.test_dirty(cache, line, + start, stop, false); + OCF_METADATA_BITS_UNLOCK_RD(); + + return test; +} + +static inline bool metadata_test_dirty_all_sec(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop) +{ + bool test; + + OCF_METADATA_BITS_LOCK_RD(); + test = cache->metadata.iface.test_dirty(cache, line, + start, stop, true); + OCF_METADATA_BITS_UNLOCK_RD(); + + return test; +} + +static inline bool metadata_test_dirty_one(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t pos) +{ + return metadata_test_dirty_sec(cache, line, pos, pos); +} + +static inline bool metadata_test_dirty_out_sec(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop) +{ + bool test; + + OCF_METADATA_BITS_LOCK_RD(); + test = cache->metadata.iface.test_out_dirty(cache, line, start, stop); + OCF_METADATA_BITS_UNLOCK_RD(); + + return test; +} + +static inline void metadata_set_dirty_sec(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.set_dirty(cache, line, start, stop); + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline void metadata_clear_dirty_sec(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.clear_dirty(cache, line, start, stop); + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline void metadata_set_dirty_sec_one(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t pos) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.set_dirty(cache, line, pos, pos); + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline void metadata_clear_dirty_sec_one(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t pos) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.clear_dirty(cache, line, pos, pos); + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline bool metadata_test_and_clear_dirty_sec( + struct ocf_cache *cache, ocf_cache_line_t line, + uint8_t start, uint8_t stop) +{ + bool test = false; + + OCF_METADATA_BITS_LOCK_WR(); + test = cache->metadata.iface.test_and_clear_dirty(cache, line, + start, stop, false); + OCF_METADATA_BITS_UNLOCK_WR(); + + return test; +} + +/* + * Marks given cache line's bits as clean + * + * @return true if the cache line was dirty and became clean + * @return false for other cases + */ +static inline bool metadata_clear_dirty_sec_changed( + struct ocf_cache *cache, ocf_cache_line_t line, + uint8_t start, uint8_t stop) +{ + bool was_dirty, is_dirty = false; + + OCF_METADATA_BITS_LOCK_WR(); + + was_dirty = cache->metadata.iface.test_dirty(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end, + false); + + if (was_dirty) { + is_dirty = cache->metadata.iface.clear_dirty(cache, line, + start, stop); + } + + OCF_METADATA_BITS_UNLOCK_WR(); + + return was_dirty && !is_dirty; +} + +/* + * Marks given cache line's bits as dirty + * + * @return true if the cache line was clean and became dirty + * @return false if the cache line was dirty before marking bits + */ +static inline bool metadata_set_dirty_sec_changed( + struct ocf_cache *cache, ocf_cache_line_t line, + uint8_t start, uint8_t stop) +{ + bool was_dirty; + + OCF_METADATA_BITS_LOCK_WR(); + was_dirty = cache->metadata.iface.set_dirty(cache, line, start, stop); + OCF_METADATA_BITS_UNLOCK_WR(); + + return !was_dirty; +} + +/******************************************************************************* + * Valid + 
******************************************************************************/ + +static inline bool metadata_test_valid_any(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + bool test; + + OCF_METADATA_BITS_LOCK_RD(); + test = cache->metadata.iface.test_valid(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end, false); + OCF_METADATA_BITS_UNLOCK_RD(); + + return test; +} + +static inline bool metadata_test_valid(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + bool test; + + OCF_METADATA_BITS_LOCK_RD(); + test = cache->metadata.iface.test_valid(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end, true); + OCF_METADATA_BITS_UNLOCK_RD(); + + return test; +} + +static inline void metadata_set_valid(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.set_valid(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end); + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline void metadata_clear_valid(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.clear_valid(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end); + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline bool metadata_test_and_clear_valid( + struct ocf_cache *cache, ocf_cache_line_t line) +{ + bool test = false; + + OCF_METADATA_BITS_LOCK_WR(); + test = cache->metadata.iface.test_and_clear_valid(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end, true); + OCF_METADATA_BITS_UNLOCK_WR(); + + return test; +} + +static inline bool metadata_test_and_set_valid(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + bool test = false; + + OCF_METADATA_BITS_LOCK_WR(); + test = cache->metadata.iface.test_and_set_valid(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end, true); + OCF_METADATA_BITS_UNLOCK_WR(); + + return test; +} + +/******************************************************************************* + * Valid - Sector Implementation + ******************************************************************************/ + +static inline bool metadata_test_valid_sec(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop) +{ + bool test; + + OCF_METADATA_BITS_LOCK_RD(); + test = cache->metadata.iface.test_valid(cache, line, + start, stop, true); + OCF_METADATA_BITS_UNLOCK_RD(); + + return test; +} + +static inline bool metadata_test_valid_any_out_sec( + struct ocf_cache *cache, ocf_cache_line_t line, + uint8_t start, uint8_t stop) +{ + bool test = false; + + OCF_METADATA_BITS_LOCK_RD(); + test = cache->metadata.iface.test_out_valid(cache, line, + start, stop); + OCF_METADATA_BITS_UNLOCK_RD(); + + return test; +} + +static inline bool metadata_test_valid_one(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t pos) +{ + return metadata_test_valid_sec(cache, line, pos, pos); +} + +/* + * Marks given cache line's bits as valid + * + * @return true if any of the cache line's bits was valid before this operation + * @return false if the cache line was invalid (all bits invalid) before this + * operation + */ +static inline bool metadata_set_valid_sec_changed( + struct ocf_cache *cache, ocf_cache_line_t line, + uint8_t start, uint8_t stop) +{ + bool was_any_valid; + + OCF_METADATA_BITS_LOCK_WR(); + was_any_valid = cache->metadata.iface.set_valid(cache, line, + 
start, stop); + OCF_METADATA_BITS_UNLOCK_WR(); + + return !was_any_valid; +} + +static inline void metadata_clear_valid_sec(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.clear_valid(cache, line, start, stop); + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline void metadata_clear_valid_sec_one(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t pos) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.clear_valid(cache, line, pos, pos); + OCF_METADATA_BITS_UNLOCK_WR(); +} + +static inline void metadata_set_valid_sec_one(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t pos) +{ + OCF_METADATA_BITS_LOCK_WR(); + cache->metadata.iface.set_valid(cache, line, pos, pos); + OCF_METADATA_BITS_UNLOCK_WR(); +} +/* + * Marks given cache line's bits as invalid + * + * @return true if any of the cache line's bits was valid and the cache line + * became invalid (all bits invalid) after the operation + * @return false in other cases + */ +static inline bool metadata_clear_valid_sec_changed( + struct ocf_cache *cache, ocf_cache_line_t line, + uint8_t start, uint8_t stop, bool *is_valid) +{ + bool was_any_valid; + + OCF_METADATA_BITS_LOCK_WR(); + + was_any_valid = cache->metadata.iface.test_valid(cache, line, + cache->metadata.settings.sector_start, + cache->metadata.settings.sector_end, false); + + *is_valid = cache->metadata.iface.clear_valid(cache, line, + start, stop); + + OCF_METADATA_BITS_UNLOCK_WR(); + + return was_any_valid && !*is_valid; +} + +#endif /* METADATA_STATUS_H_ */ diff --git a/src/metadata/metadata_structs.h b/src/metadata/metadata_structs.h new file mode 100644 index 0000000..dd71593 --- /dev/null +++ b/src/metadata/metadata_structs.h @@ -0,0 +1,491 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_STRUCTS_H__ +#define __METADATA_STRUCTS_H__ + +#include "../eviction/eviction.h" +#include "../cleaning/cleaning.h" +#include "../ocf_request.h" + +/** + * @file metadata_priv.h + * @brief Metadata private structures + */ + +/** + * @brief Metadata shutdown status + */ +enum ocf_metadata_shutdown_status { + ocf_metadata_clean_shutdown = 1, /*!< OCF shutdown graceful*/ + ocf_metadata_dirty_shutdown = 0, /*!< Dirty OCF shutdown*/ + ocf_metadata_detached = 2, /*!< Cache device detached */ +}; + +/** + * @brief Asynchronous metadata request completed + * + * @param cache - Cache instance + * @param error - Indicates operation result, 0 - Finished successfully + * @param line - cache line for which completion is signaled + * @param context - Context of metadata request + */ +typedef void (*ocf_metadata_asynch_hndl)(struct ocf_cache *cache, + int error, ocf_cache_line_t line, void *context); + +typedef void (*ocf_metadata_asynch_flush_hndl)(void *context, int error); + +/* + * Metadata cache line location on pages interface + */ +struct ocf_metadata_layout_iface { + + /** + * @brief Initialize freelist partition + * + * @param cache - Cache instance + */ + + void (*init_freelist)(struct ocf_cache *cache); + + /** + * This function is mapping collision index to appropriate cache line + * (logical cache line to physical one mapping). + * + * It is necessary because we want to generate sequential workload with + * data to cache device. + * Our collision list, for example, looks: + * 0 3 6 9 + * 1 4 7 10 + * 2 5 8 + * All collision index in each column is on the same page + * on cache device. 
We don't want send request x times to the same + * page. To don't do it we use collision index by row, but in this + * case we can't use collision index directly as cache line, + * because we will generate non sequential workload (we will write + * pages: 0 -> 3 -> 6 ...). To map collision index in correct way + * we use this function. + * + * After use this function, collision index in the above array + * corresponds with below cache line: + * 0 1 2 3 + * 4 5 6 7 + * 8 9 10 + * + * @param cache - cache instance + * @param idx - index in collision list + * @return mapped cache line + */ + ocf_cache_line_t (*lg2phy)(struct ocf_cache *cache, + ocf_cache_line_t coll_idx); + + /** + * @brief Map physical cache line on cache device to logical one + * @note This function is the inverse of map_coll_idx_to_cache_line + * + * @param cache Cache instance + * @param phy Physical cache line of cache device + * @return Logical cache line + */ + ocf_cache_line_t (*phy2lg)(struct ocf_cache *cache, + ocf_cache_line_t phy); +}; + +/** + * OCF Metadata interface + */ +struct ocf_metadata_iface { + /** + * @brief Initialize metadata + * + * @param cache - Cache instance + * @param cache_line_size - Cache line size + * @return 0 - Operation success otherwise failure + */ + int (*init)(struct ocf_cache *cache, + ocf_cache_line_size_t cache_line_size); + + /** + * @brief Initialize variable size metadata sections + * + * @param cache - Cache instance + * @param device_size - Cache size in bytes + * @param cache_line_size - Cache line size + * @param layout Metadata layout + * @return 0 - Operation success otherwise failure + */ + int (*init_variable_size)(struct ocf_cache *cache, uint64_t device_size, + ocf_cache_line_size_t cache_line_size, + ocf_metadata_layout_t layout); + + /** + * @brief Metadata cache line location on pages interface + */ + const struct ocf_metadata_layout_iface *layout_iface; + + /** + * @brief Initialize hash table + * + * @param cache - Cache instance + */ + void (*init_hash_table)(struct ocf_cache *cache); + + /** + * @brief De-Initialize metadata + * + * @param cache - Cache instance + */ + void (*deinit)(struct ocf_cache *cache); + + /** + * @brief De-Initialize variable size metadata segments + * + * @param cache - Cache instance + */ + void (*deinit_variable_size)(struct ocf_cache *cache); + + /** + * @brief Get memory footprint + * + * @param cache - Cache instance + * @return 0 - memory footprint + */ + size_t (*size_of)(struct ocf_cache *cache); + + /** + * @brief Get amount of pages required for metadata + * + * @param cache - Cache instance + * @return Pages required for store metadata on cache device + */ + ocf_cache_line_t (*pages)(struct ocf_cache *cache); + + /** + * @brief Get amount of cache lines + * + * @param cache - Cache instance + * @return Amount of cache lines (cache device lines - metadata space) + */ + ocf_cache_line_t (*cachelines)(struct ocf_cache *cache); + + /** + * @brief Load metadata from cache device + * + * @param[in] cache - Cache instance + * @return 0 - Operation success otherwise failure + */ + int (*load_all)(struct ocf_cache *cache); + + /** + * @brief Load metadata from recovery procedure + * recovery + * @param[in] cache - Cache instance + * @return 0 - Operation success otherwise failure + */ + int (*load_recovery)(struct ocf_cache *cache); + + /** + * @brief Flush metadata into cahce cache + * + * @param[in] cache - Cache instance + * @return 0 - Operation success otherwise failure + */ + int (*flush_all)(struct ocf_cache *cache); + + /** + 
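The `lg2phy`/`phy2lg` comment above describes remapping a column-major collision index onto row-major physical cache lines, so that metadata for one on-disk page stays together while I/O to the cache device remains sequential. The sketch below reproduces that arithmetic for the 3-rows-by-4-columns example from the comment; `LINES_PER_PAGE` and `NUM_PAGES` are illustrative parameters, not the actual OCF layout code.

```c
#include <stdio.h>
#include <stdint.h>

/* Illustrative mapping for the example in the comment above: collision
 * indices are laid out column-major (3 per metadata page), physical cache
 * lines are the row-major positions. */
#define LINES_PER_PAGE	3u	/* rows    - entries sharing one metadata page */
#define NUM_PAGES	4u	/* columns - number of metadata pages */

static uint32_t example_lg2phy(uint32_t coll_idx)
{
	uint32_t row = coll_idx % LINES_PER_PAGE;
	uint32_t col = coll_idx / LINES_PER_PAGE;

	return row * NUM_PAGES + col;
}

static uint32_t example_phy2lg(uint32_t phy)
{
	uint32_t row = phy / NUM_PAGES;
	uint32_t col = phy % NUM_PAGES;

	return col * LINES_PER_PAGE + row;
}

int main(void)
{
	/* Prints 0->0, 1->4, 2->8, 3->1, ... matching the comment's example */
	for (uint32_t idx = 0; idx < 11; idx++) {
		printf("collision idx %2u -> cache line %2u (back to %2u)\n",
				(unsigned)idx,
				(unsigned)example_lg2phy(idx),
				(unsigned)example_phy2lg(example_lg2phy(idx)));
	}

	return 0;
}
```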
* @brief Flush metadata for specified cache line + * + * @param[in] cache - Cache instance + * @param[in] line - cache line which to be flushed + */ + void (*flush)(struct ocf_cache *cache, ocf_cache_line_t line); + + /** + * @brief Mark specified cache line to be flushed + * + * @param[in] cache - Cache instance + * @param[in] line - cache line which to be flushed + */ + void (*flush_mark)(struct ocf_cache *cache, struct ocf_request *rq, + uint32_t map_idx, int to_state, uint8_t start, + uint8_t stop); + + /** + * @brief Flush marked cache lines asynchronously + * + * @param cache - Cache instance + * @param queue - I/O queue to which metadata flush should be submitted + * @param remaining - request remaining + * @param complete - flushing request callback + * @param context - context that will be passed into callback + */ + void (*flush_do_asynch)(struct ocf_cache *cache, + struct ocf_request *rq, ocf_end_t complete); + + + /* TODO Provide documentation below */ + + enum ocf_metadata_shutdown_status (*get_shutdown_status)( + struct ocf_cache *cache); + + int (*set_shutdown_status)(struct ocf_cache *cache, + enum ocf_metadata_shutdown_status shutdown_status); + + int (*load_superblock)(struct ocf_cache *cache); + + int (*flush_superblock)(struct ocf_cache *cache); + + uint64_t (*get_reserved_lba)(struct ocf_cache *cache); + + /** + * @brief Get eviction policy + * + * @param[in] cache - Cache instance + * @param[in] line - cache line for which eviction policy is requested + * @param[out] eviction_policy - Eviction policy + */ + void (*get_eviction_policy)(struct ocf_cache *cache, + ocf_cache_line_t line, + union eviction_policy_meta *eviction_policy); + + /** + * @brief Set eviction policy + * + * @param[in] cache - Cache instance + * @param[in] line - Eviction policy values which will be stored in + * metadata service + * @param[out] eviction_policy - Eviction policy + */ + void (*set_eviction_policy)(struct ocf_cache *cache, + ocf_cache_line_t line, + union eviction_policy_meta *eviction_policy); + + /** + * @brief Flush eviction policy for given cache line + * + * @param[in] cache - Cache instance + * @param[in] line - Cache line for which flushing has to be performed + */ + void (*flush_eviction_policy)(struct ocf_cache *cache, + ocf_cache_line_t line); + + + /** + * @brief Get cleaning policy + * + * @param[in] cache - Cache instance + * @param[in] line - cache line for which cleaning policy is requested + * @param[out] cleaning_policy - Cleaning policy + */ + void (*get_cleaning_policy)(struct ocf_cache *cache, + ocf_cache_line_t line, + struct cleaning_policy_meta *cleaning_policy); + + /** + * @brief Set cleaning policy + * + * @param[in] cache - Cache instance + * @param[in] line + * @param[in] cleaning_policy - Cleaning policy values which will be + * stored in metadata service + */ + void (*set_cleaning_policy)(struct ocf_cache *cache, + ocf_cache_line_t line, + struct cleaning_policy_meta *cleaning_policy); + + /** + * @brief Flush cleaning policy for given cache line + * + * @param[in] cache - Cache instance + * @param[in] line - Cache line for which flushing has to be performed + */ + void (*flush_cleaning_policy)(struct ocf_cache *cache, + ocf_cache_line_t line); + + /** + * @brief Get hash table for specified index + * + * @param[in] cache - Cache instance + * @param[in] index - Hash table index + * @return Cache line value under specified hash table index + */ + ocf_cache_line_t (*get_hash)(struct ocf_cache *cache, + ocf_cache_line_t index); + + /** + * @brief Set hash 
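The eviction and cleaning policy hooks above give a policy module read-modify-write access to its per-line metadata, with a separate flush call to persist the entry. A hedged sketch of that pattern follows; the members of `struct cleaning_policy_meta` are policy specific and not shown here, so the update step is only indicated by a comment.

```c
/* Illustrative read-modify-write of per-line cleaning policy metadata via
 * the interface above (assumes the metadata and cleaning headers are
 * included). The example helper name is hypothetical. */
static void example_touch_cleaning_meta(struct ocf_cache *cache,
		ocf_cache_line_t line)
{
	struct cleaning_policy_meta meta;

	cache->metadata.iface.get_cleaning_policy(cache, line, &meta);

	/* ... update policy-specific fields of `meta` here ... */

	cache->metadata.iface.set_cleaning_policy(cache, line, &meta);

	/* Persist the updated entry for this cache line */
	cache->metadata.iface.flush_cleaning_policy(cache, line);
}
```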
table value for specified index + * + * @param[in] cache - Cache instance + * @param[in] index - Hash table index + * @param[in] line - Cache line value to be set under specified hash + * table index + */ + void (*set_hash)(struct ocf_cache *cache, + ocf_cache_line_t index, ocf_cache_line_t line); + + /** + * @brief Flush has table for specified index + * + * @param[in] cache - Cache instance + * @param[in] index - Hash table index + */ + void (*flush_hash)(struct ocf_cache *cache, + ocf_cache_line_t index); + + /** + * @brief Get hash table entries + * + * @param[in] cache - Cache instance + * @return Hash table entries + */ + ocf_cache_line_t (*entries_hash)(struct ocf_cache *cache); + + /* TODO Provide documentation below */ + void (*set_core_info)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_core_id_t core_id, + uint64_t core_sector); + + void (*get_core_info)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_core_id_t *core_id, + uint64_t *core_sector); + + ocf_core_id_t (*get_core_id)(struct ocf_cache *cache, + ocf_cache_line_t line); + + uint64_t (*get_core_sector)(struct ocf_cache *cache, + ocf_cache_line_t line); + + void (*get_core_and_part_id)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_core_id_t *core_id, + ocf_part_id_t *part_id); + + struct ocf_metadata_uuid *(*get_core_uuid)( + struct ocf_cache *cache, ocf_core_id_t core_id); + + void (*set_collision_info)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_cache_line_t next, + ocf_cache_line_t prev); + + void (*get_collision_info)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_cache_line_t *next, + ocf_cache_line_t *prev); + + void (*set_collision_next)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_cache_line_t next); + + void (*set_collision_prev)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_cache_line_t prev); + + ocf_cache_line_t (*get_collision_next)(struct ocf_cache *cache, + ocf_cache_line_t line); + + ocf_cache_line_t (*get_collision_prev)(struct ocf_cache *cache, + ocf_cache_line_t line); + + ocf_part_id_t (*get_partition_id)(struct ocf_cache *cache, + ocf_cache_line_t line); + + ocf_cache_line_t (*get_partition_next)(struct ocf_cache *cache, + ocf_cache_line_t line); + + ocf_cache_line_t (*get_partition_prev)(struct ocf_cache *cache, + ocf_cache_line_t line); + + void (*get_partition_info)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_part_id_t *part_id, + ocf_cache_line_t *next_line, + ocf_cache_line_t *prev_line); + + void (*set_partition_next)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_cache_line_t next_line); + + void (*set_partition_prev)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_cache_line_t prev_line); + + void (*set_partition_info)(struct ocf_cache *cache, + ocf_cache_line_t line, ocf_part_id_t part_id, + ocf_cache_line_t next_line, ocf_cache_line_t prev_line); + + const struct ocf_metadata_status* + (*rd_status_access)(struct ocf_cache *cache, + ocf_cache_line_t line); + + struct ocf_metadata_status* + (*wr_status_access)(struct ocf_cache *cache, + ocf_cache_line_t line); + + bool (*test_dirty)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all); + + bool (*test_out_dirty)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop); + + bool (*clear_dirty)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop); + + bool (*set_dirty)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop); + + bool (*test_and_set_dirty)(struct 
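The hash and collision accessors above are the building blocks of cache-line lookup: a (core id, core sector) pair hashes to a bucket, `get_hash` returns the head of that bucket's collision chain, and the chain is walked via `get_collision_next` until a line with matching core info is found. A sketch of that walk is shown below; the hash function and the "no line" sentinel are defined elsewhere, so here they are passed in by the caller as assumptions.

```c
/* Illustrative lookup sketch. `hash_of` and `invalid` stand in for the real
 * hash function and the invalid-line sentinel, which are defined elsewhere. */
static ocf_cache_line_t example_lookup(struct ocf_cache *cache,
		ocf_core_id_t core_id, uint64_t core_sector,
		ocf_cache_line_t (*hash_of)(struct ocf_cache *,
				ocf_core_id_t, uint64_t),
		ocf_cache_line_t invalid)
{
	ocf_cache_line_t bucket = hash_of(cache, core_id, core_sector);
	ocf_cache_line_t line = cache->metadata.iface.get_hash(cache, bucket);

	while (line != invalid) {
		ocf_core_id_t cur_core;
		uint64_t cur_sector;

		cache->metadata.iface.get_core_info(cache, line,
				&cur_core, &cur_sector);

		if (cur_core == core_id && cur_sector == core_sector)
			return line;	/* hit */

		line = cache->metadata.iface.get_collision_next(cache, line);
	}

	return invalid;	/* miss */
}
```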
ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all); + + bool (*test_and_clear_dirty)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all); + + + bool (*test_valid)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all); + + bool (*test_out_valid)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop); + + bool (*clear_valid)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop); + + bool (*set_valid)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop); + + bool (*test_and_set_valid)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all); + + bool (*test_and_clear_valid)(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t start, uint8_t stop, bool all); +}; + +struct ocf_cache_line_settings { + ocf_cache_line_size_t size; + uint64_t sector_count; + uint64_t sector_start; + uint64_t sector_end; +}; + +/** + * @brief Metadata control structure + */ +struct ocf_metadata { + const struct ocf_metadata_iface iface; + /*!< Metadata service interface */ + + void *iface_priv; + /*!< Private data of metadata service interface */ + + const struct ocf_cache_line_settings settings; + /*!< Cache line configuration */ + + bool is_volatile; + /*!< true if metadata used in volatile mode (RAM only) */ + + struct { + env_rwsem collision; /*!< lock for collision table */ + env_rwlock status; /*!< Fast lock for status bits */ + env_spinlock eviction; /*!< Fast lock for eviction policy */ + } lock; +}; + + +#define OCF_METADATA_RD 0 +#define OCF_METADATA_WR 1 + +#endif /* __METADATA_STRUCTS_H__ */ diff --git a/src/metadata/metadata_superblock.h b/src/metadata/metadata_superblock.h new file mode 100644 index 0000000..9283bbe --- /dev/null +++ b/src/metadata/metadata_superblock.h @@ -0,0 +1,93 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_SUPERBLOCK_H__ +#define __METADATA_SUPERBLOCK_H__ + +#define CACHE_MAGIC_NUMBER 0x187E1CA6 + +/** + * @brief OCF cache metadata configuration superblock + */ +struct ocf_superblock_config { + /** WARNING: Metadata probe disregards metadata version when + * checking if the cache is dirty - position of next two fields + * shouldn't change!! */ + uint8_t clean_shutdown; + uint8_t dirty_flushed; + uint32_t magic_number; + + uint32_t metadata_version; + + /* Currently set cache mode */ + ocf_cache_mode_t cache_mode; + + ocf_cache_line_t cachelines; + uint32_t valid_parts_no; + + ocf_cache_line_size_t line_size; + ocf_metadata_layout_t metadata_layout; + uint32_t core_obj_count; + + unsigned long valid_object_bitmap[(OCF_CORE_MAX / + (sizeof(unsigned long) * 8)) + 1]; + + ocf_cleaning_t cleaning_policy_type; + struct cleaning_policy_config cleaning[CLEANING_POLICY_TYPE_MAX]; + + ocf_eviction_t eviction_policy_type; + + /* Current core sequence number */ + ocf_core_id_t curr_core_seq_no; + + struct ocf_user_part_config user_parts[OCF_IO_CLASS_MAX + 1]; + + /* + * Checksum for each metadata region. + * This field has to be the last one! 
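The `lock.status` rwlock in `struct ocf_metadata` below is what the `OCF_METADATA_BITS_LOCK_RD/WR` macros used by the valid/dirty helpers are expected to take. One plausible shape of those macros is sketched here; the real definitions live in the metadata headers and may differ.

```c
/* Sketch only - a plausible definition of the status-bit lock macros used by
 * the valid/dirty helpers. They rely on a `cache` variable in scope, matching
 * how the helpers use them. */
#define OCF_METADATA_BITS_LOCK_RD() \
		env_rwlock_read_lock(&cache->metadata.lock.status)
#define OCF_METADATA_BITS_UNLOCK_RD() \
		env_rwlock_read_unlock(&cache->metadata.lock.status)
#define OCF_METADATA_BITS_LOCK_WR() \
		env_rwlock_write_lock(&cache->metadata.lock.status)
#define OCF_METADATA_BITS_UNLOCK_WR() \
		env_rwlock_write_unlock(&cache->metadata.lock.status)
```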
+ */ + uint32_t checksum[metadata_segment_max]; +}; + +/** + * @brief OCF cache metadata runtime superblock + */ +struct ocf_superblock_runtime { + struct ocf_part freelist_part; + + struct ocf_user_part_runtime user_parts[OCF_IO_CLASS_MAX + 1]; + + uint32_t cleaning_thread_access; +}; + +static inline int ocf_metadata_set_shutdown_status( + struct ocf_cache *cache, + enum ocf_metadata_shutdown_status shutdown_status) +{ + return cache->metadata.iface.set_shutdown_status(cache, + shutdown_status); +} + +static inline int ocf_metadata_load_superblock(struct ocf_cache *cache) +{ + return cache->metadata.iface.load_superblock(cache); +} + +static inline +int ocf_metadata_flush_superblock(struct ocf_cache *cache) +{ + if (cache->device) + return cache->metadata.iface.flush_superblock(cache); + + return 0; +} + +static inline uint64_t ocf_metadata_get_reserved_lba( + struct ocf_cache *cache) +{ + return cache->metadata.iface.get_reserved_lba(cache); +} + +#endif /* METADATA_SUPERBLOCK_H_ */ diff --git a/src/metadata/metadata_updater.c b/src/metadata/metadata_updater.c new file mode 100644 index 0000000..da52c85 --- /dev/null +++ b/src/metadata/metadata_updater.c @@ -0,0 +1,152 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "metadata.h" +#include "metadata_io.h" +#include "metadata_updater_priv.h" +#include "../ocf_priv.h" +#include "../engine/engine_common.h" +#include "../ocf_cache_priv.h" +#include "../ocf_ctx_priv.h" +#include "../utils/utils_io.h" +#include "../utils/utils_allocator.h" + +int ocf_metadata_updater_init(ocf_cache_t cache) +{ + ocf_metadata_updater_t mu = &cache->metadata_updater; + struct ocf_metadata_io_syncher *syncher = &mu->syncher; + + INIT_LIST_HEAD(&syncher->in_progress_head); + INIT_LIST_HEAD(&syncher->pending_head); + env_mutex_init(&syncher->lock); + + return ctx_metadata_updater_init(cache->owner, mu); +} + +void ocf_metadata_updater_kick(ocf_cache_t cache) +{ + ctx_metadata_updater_kick(cache->owner, &cache->metadata_updater); +} + +void ocf_metadata_updater_stop(ocf_cache_t cache) +{ + ctx_metadata_updater_stop(cache->owner, &cache->metadata_updater); +} + +void ocf_metadata_updater_set_priv(ocf_metadata_updater_t mu, void *priv) +{ + OCF_CHECK_NULL(mu); + mu->priv = priv; +} + +void *ocf_metadata_updater_get_priv(ocf_metadata_updater_t mu) +{ + OCF_CHECK_NULL(mu); + return mu->priv; +} + +ocf_cache_t ocf_metadata_updater_get_cache(ocf_metadata_updater_t mu) +{ + OCF_CHECK_NULL(mu); + return container_of(mu, struct ocf_cache, metadata_updater); +} + +static int _metadata_updater_iterate_in_progress(ocf_cache_t cache, + struct metadata_io_request *new_req) +{ + struct metadata_io_request_asynch *a_req; + struct ocf_metadata_io_syncher *syncher = + &cache->metadata_updater.syncher; + struct metadata_io_request *curr, *temp; + + list_for_each_entry_safe(curr, temp, &syncher->in_progress_head, list) { + if (env_atomic_read(&curr->finished)) { + a_req = curr->asynch; + ENV_BUG_ON(!a_req); + + list_del(&curr->list); + + if (env_atomic_dec_return(&a_req->req_active) == 0) { + OCF_REALLOC_DEINIT(&a_req->reqs, + &a_req->reqs_limit); + env_free(a_req); + } + continue; + } + if (new_req) { + /* If request specified, check if overlap occurs. 
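The warning at the top of `struct ocf_superblock_config` explains why `clean_shutdown` and `dirty_flushed` must stay at fixed offsets: a probe has to interpret them before it knows which metadata version wrote the superblock. A hedged sketch of such a check is shown below, assuming the caller has already read the on-disk superblock region into a buffer and that the metadata headers above are included.

```c
#include <stdbool.h>

/* Illustrative probe sketch: `buf` is assumed to hold the superblock region
 * read from the cache device. Only the magic number and the two fields with
 * guaranteed offsets are inspected. */
static bool example_probe_clean_shutdown(const void *buf, bool *dirty_flushed)
{
	const struct ocf_superblock_config *sb = buf;

	if (sb->magic_number != CACHE_MAGIC_NUMBER)
		return false;	/* not an OCF cache device */

	*dirty_flushed = sb->dirty_flushed;

	return sb->clean_shutdown == ocf_metadata_clean_shutdown;
}
```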
*/ + if (ocf_io_overlaps(new_req->page, new_req->count, + curr->page, curr->count)) { + return 1; + } + } + } + + return 0; +} + +int metadata_updater_check_overlaps(ocf_cache_t cache, + struct metadata_io_request *req) +{ + struct ocf_metadata_io_syncher *syncher = + &cache->metadata_updater.syncher; + int ret; + + env_mutex_lock(&syncher->lock); + + ret = _metadata_updater_iterate_in_progress(cache, req); + + /* Either add it to in-progress list or pending list for deferred + * execution. + */ + if (ret == 0) + list_add_tail(&req->list, &syncher->in_progress_head); + else + list_add_tail(&req->list, &syncher->pending_head); + + env_mutex_unlock(&syncher->lock); + + return ret; +} + +uint32_t ocf_metadata_updater_run(ocf_metadata_updater_t mu) +{ + struct metadata_io_request *curr, *temp; + struct ocf_metadata_io_syncher *syncher; + ocf_cache_t cache; + int ret; + + OCF_CHECK_NULL(mu); + + cache = ocf_metadata_updater_get_cache(mu); + syncher = &cache->metadata_updater.syncher; + + env_mutex_lock(&syncher->lock); + if (list_empty(&syncher->pending_head)) { + /* + * If pending list is empty, we iterate over in progress + * list to free memory used by finished requests. + */ + _metadata_updater_iterate_in_progress(cache, NULL); + env_mutex_unlock(&syncher->lock); + env_cond_resched(); + return 0; + } + list_for_each_entry_safe(curr, temp, &syncher->pending_head, list) { + ret = _metadata_updater_iterate_in_progress(cache, curr); + if (ret == 0) { + /* Move to in-progress list and kick the workers */ + list_move_tail(&curr->list, &syncher->in_progress_head); + } + env_mutex_unlock(&syncher->lock); + if (ret == 0) + ocf_engine_push_rq_front(&curr->fl_req, true); + env_cond_resched(); + env_mutex_lock(&syncher->lock); + } + env_mutex_unlock(&syncher->lock); + + return 0; +} diff --git a/src/metadata/metadata_updater_priv.h b/src/metadata/metadata_updater_priv.h new file mode 100644 index 0000000..5ec3176 --- /dev/null +++ b/src/metadata/metadata_updater_priv.h @@ -0,0 +1,33 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __METADATA_UPDATER_PRIV_H__ +#define __METADATA_UPDATER_PRIV_H__ + +#include "../ocf_def_priv.h" +#include "metadata_io.h" + +struct ocf_metadata_updater { + /* Metadata flush synchronizer context */ + struct ocf_metadata_io_syncher { + struct list_head in_progress_head; + struct list_head pending_head; + env_mutex lock; + } syncher; + + void *priv; +}; + + +int metadata_updater_check_overlaps(ocf_cache_t cache, + struct metadata_io_request *req); + +int ocf_metadata_updater_init(struct ocf_cache *cache); + +void ocf_metadata_updater_kick(struct ocf_cache *cache); + +void ocf_metadata_updater_stop(struct ocf_cache *cache); + +#endif /* __METADATA_UPDATER_PRIV_H__ */ diff --git a/src/mngt/ocf_mngt_cache.c b/src/mngt/ocf_mngt_cache.c new file mode 100644 index 0000000..ab4340e --- /dev/null +++ b/src/mngt/ocf_mngt_cache.c @@ -0,0 +1,2121 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_mngt_common.h" +#include "../ocf_priv.h" +#include "../ocf_core_priv.h" +#include "../ocf_queue_priv.h" +#include "../metadata/metadata.h" +#include "../engine/cache_engine.h" +#include "../utils/utils_part.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_device.h" +#include "../utils/utils_io.h" +#include "../utils/utils_cache_line.h" +#include "../ocf_utils.h" +#include "../concurrency/ocf_concurrency.h" +#include 
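`metadata_updater_check_overlaps` above defers a metadata I/O request to the pending list whenever its page range overlaps one already in flight; `ocf_io_overlaps()` (defined elsewhere) is the predicate that decides this. A minimal equivalent for two page ranges, shown only for illustration:

```c
#include <stdint.h>
#include <stdbool.h>

/* Illustrative page-range overlap predicate, equivalent in spirit to the
 * ocf_io_overlaps() call used above (the real helper is defined elsewhere).
 * Each request covers pages [page, page + count). */
static bool example_pages_overlap(uint32_t page1, uint32_t count1,
		uint32_t page2, uint32_t count2)
{
	return page1 < page2 + count2 && page2 < page1 + count1;
}
```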
"../eviction/ops.h" +#include "../ocf_ctx_priv.h" +#include "../cleaning/cleaning.h" + +#define OCF_ASSERT_PLUGGED(cache) ENV_BUG_ON(!(cache)->device) + +static struct ocf_cache *_ocf_mngt_get_cache(ocf_ctx_t owner, + ocf_cache_id_t cache_id) +{ + struct ocf_cache *iter = NULL; + struct ocf_cache *cache = NULL; + + list_for_each_entry(iter, &owner->caches, list) { + if (iter->cache_id == cache_id) { + cache = iter; + break; + } + } + + return cache; +} + +#define DIRTY_SHUTDOWN_ERROR_MSG "Please use --load option to restore " \ + "previous cache state (Warning: data corruption may happen)" \ + "\nOr initialize your cache using --force option. " \ + "Warning: All dirty data will be lost!\n" + +#define DIRTY_NOT_FLUSHED_ERROR_MSG "Cache closed w/ no data flushing\n" \ + "Restart with --load or --force option\n" + +/** + * @brief Helpful function to start cache + */ +struct ocf_cachemng_init_params { + bool metadata_volatile; + + ocf_cache_id_t id; + /*!< cache id */ + + ocf_ctx_t ctx; + /*!< OCF context */ + + struct ocf_cache *cache; + /*!< cache that is being initialized */ + + uint8_t locked; + /*!< Keep cache locked */ + + /** + * @brief initialization state (in case of error, it is used to know + * which assets have to be deallocated in premature exit from function + */ + struct { + bool cache_alloc : 1; + /*!< cache is allocated and added to list */ + + bool metadata_inited : 1; + /*!< Metadata is inited to valid state */ + + bool queues_inited : 1; + + bool cache_locked : 1; + /*!< Cache has been locked */ + + bool io_queues_started : 1; + /*!< queues are started */ + } flags; + + struct ocf_metadata_init_params { + ocf_cache_line_size_t line_size; + /*!< Metadata cache line size */ + + ocf_metadata_layout_t layout; + /*!< Metadata layout (striping/sequential) */ + + ocf_cache_mode_t cache_mode; + /*!< cache mode */ + } metadata; +}; + +struct ocf_cachemng_attach_params { + struct ocf_cache *cache; + /*!< cache that is being initialized */ + + struct ocf_data_obj_uuid uuid; + /*!< Caching device data object UUID */ + + uint8_t device_type; + /*!< data object (block device) type */ + + uint64_t device_size; + /*!< size of the device in cache lines */ + + uint8_t force; + /*!< if force switch was passed in CLI (if this flag is set, + * routine overrides some safety checks, that normally prevent + * completion of initialization procedure + */ + + uint8_t load; + /*!< 1 if load from attached device is requested */ + + bool perform_test; + /*!< Test cache before starting */ + + /** + * @brief initialization state (in case of error, it is used to know + * which assets have to be deallocated in premature exit from function + */ + struct { + bool device_alloc : 1; + /*!< data structure allocated */ + + bool uuid_alloc : 1; + /*!< uuid for cache device is allocated */ + + bool attached_metadata_inited : 1; + /*!< attached metadata sections initialized */ + + bool device_opened : 1; + /*!< underlying device object is open */ + + bool cleaner_started : 1; + /*!< Cleaner has been started */ + + bool cores_opened : 1; + /*!< underlying cores are opened (happens only during + * load or recovery + */ + + bool concurrency_inited : 1; + } flags; + + struct { + ocf_cache_line_size_t line_size; + /*!< Metadata cache line size */ + + ocf_metadata_layout_t layout; + /*!< Metadata layout (striping/sequential) */ + + ocf_cache_mode_t cache_mode; + /*!< cache mode */ + + enum ocf_metadata_shutdown_status shutdown_status; + /*!< dirty or clean */ + + uint8_t dirty_flushed; + /*!< is dirty data fully flushed 
*/ + + int status; + /*!< metadata retrieval status (nonzero is sign of an error + * during recovery/load but is non issue in case of clean init + */ + } metadata; + + uint64_t min_free_ram; + /*!< Minimum free RAM required to start cache. Set during + * cache start procedure + */ +}; + +static ocf_cache_id_t _ocf_mngt_cache_find_free_id(ocf_ctx_t owner) +{ + ocf_cache_id_t id = OCF_CACHE_ID_INVALID; + + for (id = OCF_CACHE_ID_MIN; id <= OCF_CACHE_ID_MAX; id++) { + if (!_ocf_mngt_get_cache(owner, id)) + return id; + } + + return OCF_CACHE_ID_INVALID; +} + +static void __init_hash_table(struct ocf_cache *cache) +{ + /* Initialize hash table*/ + ocf_metadata_init_hash_table(cache); +} + +static void __init_freelist(struct ocf_cache *cache) +{ + /* Initialize free list partition*/ + ocf_metadata_init_freelist_partition(cache); +} + +static void __init_partitions(struct ocf_cache *cache) +{ + ocf_part_id_t i_part; + + /* Init default Partition */ + ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, PARTITION_DEFAULT, + "Unclassified", 0, PARTITION_SIZE_MAX, + OCF_IO_CLASS_PRIO_LOWEST, true)); + + /* Add other partition to the cache and make it as dummy */ + for (i_part = 0; i_part < OCF_IO_CLASS_MAX; i_part++) { + if (i_part == PARTITION_DEFAULT) + continue; + + /* Init default Partition */ + ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, i_part, + "Inactive", 0, PARTITION_SIZE_MAX, + OCF_IO_CLASS_PRIO_LOWEST, false)); + } +} + +static void __init_partitions_attached(struct ocf_cache *cache) +{ + ocf_part_id_t part_id; + + for (part_id = 0; part_id < OCF_IO_CLASS_MAX; part_id++) { + cache->user_parts[part_id].runtime->head = + cache->device->collision_table_entries; + cache->user_parts[part_id].runtime->curr_size = 0; + + ocf_eviction_initialize(cache, part_id); + } +} + +static void __init_cleaning_policy(struct ocf_cache *cache) +{ + ocf_cleaning_t cleaning_policy = ocf_cleaning_default; + int i; + + OCF_ASSERT_PLUGGED(cache); + + for (i = 0; i < ocf_cleaning_max; i++) { + if (cleaning_policy_ops[i].setup) + cleaning_policy_ops[i].setup(cache); + } + + cache->conf_meta->cleaning_policy_type = ocf_cleaning_default; + if (cleaning_policy_ops[cleaning_policy].initialize) + cleaning_policy_ops[cleaning_policy].initialize(cache, 1); +} + +static void __deinit_cleaning_policy(struct ocf_cache *cache) +{ + ocf_cleaning_t cleaning_policy; + + cleaning_policy = cache->conf_meta->cleaning_policy_type; + if (cleaning_policy_ops[cleaning_policy].deinitialize) + cleaning_policy_ops[cleaning_policy].deinitialize(cache); +} + +static void __init_eviction_policy(struct ocf_cache *cache, + ocf_eviction_t eviction) +{ + ENV_BUG_ON(eviction < 0 || eviction >= ocf_eviction_max); + + cache->conf_meta->eviction_policy_type = eviction; +} + +static void __init_cores(struct ocf_cache *cache) +{ + /* No core devices yet */ + cache->conf_meta->core_obj_count = 0; + ENV_BUG_ON(env_memset(cache->conf_meta->valid_object_bitmap, + sizeof(cache->conf_meta->valid_object_bitmap), 0)); +} + +static void __init_metadata_version(struct ocf_cache *cache) +{ + cache->conf_meta->metadata_version = METADATA_VERSION(); +} + +static void init_attached_data_structures(struct ocf_cache *cache, + ocf_eviction_t eviction_policy) +{ + /* Lock to ensure consistency */ + OCF_METADATA_LOCK_WR(); + __init_hash_table(cache); + __init_freelist(cache); + __init_partitions_attached(cache); + __init_cleaning_policy(cache); + __init_eviction_policy(cache, eviction_policy); + OCF_METADATA_UNLOCK_WR(); +} + +static void __reset_stats(struct 
ocf_cache *cache) +{ + int core_id; + ocf_part_id_t i; + + for (core_id = 0; core_id < OCF_CORE_MAX; core_id++) { + env_atomic_set(&cache->core_runtime_meta[core_id]. + cached_clines, 0); + env_atomic_set(&cache->core_runtime_meta[core_id]. + dirty_clines, 0); + env_atomic64_set(&cache->core_runtime_meta[core_id]. + dirty_since, 0); + + for (i = 0; i != OCF_IO_CLASS_MAX; i++) { + env_atomic_set(&cache->core_runtime_meta[core_id]. + part_counters[i].cached_clines, 0); + env_atomic_set(&cache->core_runtime_meta[core_id]. + part_counters[i].dirty_clines, 0); + } + } +} + +static void init_attached_data_structures_recovery(struct ocf_cache *cache) +{ + OCF_METADATA_LOCK_WR(); + __init_hash_table(cache); + __init_freelist(cache); + __init_partitions_attached(cache); + __reset_stats(cache); + __init_metadata_version(cache); + OCF_METADATA_UNLOCK_WR(); +} + +/** + * @brief initialize partitions for a caching device + */ +static void _init_partitions(ocf_cache_t cache) +{ + int clean_type = cache->conf_meta->cleaning_policy_type; + + if (clean_type >= 0 && clean_type < ocf_cleaning_max) { + /* Initialize policy with settings restored + * from metadata. + */ + if (cleaning_policy_ops[clean_type].initialize) + cleaning_policy_ops[clean_type].initialize(cache, 0); + } else { + ocf_cache_log(cache, log_warn, + "Wrong cleaning policy type=%d\n", clean_type); + } +} + +/**************************************************************** + * Function for removing all uninitialized core objects * + * from the cache instance. * + * Used in case of cache initialization errors. * + ****************************************************************/ +static void _ocf_mngt_close_all_uninitialized_cores( + struct ocf_cache *cache) +{ + ocf_data_obj_t obj; + int j, i; + + for (j = cache->conf_meta->core_obj_count, i = 0; j > 0; ++i) { + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + + obj = &(cache->core_obj[i].obj); + ocf_data_obj_close(obj); + + --j; + + env_free(cache->core_obj[i].counters); + cache->core_obj[i].counters = NULL; + + env_bit_clear(i, cache->conf_meta->valid_object_bitmap); + } + + cache->conf_meta->core_obj_count = 0; +} + +/** + * @brief routine loading metadata from cache device + * - attempts to open all the underlying cores + */ +static int _ocf_mngt_init_instance_add_cores( + struct ocf_cachemng_attach_params *attach_params) +{ + struct ocf_cache *cache = attach_params->cache; + /* FIXME: This is temporary hack. Remove after storing name it meta. 
*/ + char core_name[OCF_CORE_NAME_SIZE]; + int ret = -1, i; + uint64_t hd_lines = 0; + + OCF_ASSERT_PLUGGED(cache); + + ocf_cache_log(cache, log_info, "Loading cache state...\n"); + if (ocf_metadata_load_superblock(cache)) { + ocf_cache_log(cache, log_err, + "ERROR: Cannot load cache state\n"); + return -OCF_ERR_START_CACHE_FAIL; + } + + if (cache->conf_meta->cachelines != + ocf_metadata_get_cachelines_count(cache)) { + ocf_cache_log(cache, log_err, + "ERROR: Cache device size mismatch!\n"); + return -OCF_ERR_START_CACHE_FAIL; + } + + /* Count value will be re-calculated on the basis of 'added' flag */ + cache->conf_meta->core_obj_count = 0; + + /* Check in metadata which cores were added into cache */ + for (i = 0; i < OCF_CORE_MAX; i++) { + ocf_data_obj_t tobj = NULL; + ocf_core_t core = &cache->core_obj[i]; + + if (!cache->core_conf_meta[i].added) + continue; + + if (!cache->core_obj[i].obj.type) + goto _cache_mng_init_instance_add_cores_ERROR; + + ret = snprintf(core_name, sizeof(core_name), "%d", i); + if (ret < 0 || ret >= sizeof(core_name)) + goto _cache_mng_init_instance_add_cores_ERROR; + + ret = ocf_core_set_name(core, core_name, sizeof(core_name)); + if (ret) + goto _cache_mng_init_instance_add_cores_ERROR; + + tobj = ocf_mngt_core_pool_lookup(ocf_cache_get_ctx(cache), + &core->obj.uuid, core->obj.type); + if (tobj) { + /* + * Attach bottom device to core structure + * in cache + */ + core->obj.type = tobj->type; + core->obj.priv = tobj->priv; + ocf_mngt_core_pool_remove(ocf_cache_get_ctx(cache), + tobj); + + core->opened = true; + ocf_cache_log(cache, log_info, + "Attached core %u from pool\n", i); + } else { + ret = ocf_data_obj_open(&core->obj); + if (ret == -OCF_ERR_NOT_OPEN_EXC) { + ocf_cache_log(cache, log_warn, + "Cannot open core %u. 
" + "Cache is busy", i); + } else if (ret) { + ocf_cache_log(cache, log_warn, + "Cannot open core %u", i); + } else { + core->opened = true; + } + } + + env_bit_set(i, cache->conf_meta->valid_object_bitmap); + cache->conf_meta->core_obj_count++; + core->obj.cache = cache; + + core->counters = + env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL); + if (!core->counters) + goto _cache_mng_init_instance_add_cores_ERROR; + + if (!core->opened) { + env_bit_set(ocf_cache_state_incomplete, + &cache->cache_state); + cache->ocf_core_inactive_count++; + ocf_cache_log(cache, log_warn, + "Cannot find core %u in pool" + ", core added as inactive\n", i); + continue; + } + + hd_lines = ocf_bytes_2_lines(cache, + ocf_data_obj_get_length( + &cache->core_obj[i].obj)); + + if (hd_lines) { + ocf_cache_log(cache, log_info, + "Disk lines = %" ENV_PRIu64 "\n", hd_lines); + } + } + + attach_params->flags.cores_opened = true; + return 0; + +_cache_mng_init_instance_add_cores_ERROR: + _ocf_mngt_close_all_uninitialized_cores(cache); + + return -OCF_ERR_START_CACHE_FAIL; +} + +/** + * @brief routine implementing "recovery" feature - flushes dirty data to + * underlying cores and closes them + * @param cache caching device that is opened but not fully initialized + */ +static int _recover_cache(struct ocf_cache *cache) +{ + ocf_cache_log(cache, log_warn, + "ERROR: Cache device did not shut down properly!\n"); + + ocf_cache_log(cache, log_info, "Initiating recovery sequence...\n"); + + if (ocf_metadata_load_recovery(cache)) { + ocf_cache_log(cache, log_err, + "Cannot read metadata for recovery\n"); + return -OCF_ERR_START_CACHE_FAIL; + } + + return 0; +} + +/** + * handle --start-cache -r variant + */ +static int _ocf_mngt_init_instance_recovery( + struct ocf_cachemng_attach_params *attach_params) +{ + int result = 0; + struct ocf_cache *cache = attach_params->cache; + ocf_cleaning_t cleaning_policy; + + OCF_ASSERT_PLUGGED(cache); + + init_attached_data_structures_recovery(cache); + + result = _recover_cache(cache); + if (result) + return result; + + cleaning_policy = cache->conf_meta->cleaning_policy_type; + if (cleaning_policy_ops[cleaning_policy].initialize) { + cleaning_policy_ops[cleaning_policy].initialize(cache, 1); + } + + if (ocf_metadata_flush_all(cache)) { + ocf_cache_log(cache, log_err, + "ERROR: Cannot save cache state\n"); + return -OCF_ERR_START_CACHE_FAIL; + } + + return 0; +} + +/** + * handle --start-cache -l variant + */ +static int _ocf_mngt_init_instance_load( + struct ocf_cachemng_attach_params *attach_params) +{ + struct ocf_cache *cache = attach_params->cache; + int ret; + + OCF_ASSERT_PLUGGED(cache); + + ret = _ocf_mngt_init_instance_add_cores(attach_params); + if (ret) + return ret; + + if (ocf_metadata_clean_shutdown != attach_params->metadata.shutdown_status) { + /* When dirty shutdown perform recovery */ + return _ocf_mngt_init_instance_recovery(attach_params); + } + + ret = ocf_metadata_load_all(cache); + if (ret) { + ocf_cache_log(cache, log_err, + "ERROR: Cannot load cache state\n"); + return -OCF_ERR_START_CACHE_FAIL; + } + + _init_partitions(cache); + + return ret; +} + +/** + * @brief allocate memory for new cache, add it to cache queue, set initial + * values and running state + */ +static int _ocf_mngt_init_new_cache(struct ocf_cachemng_init_params *params) +{ + struct ocf_cache *cache = env_vzalloc(sizeof(*cache)); + + if (!cache) + return -OCF_ERR_NO_MEM; + + if (env_rwsem_init(&cache->lock) || + env_mutex_init(&cache->flush_mutex)) { + env_vfree(cache); + return -OCF_ERR_NO_MEM; 
+ } + + INIT_LIST_HEAD(&cache->list); + list_add_tail(&cache->list, ¶ms->ctx->caches); + env_atomic_set(&cache->ref_count, 1); + cache->owner = params->ctx; + + /* Copy all required initialization parameters */ + cache->cache_id = params->id; + + env_atomic_set(&(cache->last_access_ms), + env_ticks_to_msecs(env_get_tick_count())); + + env_bit_set(ocf_cache_state_initializing, &cache->cache_state); + + params->cache = cache; + params->flags.cache_alloc = true; + + return 0; +} + +static int _ocf_mngt_attach_cache_device(struct ocf_cache *cache, + struct ocf_cachemng_attach_params *attach_params) +{ + int ret; + + cache->device = env_vzalloc(sizeof(*cache->device)); + if (!cache->device) + return -OCF_ERR_NO_MEM; + attach_params->flags.device_alloc = true; + + cache->device->obj.cache = cache; + + /* Prepare UUID of cache data object */ + cache->device->obj.type = ocf_ctx_get_data_obj_type(cache->owner, + attach_params->device_type); + if (!cache->device->obj.type) { + ret = -OCF_ERR_INVAL_DATA_OBJ_TYPE; + goto _cache_mng_attach_cache_device_ERROR; + } + + if (ocf_uuid_cache_set(cache, &attach_params->uuid)) { + ret = -OCF_ERR_INVAL; + goto _cache_mng_attach_cache_device_ERROR; + } + attach_params->flags.uuid_alloc = true; + + /* + * Open cache device, It has to be done first because metadata service + * need to know size of cache device. + */ + ret = ocf_data_obj_open(&cache->device->obj); + if (ret) { + ocf_cache_log(cache, log_err, "ERROR: Cache not available\n"); + goto _cache_mng_attach_cache_device_ERROR; + } + attach_params->flags.device_opened = true; + + attach_params->device_size = ocf_data_obj_get_length(&cache->device->obj); + + /* Check minimum size of cache device */ + if (attach_params->device_size < OCF_CACHE_SIZE_MIN) { + ocf_cache_log(cache, log_err, "ERROR: Cache cache size must " + "be at least %llu [MiB]\n", OCF_CACHE_SIZE_MIN / MiB); + ret = -OCF_ERR_START_CACHE_FAIL; + goto _cache_mng_attach_cache_device_ERROR; + } + + if (cache->metadata.is_volatile) { + cache->device->init_mode = ocf_init_mode_metadata_volatile; + } else { + cache->device->init_mode = attach_params->load ? + ocf_init_mode_load : ocf_init_mode_init; + } + + return 0; + +_cache_mng_attach_cache_device_ERROR: + return ret; +} + +/** + * @brief prepare cache for init. 
This is first step towards initializing + * the cache + */ +static int _ocf_mngt_init_prepare_cache(struct ocf_cachemng_init_params *param, + struct ocf_mngt_cache_config *cfg) +{ + struct ocf_cache *cache; + char cache_name[OCF_CACHE_NAME_SIZE]; + int ret = 0; + + ret = env_mutex_lock_interruptible(¶m->ctx->lock); + if (ret) + return ret; + + if (param->id == OCF_CACHE_ID_INVALID) { + /* ID was not specified, take first free id */ + param->id = _ocf_mngt_cache_find_free_id(param->ctx); + if (param->id == OCF_CACHE_ID_INVALID) { + ret = -OCF_ERR_TOO_MANY_CACHES; + goto out; + } + cfg->id = param->id; + } else { + /* ID was set, check if cache exist with specified ID */ + cache = _ocf_mngt_get_cache(param->ctx, param->id); + if (cache) { + /* Cache already exist */ + ret = -OCF_ERR_CACHE_EXIST; + goto out; + } + } + + if (cfg->name) { + ret = env_strncpy(cache_name, sizeof(cache_name), + cfg->name, cfg->name_size); + if (ret) + goto out; + } else { + ret = snprintf(cache_name, sizeof(cache_name), + "%hu", param->id); + if (ret < 0) + goto out; + } + + ocf_log(param->ctx, log_info, "Inserting cache %s\n", cache_name); + + ret = _ocf_mngt_init_new_cache(param); + if (ret) + goto out; + + cache = param->cache; + + ret = ocf_cache_set_name(cache, cache_name, sizeof(cache_name)); + if (ret) + goto out; + + cache->backfill.max_queue_size = cfg->backfill.max_queue_size; + cache->backfill.queue_unblock_size = cfg->backfill.queue_unblock_size; + + env_rwsem_down_write(&cache->lock); /* Lock cache during setup */ + param->flags.cache_locked = true; + + cache->io_queues_no = cfg->io_queues; + cache->pt_unaligned_io = cfg->pt_unaligned_io; + cache->use_submit_io_fast = cfg->use_submit_io_fast; + + cache->eviction_policy_init = cfg->eviction_policy; + cache->metadata.is_volatile = cfg->metadata_volatile; + +out: + env_mutex_unlock(¶m->ctx->lock); + return ret; +} + + +/** + * @brief read data from given address and compare it against cmp_buffer + * + * @param[in] cache OCF cache + * @param[in] addr target adres for read operation + * @param[in] rw_buffer buffer to store data read from addr + * @param[in] cmp_buffer buffer to compare against + * @param[out] diff buffers diff + + * @return error code in case of error, 0 in case of success +*/ +static int __ocf_mngt_init_test_device_submit_and_cmp(struct ocf_cache *cache, + uint64_t addr, void *rw_buffer, void *cmp_buffer, int *diff) +{ + int ret; + + ret = ocf_submit_cache_page(cache, addr, OCF_READ, + rw_buffer); + if (ret) + goto end; + + ret = env_memcmp(rw_buffer, PAGE_SIZE, cmp_buffer, PAGE_SIZE, diff); + +end: + return ret; +} + +static int _ocf_mngt_init_test_device(struct ocf_cache *cache) +{ + unsigned long reserved_lba_addr; + void *rw_buffer = NULL, *cmp_buffer = NULL; + int ret; + int diff; + + rw_buffer = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL); + if (!rw_buffer) { + ret = -OCF_ERR_NO_MEM; + goto end; + } + + cmp_buffer = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL); + if (!cmp_buffer) { + ret = -OCF_ERR_NO_MEM; + goto end; + } + + reserved_lba_addr = ocf_metadata_get_reserved_lba(cache); + + /* + * Write buffer filled "1" + */ + + ENV_BUG_ON(env_memset(rw_buffer, PAGE_SIZE, 1)); + + ret = ocf_submit_cache_page(cache, reserved_lba_addr, + OCF_WRITE, rw_buffer); + if (ret) + goto end; + + /* + * First read + */ + + ENV_BUG_ON(env_memset(rw_buffer, PAGE_SIZE, 0)); + ENV_BUG_ON(env_memset(cmp_buffer, PAGE_SIZE, 1)); + + ret = __ocf_mngt_init_test_device_submit_and_cmp(cache, + reserved_lba_addr, rw_buffer, cmp_buffer, &diff); + if (ret) + goto end; + if 
(diff) { + /* we read back different data than what we had just + written - this is fatal error */ + ret = -EIO; + goto end; + } + + if (!ocf_data_obj_is_atomic(&cache->device->obj)) + goto end; + + /* + * Submit discard request + */ + ret = ocf_submit_obj_discard_wait(&cache->device->obj, + reserved_lba_addr, PAGE_SIZE); + if (ret) + goto end; + + /* + * Second read + */ + + ENV_BUG_ON(env_memset(rw_buffer, PAGE_SIZE, 1)); + ENV_BUG_ON(env_memset(cmp_buffer, PAGE_SIZE, 0)); + + ret = __ocf_mngt_init_test_device_submit_and_cmp(cache, + reserved_lba_addr, rw_buffer, cmp_buffer, &diff); + if (ret) + goto end; + + if (diff) { + /* discard does not cause target adresses to return 0 on + subsequent read */ + cache->device->obj.features.discard_zeroes = 0; + } + +end: + env_free(rw_buffer); + env_free(cmp_buffer); + + return ret; +} + +/** + * Prepare metadata accordingly to mode (for load/recovery read from disk) + */ +static int _ocf_mngt_init_prepare_metadata( + struct ocf_cachemng_attach_params *attach_params) +{ + int ret; + int i; + ocf_cache_t cache = attach_params->cache; + ocf_cache_line_size_t line_size = attach_params->metadata.line_size ? + attach_params->metadata.line_size : + cache->metadata.settings.size; + + OCF_ASSERT_PLUGGED(cache); + + if (cache->device->init_mode != ocf_init_mode_metadata_volatile) { + if (cache->device->init_mode == ocf_init_mode_load) { + attach_params->metadata.status = ocf_metadata_load_properties( + &cache->device->obj, + &line_size, + &cache->conf_meta->metadata_layout, + &cache->conf_meta->cache_mode, + &attach_params->metadata.shutdown_status, + &attach_params->metadata.dirty_flushed); + if (attach_params->metadata.status) { + ret = -OCF_ERR_START_CACHE_FAIL; + return ret; + } + } else { + attach_params->metadata.status = ocf_metadata_load_properties( + &cache->device->obj, + NULL, NULL, NULL, + &attach_params->metadata.shutdown_status, + &attach_params->metadata.dirty_flushed); + /* don't handle result; if no valid metadata is present + * on caching device, we are about to use, it's not an issue + */ + } + } + + /* + * Initialize variable size metadata segments + */ + if (ocf_metadata_init_variable_size(cache, attach_params->device_size, + line_size, + cache->conf_meta->metadata_layout)) { + return -OCF_ERR_START_CACHE_FAIL; + + } + ocf_cache_log(cache, log_debug, "Cache attached\n"); + attach_params->flags.attached_metadata_inited = true; + + for (i = 0; i < OCF_IO_CLASS_MAX + 1; ++i) { + cache->user_parts[i].runtime = + &cache->device->runtime_meta->user_parts[i]; + } + + cache->device->freelist_part = &cache->device->runtime_meta->freelist_part; + + ret = ocf_concurrency_init(cache); + if (!ret) + attach_params->flags.concurrency_inited = 1; + + return ret; +} + +/** + * @brief initializing cache anew (not loading or recovering) + */ +static int _ocf_mngt_init_instance_init(struct ocf_cachemng_attach_params *attach_params) +{ + struct ocf_cache *cache = attach_params->cache; + + if (!attach_params->metadata.status && !attach_params->force && + attach_params->metadata.shutdown_status != + ocf_metadata_detached) { + + if (attach_params->metadata.shutdown_status != + ocf_metadata_clean_shutdown) { + ocf_cache_log(cache, log_err, DIRTY_SHUTDOWN_ERROR_MSG); + return -OCF_ERR_DIRTY_SHUTDOWN; + } + + if (attach_params->metadata.dirty_flushed == DIRTY_NOT_FLUSHED) { + ocf_cache_log(cache, log_err, + DIRTY_NOT_FLUSHED_ERROR_MSG); + return -OCF_ERR_DIRTY_EXISTS; + } + } + + init_attached_data_structures(cache, + attach_params->cache->eviction_policy_init); 
+ + /* In initial cache state there is no dirty data, so all dirty data is + considered to be flushed + */ + cache->conf_meta->dirty_flushed = true; + if (ocf_metadata_flush_all(cache)) { + ocf_cache_log(cache, log_err, + "ERROR: Cannot save cache state\n"); + return -OCF_ERR_WRITE_CACHE; + } + + return 0; +} + +static int check_ram_availability(ocf_ctx_t ctx, + struct ocf_cachemng_attach_params *attach_params) +{ + struct ocf_cache *cache = attach_params->cache; + ocf_cache_line_size_t line_size = cache->metadata.settings.size; + uint64_t const_data_size; + uint64_t cache_line_no; + uint64_t data_per_line; + uint64_t free_ram; + + /* Superblock + per core metadata */ + const_data_size = 50 * MiB; + + /* Cache metadata */ + cache_line_no = attach_params->device_size / line_size; + data_per_line = (52 + (2 * (line_size / KiB / 4))); + + attach_params->min_free_ram = const_data_size + cache_line_no * data_per_line; + + /* 110% of calculated value */ + attach_params->min_free_ram = (11 * attach_params->min_free_ram) / 10; + + free_ram = env_get_free_memory(); + + if (free_ram < attach_params->min_free_ram) { + ocf_log(ctx, log_err, "Not enough free RAM for cache " + "metadata to start cache\n"); + ocf_log(ctx, log_err, "Available RAM: %" ENV_PRIu64 " B\n", + free_ram); + ocf_log(ctx, log_err, "Needed RAM: %" ENV_PRIu64 " B\n", + attach_params->min_free_ram); + return -OCF_ERR_NO_FREE_RAM; + } + + return 0; +} + +/** + * finalize init instance action + * (same handling for all three initialization modes) + */ +static int _ocf_mngt_init_post_action(struct ocf_cachemng_attach_params *attach_params) +{ + int result = 0; + struct ocf_cache *cache = attach_params->cache; + + /* clear clean shutdown status */ + if (ocf_metadata_set_shutdown_status(cache, + ocf_metadata_dirty_shutdown)) { + ocf_cache_log(cache, log_err, "Cannot flush shutdown status\n"); + return -OCF_ERR_WRITE_CACHE; + } + + if (!attach_params->flags.cleaner_started) { + result = ocf_start_cleaner(cache); + if (result) { + ocf_cache_log(cache, log_err, + "Error while starting cleaner\n"); + return result; + } + attach_params->flags.cleaner_started = true; + } + + env_waitqueue_init(&cache->pending_dirty_wq); + env_waitqueue_init(&cache->pending_cache_wq); + + env_atomic_set(&cache->attached, 1); + + return 0; +} + +/** + * @brief for error handling do partial cleanup of datastructures upon + * premature function exit. + * + * @param cache cache instance + * @param ctx OCF context + * @param params - startup params containing initialization status flags. + * Value of NULL indicates cache is fully initialized but not + * handling any I/O (cache->valid_ocf_cache_device_t is 0). 
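`check_ram_availability()` above estimates metadata RAM as a flat 50 MiB plus a per-cache-line cost of 52 + 2 * (line_size / 4 KiB) bytes, then adds a 10% safety margin. The snippet below works that formula through for an assumed 1 TiB cache device with 4 KiB cache lines; the device size and line size are chosen only to illustrate the arithmetic.

```c
#include <stdio.h>
#include <stdint.h>

/* Worked example of the RAM estimate used by check_ram_availability(),
 * for an assumed 1 TiB cache device and 4 KiB cache lines. */
int main(void)
{
	const uint64_t KiB = 1024ull;
	const uint64_t MiB = 1024ull * KiB;
	uint64_t device_size = 1024ull * 1024 * MiB;	/* 1 TiB */
	uint64_t line_size = 4 * KiB;

	uint64_t const_data_size = 50 * MiB;
	uint64_t cache_line_no = device_size / line_size;	  /* 268,435,456 */
	uint64_t data_per_line = 52 + 2 * (line_size / KiB / 4);  /* 54 B */

	uint64_t min_free_ram = const_data_size + cache_line_no * data_per_line;
	min_free_ram = 11 * min_free_ram / 10;	/* 110% margin */

	/* Roughly 16.0 GB (about 14.9 GiB) of free RAM required */
	printf("min free RAM: %llu bytes\n",
			(unsigned long long)min_free_ram);

	return 0;
}
```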
+ */ +static void _ocf_mngt_init_handle_error(ocf_cache_t cache, + ocf_ctx_t ctx, struct ocf_cachemng_init_params *params) +{ + if (!params || params->flags.io_queues_started) + ocf_stop_queues(cache); + + if (!params || params->flags.queues_inited) + ocf_free_queues(cache); + + if (!params || params->flags.metadata_inited) + ocf_metadata_deinit(cache); + + env_mutex_lock(&ctx->lock); + + if (!params || params->flags.cache_alloc) { + list_del(&cache->list); + env_vfree(cache); + } + + env_mutex_unlock(&ctx->lock); +} + +static void _ocf_mngt_attach_handle_error( + struct ocf_cachemng_attach_params *attach_params) +{ + struct ocf_cache *cache = attach_params->cache; + + if (attach_params->flags.cleaner_started) + ocf_stop_cleaner(cache); + + if (attach_params->flags.cores_opened) + _ocf_mngt_close_all_uninitialized_cores(cache); + + if (attach_params->flags.attached_metadata_inited) + ocf_metadata_deinit_variable_size(cache); + + if (attach_params->flags.device_opened) + ocf_data_obj_close(&cache->device->obj); + + if (attach_params->flags.concurrency_inited) + ocf_concurrency_deinit(cache); + + if (attach_params->flags.uuid_alloc) + ocf_uuid_cache_clear(cache); + + if (attach_params->flags.device_alloc) + env_vfree(cache->device); +} + +static int _ocf_mngt_cache_discard_after_metadata(struct ocf_cache *cache) +{ + int result; + uint64_t addr = cache->device->metadata_offset; + uint64_t length = ocf_data_obj_get_length( + &cache->device->obj) - addr; + bool discard = cache->device->obj.features.discard_zeroes; + + if (!discard && ocf_data_obj_is_atomic(&cache->device->obj)) { + /* discard does not zero data - need to explicitly write + zeroes */ + result = ocf_submit_write_zeroes_wait( + &cache->device->obj, addr, length); + if (!result) { + result = ocf_submit_obj_flush_wait( + &cache->device->obj); + } + } else { + /* Discard object after metadata */ + result = ocf_submit_obj_discard_wait(&cache->device->obj, addr, + length); + } + + if (result) { + ocf_cache_log(cache, log_warn, "%s failed\n", + discard ? 
"Discarding whole cache device" : + "Overwriting cache with zeroes"); + + if (ocf_data_obj_is_atomic(&cache->device->obj)) { + ocf_cache_log(cache, log_err, "This step is required" + " for atomic mode!\n"); + } else { + ocf_cache_log(cache, log_warn, "This may impact cache" + " performance!\n"); + result = 0; + } + } + + return result; +} + +static int _ocf_mngt_cache_init(ocf_cache_t cache, + struct ocf_cachemng_init_params *params) +{ + int i; + int result; + + /* + * Super block elements initialization + */ + cache->conf_meta->cache_mode = params->metadata.cache_mode; + cache->conf_meta->metadata_layout = params->metadata.layout; + + for (i = 0; i < OCF_IO_CLASS_MAX + 1; ++i) { + cache->user_parts[i].config = + &cache->conf_meta->user_parts[i]; + } + + result = ocf_alloc_queues(cache); + if (result) + return result; + params->flags.queues_inited = 1; + + /* Init Partitions */ + ocf_part_init(cache); + + __init_cores(cache); + __init_metadata_version(cache); + __init_partitions(cache); + + return 0; +} + +static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache, + struct ocf_mngt_cache_config *cfg) +{ + struct ocf_cachemng_init_params params; + int result; + + ENV_BUG_ON(env_memset(¶ms, sizeof(params), 0)); + + params.id = cfg->id; + + params.ctx = ctx; + params.metadata.cache_mode = cfg->cache_mode; + params.metadata.layout = cfg->metadata_layout; + params.metadata.line_size = cfg->cache_line_size; + params.metadata_volatile = cfg->metadata_volatile; + params.locked = cfg->locked; + + /* Prepare cache */ + result = _ocf_mngt_init_prepare_cache(¶ms, cfg); + if (result) + goto _cache_mng_init_instance_ERROR; + + *cache = params.cache; + + /* + * Initialize metadata selected segments of metadata in memory + */ + result = ocf_metadata_init(*cache, params.metadata.line_size); + if (result) { + result = -OCF_ERR_START_CACHE_FAIL; + goto _cache_mng_init_instance_ERROR; + + } + + result = _ocf_mngt_cache_init(*cache, ¶ms); + if (result) + goto _cache_mng_init_instance_ERROR; + + ocf_log(ctx, log_debug, "Metadata initialized\n"); + params.flags.metadata_inited = true; + + if (!params.flags.io_queues_started) { + result = ocf_start_queues(*cache); + if (result) { + ocf_log(ctx, log_err, + "Error while creating I/O queues\n"); + return result; + } + params.flags.io_queues_started = true; + } + + if (params.locked) { + /* Increment reference counter to match cache_lock / + cache_unlock convention. User is expected to call + ocf_mngt_cache_unlock in future which would up the + semaphore as well as decrement ref_count. */ + env_atomic_inc(&(*cache)->ref_count); + } else { + /* User did not request to lock cache instance after creation - + up the semaphore here since we have acquired the lock to + perform management operations. */ + env_rwsem_up_write(&(*cache)->lock); + params.flags.cache_locked = false; + } + + return 0; + +_cache_mng_init_instance_ERROR: + _ocf_mngt_init_handle_error(params.cache, ctx, ¶ms); + *cache = NULL; + return result; +} + +static void _ocf_mng_cache_set_valid(ocf_cache_t cache) +{ + /* + * Clear initialization state and set the valid bit so we know + * its in use. 
+ */ + cache->valid_ocf_cache_device_t = 1; + env_bit_clear(ocf_cache_state_initializing, &cache->cache_state); + env_bit_set(ocf_cache_state_running, &cache->cache_state); +} + +static int _ocf_mngt_cache_add_cores_t_clean_pol(ocf_cache_t cache) +{ + int clean_type = cache->conf_meta->cleaning_policy_type; + int i, j, no; + int result; + + if (cleaning_policy_ops[clean_type].add_core) { + no = cache->conf_meta->core_obj_count; + for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) { + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + result = cleaning_policy_ops[clean_type].add_core(cache, i); + if (result) { + goto err; + } + j++; + } + } + + return 0; + +err: + if (!cleaning_policy_ops[clean_type].remove_core) + return result; + + while (i--) { + if (env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + cleaning_policy_ops[clean_type].remove_core(cache, i); + }; + + return result; +} + +static void _ocf_mngt_init_attached_nonpersistent(ocf_cache_t cache) +{ + env_atomic_set(&cache->fallback_pt_error_counter, 0); +} + +static int _ocf_mngt_cache_attach(ocf_cache_t cache, + struct ocf_mngt_cache_device_config *device_cfg, + bool load) + +{ + struct ocf_cachemng_attach_params attach_params; + int result; + + ENV_BUG_ON(env_memset(&attach_params, sizeof(attach_params), 0)); + + if (cache->metadata.is_volatile && load) + return -EINVAL; + + attach_params.force = device_cfg->force; + attach_params.uuid = device_cfg->uuid; + attach_params.device_type = device_cfg->data_obj_type; + attach_params.perform_test = device_cfg->perform_test; + attach_params.metadata.shutdown_status = ocf_metadata_clean_shutdown; + attach_params.metadata.dirty_flushed = DIRTY_FLUSHED; + attach_params.metadata.line_size = device_cfg->cache_line_size; + attach_params.cache = cache; + attach_params.load = load; + + _ocf_mngt_init_attached_nonpersistent(cache); + + result = _ocf_mngt_attach_cache_device(cache, &attach_params); + if (result) + goto _cache_mng_init_attach_ERROR; + + result = check_ram_availability(ocf_cache_get_ctx(cache), + &attach_params); + device_cfg->min_free_ram = attach_params.min_free_ram; + if (result) + goto _cache_mng_init_attach_ERROR; + + /* Prepare metadata */ + result = _ocf_mngt_init_prepare_metadata(&attach_params); + if (result) + goto _cache_mng_init_attach_ERROR; + + /* Test device features */ + cache->device->obj.features.discard_zeroes = 1; + if (attach_params.perform_test) { + result = _ocf_mngt_init_test_device(cache); + if (result) + goto _cache_mng_init_attach_ERROR; + } + + switch (cache->device->init_mode) { + case ocf_init_mode_init: + case ocf_init_mode_metadata_volatile: + result = _ocf_mngt_init_instance_init(&attach_params); + break; + case ocf_init_mode_load: + result = _ocf_mngt_init_instance_load(&attach_params); + + break; + default: + result = OCF_ERR_INVAL; + } + + if (result) + goto _cache_mng_init_attach_ERROR; + + /* Discard whole device after metadata if it's a new instance. 
*/ + if (device_cfg->discard_on_start && cache->device->init_mode != + ocf_init_mode_load) { + result = _ocf_mngt_cache_discard_after_metadata(cache); + if (result) + goto _cache_mng_init_attach_ERROR; + } + + if (cache->device->init_mode != ocf_init_mode_load) { + result = _ocf_mngt_cache_add_cores_t_clean_pol(cache); + if (result) + goto _cache_mng_init_attach_ERROR; + } + + result = _ocf_mngt_init_post_action(&attach_params); + if (result) + goto _cache_mng_init_attach_ERROR; + + return 0; + +_cache_mng_init_attach_ERROR: + _ocf_mngt_attach_handle_error(&attach_params); + return result; +} + + +static int _ocf_mngt_cache_validate_cfg(struct ocf_mngt_cache_config *cfg) +{ + if (cfg->id > OCF_CACHE_ID_MAX) + return -OCF_ERR_INVAL; + + if (!ocf_cache_mode_is_valid(cfg->cache_mode)) + return -OCF_ERR_INVALID_CACHE_MODE; + + if (cfg->eviction_policy >= ocf_eviction_max || + cfg->eviction_policy < 0) { + return -OCF_ERR_INVAL; + } + + if (!ocf_cache_line_size_is_valid(cfg->cache_line_size)) + return -OCF_ERR_INVALID_CACHE_LINE_SIZE; + + if (!cfg->io_queues) + return -OCF_ERR_INVAL; + + if (cfg->metadata_layout >= ocf_metadata_layout_max || + cfg->metadata_layout < 0) { + return -OCF_ERR_INVAL; + } + + return 0; +} + +static int _ocf_mngt_cache_validate_device_cfg( + struct ocf_mngt_cache_device_config *device_cfg) +{ + if (!device_cfg->uuid.data) + return -OCF_ERR_INVAL; + + if (device_cfg->uuid.size > OCF_DATA_OBJ_UUID_MAX_SIZE) + return -OCF_ERR_INVAL; + + if (device_cfg->cache_line_size && + !ocf_cache_line_size_is_valid(device_cfg->cache_line_size)) + return -OCF_ERR_INVALID_CACHE_LINE_SIZE; + + return 0; +} + +static const char *_ocf_cache_mode_names[ocf_cache_mode_max] = { + [ocf_cache_mode_wt] = "wt", + [ocf_cache_mode_wb] = "wb", + [ocf_cache_mode_wa] = "wa", + [ocf_cache_mode_pt] = "pt", + [ocf_cache_mode_wi] = "wi", +}; + +static const char *_ocf_cache_mode_get_name(ocf_cache_mode_t cache_mode) +{ + if (!ocf_cache_mode_is_valid(cache_mode)) + return NULL; + + return _ocf_cache_mode_names[cache_mode]; +} + +int ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache, + struct ocf_mngt_cache_config *cfg) +{ + int result; + + if (!ctx || !cache || !cfg) + return -OCF_ERR_INVAL; + + result = _ocf_mngt_cache_validate_cfg(cfg); + if (result) + return result; + + result = _ocf_mngt_cache_start(ctx, cache, cfg); + if (!result) { + _ocf_mng_cache_set_valid(*cache); + + ocf_cache_log(*cache, log_info, "Successfully added\n"); + ocf_cache_log(*cache, log_info, "Cache mode : %s\n", + _ocf_cache_mode_get_name(ocf_cache_get_mode(*cache))); + } else { + if (cfg->name) { + ocf_log(ctx, log_err, "Inserting cache %s failed\n", + cfg->name); + } else { + ocf_log(ctx, log_err, "Inserting cache failed\n"); + } + } + + return result; +} + +int ocf_mngt_cache_attach_nolock(ocf_cache_t cache, + struct ocf_mngt_cache_device_config *device_cfg) +{ + int result; + + if (!cache || !device_cfg) + return -OCF_ERR_INVAL; + + result = _ocf_mngt_cache_validate_device_cfg(device_cfg); + if (result) + return result; + + result = _ocf_mngt_cache_attach(cache, device_cfg, false); + if (!result) { + ocf_cache_log(cache, log_info, "Successfully attached\n"); + } else { + ocf_cache_log(cache, log_err, "Attaching cache device " + "failed\n"); + } + + return result; +} + +int ocf_mngt_cache_attach(ocf_cache_t cache, + struct ocf_mngt_cache_device_config *device_cfg) +{ + int result; + + if (!cache || !device_cfg) + return -OCF_ERR_INVAL; + + result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + result = 
ocf_mngt_cache_attach_nolock(cache, device_cfg); + + ocf_mngt_cache_unlock(cache); + + return result; +} + +/** + * @brief Unplug caching device from cache instance. Variable size metadata + * containers are deinitialiazed as well as other cacheline related + * structures. Cache device object is closed. + * + * @param cache OCF cache instance + * @param stop - true if unplugging during stop - in this case we mark + * clean shutdown in metadata and flush all containers. + * - false if the device is to be detached from cache - loading + * metadata from this device will not be possible. + * + * @retval 0 operation successfull + * @retval non-zero error status + */ +static int _ocf_mngt_cache_unplug(ocf_cache_t cache, bool stop) +{ + int result; + + if (stop) + ENV_BUG_ON(cache->conf_meta->core_obj_count != 0); + + ocf_stop_cleaner(cache); + + __deinit_cleaning_policy(cache); + + if (ocf_mngt_cache_is_dirty(cache)) { + ENV_BUG_ON(!stop); + + cache->conf_meta->dirty_flushed = DIRTY_NOT_FLUSHED; + + ocf_cache_log(cache, log_warn, "Cache is still dirty. " + "DO NOT USE your core devices until flushing " + "dirty data!\n"); + } else { + cache->conf_meta->dirty_flushed = DIRTY_FLUSHED; + } + + if (!stop) { + /* Just set correct shutdown status */ + result = ocf_metadata_set_shutdown_status(cache, + ocf_metadata_detached); + } else { + /* Flush metadata */ + result = ocf_metadata_flush_all(cache); + } + + ocf_data_obj_close(&cache->device->obj); + + ocf_metadata_deinit_variable_size(cache); + ocf_concurrency_deinit(cache); + + ocf_uuid_cache_clear(cache); + + env_vfree(cache->device); + cache->device = NULL; + env_atomic_set(&cache->attached, 0); + + /* TODO: this should be removed from detach after 'attached' stats + are better separated in statistics */ + _ocf_mngt_init_attached_nonpersistent(cache); + + if (result) + return -OCF_ERR_WRITE_CACHE; + + return 0; +} + +static int _ocf_mngt_cache_stop(ocf_cache_t cache) +{ + int i, j, no, result = 0; + ocf_ctx_t owner = cache->owner; + + no = cache->conf_meta->core_obj_count; + + env_bit_set(ocf_cache_state_stopping, &cache->cache_state); + env_bit_clear(ocf_cache_state_running, &cache->cache_state); + + ocf_mngt_wait_for_io_finish(cache); + + /* All exported objects removed, cleaning up rest. 
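+	 * For each valid core the runtime structures are freed, the core is
+	 * removed from the cleaning policy (when a cache device is attached)
+	 * and its bottom data object is closed.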
*/ + for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) { + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + cache_mng_core_remove_from_cache(cache, i); + if (ocf_cache_is_device_attached(cache)) + cache_mng_core_remove_from_cleaning_pol(cache, i); + cache_mng_core_close(cache, i); + j++; + } + ENV_BUG_ON(cache->conf_meta->core_obj_count != 0); + + if (env_atomic_read(&cache->attached)) + result = _ocf_mngt_cache_unplug(cache, true); + + ocf_stop_queues(cache); + + env_mutex_lock(&owner->lock); + /* Mark device uninitialized */ + cache->valid_ocf_cache_device_t = 0; + /* Remove cache from the list */ + list_del(&cache->list); + /* Finally release cache instance */ + ocf_mngt_cache_put(cache); + env_mutex_unlock(&owner->lock); + + return result; +} + +static int _ocf_mngt_cache_load_core_log(ocf_core_t core, void *cntx) +{ + ocf_core_log(core, log_info, "Successfully added\n"); + + return 0; +} + +static void _ocf_mngt_cache_load_log(ocf_cache_t cache) +{ + ocf_cache_mode_t cache_mode = ocf_cache_get_mode(cache); + ocf_eviction_t eviction_type = cache->conf_meta->eviction_policy_type; + ocf_cleaning_t cleaning_type = cache->conf_meta->cleaning_policy_type; + + ocf_cache_log(cache, log_info, "Successfully loaded\n"); + ocf_cache_log(cache, log_info, "Cache mode : %s\n", + _ocf_cache_mode_get_name(cache_mode)); + ocf_cache_log(cache, log_info, "Eviction policy : %s\n", + evict_policy_ops[eviction_type].name); + ocf_cache_log(cache, log_info, "Cleaning policy : %s\n", + cleaning_policy_ops[cleaning_type].name); + ocf_core_visit(cache, _ocf_mngt_cache_load_core_log, + cache, false); +} + +int ocf_mngt_cache_load(ocf_ctx_t ctx, ocf_cache_t *cache, + struct ocf_mngt_cache_config *cfg, + struct ocf_mngt_cache_device_config *device_cfg) +{ + int result; + + if (!ctx || !cache || !cfg || !device_cfg) + return -OCF_ERR_INVAL; + + result = _ocf_mngt_cache_validate_cfg(cfg); + if (result) + return result; + + result = _ocf_mngt_cache_validate_device_cfg(device_cfg); + if (result) + return result; + + result = _ocf_mngt_cache_start(ctx, cache, cfg); + if (!result) { + ocf_cache_log(*cache, log_info, "Successfully added\n"); + } else { + if (cfg->name) { + ocf_log(ctx, log_err, "Inserting cache %s failed\n", + cfg->name); + } else { + ocf_log(ctx, log_err, "Inserting cache failed\n"); + } + return result; + } + + result = _ocf_mngt_cache_attach(*cache, device_cfg, true); + if (result) { + _ocf_mngt_init_handle_error(*cache, ctx, NULL); + return result; + } + + _ocf_mng_cache_set_valid(*cache); + + _ocf_mngt_cache_load_log(*cache); + + return 0; +} + +int ocf_mngt_cache_stop_nolock(ocf_cache_t cache) +{ + int result; + const char *cache_name; + ocf_ctx_t context; + + OCF_CHECK_NULL(cache); + + cache_name = ocf_cache_get_name(cache); + context = ocf_cache_get_ctx(cache); + + ocf_cache_log(cache, log_info, "Stopping cache\n"); + + result = _ocf_mngt_cache_stop(cache); + + if (result == -OCF_ERR_WRITE_CACHE) { + ocf_log(context, log_warn, "Stopped cache %s with " + "errors\n", cache_name); + } else if (result) { + ocf_log(context, log_err, "Stopping cache %s " + "failed\n", cache_name); + } else { + ocf_log(context, log_info, "Cache %s successfully " + "stopped\n", cache_name); + } + + return result; +} + +int ocf_mngt_cache_stop(ocf_cache_t cache) +{ + int result; + + OCF_CHECK_NULL(cache); + + result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + result = ocf_mngt_cache_stop_nolock(cache); + + ocf_mngt_cache_unlock(cache); + + return result; +} + +static int 
_cache_mng_set_core_seq_cutoff_threshold(ocf_core_t core, void *cntx) +{ + uint32_t threshold = *(uint32_t*) cntx; + ocf_cache_t cache = ocf_core_get_cache(core); + ocf_core_id_t core_id = ocf_core_get_id(core); + uint32_t threshold_old = cache->core_conf_meta[core_id]. + seq_cutoff_threshold; + + if (threshold_old == threshold) { + ocf_core_log(core, log_info, + "Sequential cutoff threshold %u bytes is " + "already set\n", threshold); + return 0; + } + cache->core_conf_meta[core_id].seq_cutoff_threshold = threshold; + + if (ocf_metadata_flush_superblock(cache)) { + ocf_core_log(core, log_err, "Failed to store sequential " + "cutoff threshold change. Reverting\n"); + cache->core_conf_meta[core_id].seq_cutoff_threshold = + threshold_old; + return -OCF_ERR_WRITE_CACHE; + } + + ocf_core_log(core, log_info, "Changing sequential cutoff " + "threshold from %u to %u bytes successful\n", + threshold_old, threshold); + + return 0; +} + +int ocf_mngt_set_seq_cutoff_threshold(ocf_cache_t cache, + ocf_core_id_t core_id, uint32_t thresh) +{ + int result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + if (core_id == OCF_CORE_ID_INVALID) { + /* Core id was not specified so threshold will be set + * for cache and all attached cores. + */ + result = ocf_core_visit(cache, + _cache_mng_set_core_seq_cutoff_threshold, + &thresh, + true); + } else { + /* Setting threshold for specified core. */ + ocf_core_t core; + result = ocf_core_get(cache, core_id, &core); + if (result) + goto END; + result = _cache_mng_set_core_seq_cutoff_threshold(core, &thresh); + } +END: + ocf_mngt_cache_unlock(cache); + + return result; +} + +static const char *_ocf_seq_cutoff_policy_names[ocf_seq_cutoff_policy_max] = { + [ocf_seq_cutoff_policy_always] = "always", + [ocf_seq_cutoff_policy_full] = "full", + [ocf_seq_cutoff_policy_never] = "never", +}; + +static const char *_cache_mng_seq_cutoff_policy_get_name( + ocf_seq_cutoff_policy policy) +{ + if (policy < 0 || policy >= ocf_seq_cutoff_policy_max) + return NULL; + + return _ocf_seq_cutoff_policy_names[policy]; +} + +static int _cache_mng_set_core_seq_cutoff_policy(ocf_core_t core, void *cntx) +{ + ocf_seq_cutoff_policy policy = *(ocf_seq_cutoff_policy*) cntx; + ocf_cache_t cache = ocf_core_get_cache(core); + ocf_core_id_t core_id = ocf_core_get_id(core); + uint32_t policy_old = cache->core_conf_meta[core_id].seq_cutoff_policy; + + if (policy_old == policy) { + ocf_core_log(core, log_info, + "Sequential cutoff policy %s is already set\n", + _cache_mng_seq_cutoff_policy_get_name(policy)); + return 0; + } + + cache->core_conf_meta[core_id].seq_cutoff_policy = policy; + + if (ocf_metadata_flush_superblock(cache)) { + ocf_core_log(core, log_err, "Failed to store sequential " + "cutoff policy change. Reverting\n"); + cache->core_conf_meta[core_id].seq_cutoff_policy = policy_old; + return -OCF_ERR_WRITE_CACHE; + } + + ocf_core_log(core, log_info, + "Changing sequential cutoff policy from %s to %s\n", + _cache_mng_seq_cutoff_policy_get_name(policy_old), + _cache_mng_seq_cutoff_policy_get_name(policy)); + + return 0; +} + +int ocf_mngt_set_seq_cutoff_policy(ocf_cache_t cache, ocf_core_id_t core_id, + ocf_seq_cutoff_policy policy) +{ + ocf_core_t core; + int result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + if (core_id == OCF_CORE_ID_INVALID) { + /* Core id was not specified so policy will be set + * for cache and all attached cores. 
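+		 * The visitor below walks the added cores and applies the
+		 * same value to each of them.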
+ */ + result = ocf_core_visit(cache, + _cache_mng_set_core_seq_cutoff_policy, + &policy, + true); + } else { + /* Setting policy for specified core. */ + result = ocf_core_get(cache, core_id, &core); + if (result) + goto END; + result = _cache_mng_set_core_seq_cutoff_policy(core, &policy); + } +END: + ocf_mngt_cache_unlock(cache); + + return result; +} + +int ocf_mngt_get_seq_cutoff_threshold(ocf_cache_t cache, ocf_core_id_t core_id, + uint32_t *thresh) +{ + ocf_core_t core; + int result; + + result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + + *thresh = ocf_core_get_seq_cutoff_threshold(core); + +out: + ocf_mngt_cache_unlock(cache); + + return result; +} + +int ocf_mngt_get_seq_cutoff_policy(ocf_cache_t cache, ocf_core_id_t core_id, + ocf_seq_cutoff_policy *policy) +{ + ocf_core_t core; + int result; + + result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + result = ocf_core_get(cache, core_id, &core); + if (result) + goto out; + *policy = ocf_core_get_seq_cutoff_policy(core); + +out: + ocf_mngt_cache_unlock(cache); + + return result; +} + +static int _cache_mng_set_cache_mode(ocf_cache_t cache, ocf_cache_mode_t mode, + uint8_t flush) +{ + ocf_cache_mode_t mode_new = mode; + ocf_cache_mode_t mode_old = cache->conf_meta->cache_mode; + int result = 0; + + /* Check if IO interface type is valid */ + if (!ocf_cache_mode_is_valid(mode)) + return -OCF_ERR_INVAL; + + if (mode_new == mode_old) { + ocf_cache_log(cache, log_info, "Cache mode '%s' is already set\n", + ocf_get_io_iface_name(mode_new)); + return 0; + } + + cache->conf_meta->cache_mode = mode_new; + + if (flush) { + /* Flush required, do it, do it, do it... */ + result = ocf_mngt_cache_flush_nolock(cache, true); + + if (result) { + cache->conf_meta->cache_mode = mode_old; + return result; + } + + } else if (ocf_cache_mode_wb == mode_old) { + int i; + + for (i = 0; i != OCF_CORE_MAX; ++i) { + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + env_atomic_set(&cache->core_runtime_meta[i]. + initial_dirty_clines, + env_atomic_read(&cache-> + core_runtime_meta[i].dirty_clines)); + } + } + + if (ocf_metadata_flush_superblock(cache)) { + ocf_cache_log(cache, log_err, "Failed to store cache mode " + "change. 
Reverting\n"); + cache->conf_meta->cache_mode = mode_old; + return -OCF_ERR_WRITE_CACHE; + } + + ocf_cache_log(cache, log_info, "Changing cache mode from '%s' to '%s' " + "successful\n", ocf_get_io_iface_name(mode_old), + ocf_get_io_iface_name(mode_new)); + + return 0; +} + +int ocf_mngt_cache_set_mode(ocf_cache_t cache, ocf_cache_mode_t mode, + uint8_t flush) +{ + int result; + + OCF_CHECK_NULL(cache); + + if (!ocf_cache_mode_is_valid(mode)) { + ocf_cache_log(cache, log_err, "Cache mode %u is invalid\n", mode); + return -OCF_ERR_INVAL; + } + + result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + result = _cache_mng_set_cache_mode(cache, mode, flush); + + if (result) { + const char *name = ocf_get_io_iface_name(mode); + + ocf_cache_log(cache, log_err, "Setting cache mode '%s' " + "failed\n", name); + } + + ocf_mngt_cache_unlock(cache); + + return result; +} + +int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + + if (ocf_fallback_pt_is_on(cache)) { + ocf_cache_log(cache, log_info, + "Fallback Pass Through inactive\n"); + } + + env_atomic_set(&cache->fallback_pt_error_counter, 0); + + return 0; +} + +int ocf_mngt_cache_set_fallback_pt_error_threshold(ocf_cache_t cache, + uint32_t new_threshold) +{ + bool old_fallback_pt_state, new_fallback_pt_state; + + OCF_CHECK_NULL(cache); + + if (new_threshold > OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD) + return -OCF_ERR_INVAL; + + old_fallback_pt_state = ocf_fallback_pt_is_on(cache); + + cache->fallback_pt_error_threshold = new_threshold; + + new_fallback_pt_state = ocf_fallback_pt_is_on(cache); + + if (old_fallback_pt_state != new_fallback_pt_state) { + if (new_fallback_pt_state) { + ocf_cache_log(cache, log_info, "Error threshold reached. " + "Fallback Pass Through activated\n"); + } else { + ocf_cache_log(cache, log_info, "Fallback Pass Through " + "inactive\n"); + } + } + + return 0; +} + +int ocf_mngt_cache_get_fallback_pt_error_threshold(ocf_cache_t cache, + uint32_t *threshold) +{ + OCF_CHECK_NULL(cache); + OCF_CHECK_NULL(threshold); + + *threshold = cache->fallback_pt_error_threshold; + + return 0; +} + +int ocf_mngt_cache_detach(ocf_cache_t cache) +{ + int i, j, no; + int result; + ocf_cache_mode_t mode; + + no = cache->conf_meta->core_obj_count; + + result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + if (!env_atomic_read(&cache->attached)) { + result = -EINVAL; + goto unlock; + } + + /* temporarily switch to PT */ + mode = cache->conf_meta->cache_mode; + result = _cache_mng_set_cache_mode(cache, ocf_cache_mode_pt, true); + if (result) + goto unlock; + + /* wait for all requests referencing cacheline metadata to finish */ + env_atomic_set(&cache->attached, 0); + env_waitqueue_wait(cache->pending_cache_wq, + !env_atomic_read(&cache->pending_cache_requests)); + + /* Restore original mode in metadata - it will be used when new + cache device is attached. By this tume all requests are served + in direct-to-core mode. 
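+	   Only the configuration metadata changes here - IO keeps bypassing
+	   the cache until a new device is attached.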
*/ + cache->conf_meta->cache_mode = mode; + + /* remove cacheline metadata and cleaning policy meta for all cores */ + for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) { + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + cache_mng_core_deinit_attached_meta(cache, i); + cache_mng_core_remove_from_cleaning_pol(cache, i); + j++; + } + + /* Do the actual detach - deinit cacheline metadata, stop cleaner + thread and close cache bottom device */ + result = _ocf_mngt_cache_unplug(cache, false); + + if (!result) { + ocf_cache_log(cache, log_info, "Successfully detached\n"); + } else { + if (result == -OCF_ERR_WRITE_CACHE) { + ocf_cache_log(cache, log_warn, + "Detached cache with errors\n"); + } else { + ocf_cache_log(cache, log_err, + "Detaching cache failed\n"); + } + } + +unlock: + ocf_mngt_cache_unlock(cache); + + return result; +} diff --git a/src/mngt/ocf_mngt_common.c b/src/mngt/ocf_mngt_common.c new file mode 100644 index 0000000..3482e70 --- /dev/null +++ b/src/mngt/ocf_mngt_common.c @@ -0,0 +1,448 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_mngt_common.h" +#include "../ocf_priv.h" +#include "../ocf_ctx_priv.h" +#include "../metadata/metadata.h" +#include "../engine/cache_engine.h" +#include "../utils/utils_part.h" +#include "../utils/utils_rq.h" +#include "../utils/utils_device.h" +#include "../eviction/ops.h" +#include "../ocf_logger_priv.h" +#include "../ocf_queue_priv.h" + +/* Close if opened */ +int cache_mng_core_close(ocf_cache_t cache, ocf_core_id_t core_id) +{ + if (!cache->core_obj[core_id].opened) + return -OCF_ERR_CORE_IN_INACTIVE_STATE; + + ocf_data_obj_close(&cache->core_obj[core_id].obj); + cache->core_obj[core_id].opened = false; + + return 0; +} + +/* Remove core from cleaning policy */ +void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache, + int core_id) +{ + ocf_cleaning_t clean_pol_type; + + OCF_METADATA_LOCK_WR(); + + clean_pol_type = cache->conf_meta->cleaning_policy_type; + if (cache->core_obj[core_id].opened) { + if (cleaning_policy_ops[clean_pol_type].remove_core) { + cleaning_policy_ops[clean_pol_type]. + remove_core(cache, core_id); + } + } + + OCF_METADATA_UNLOCK_WR(); +} + +/* Deinitialize core metadata in attached metadata */ +void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id) +{ + int retry = 1; + uint64_t core_size = 0; + ocf_cleaning_t clean_pol_type; + ocf_data_obj_t core; + + core = &cache->core_obj[core_id].obj; + + core_size = ocf_data_obj_get_length(core); + if (!core_size) + core_size = ~0ULL; + + OCF_METADATA_LOCK_WR(); + + clean_pol_type = cache->conf_meta->cleaning_policy_type; + while (retry) { + retry = 0; + if (cleaning_policy_ops[clean_pol_type].purge_range) { + retry = cleaning_policy_ops[clean_pol_type].purge_range(cache, + core_id, 0, core_size); + } + + if (!retry) { + /* Remove from collision_table and Partition. 
Put in FREELIST */ + retry = ocf_metadata_sparse_range(cache, core_id, 0, + core_size); + } + + if (retry) { + OCF_METADATA_UNLOCK_WR(); + env_msleep(100); + OCF_METADATA_LOCK_WR(); + } + } + + OCF_METADATA_UNLOCK_WR(); +} + +/* Mark core as removed in metadata */ +void cache_mng_core_remove_from_meta(struct ocf_cache *cache, int core_id) +{ + OCF_METADATA_LOCK_WR(); + + /* In metadata mark data this core was removed from cache */ + cache->core_conf_meta[core_id].added = false; + + /* Clear UUID of core */ + ocf_uuid_core_clear(cache, &cache->core_obj[core_id]); + cache->core_conf_meta[core_id].seq_no = OCF_SEQ_NO_INVALID; + + OCF_METADATA_UNLOCK_WR(); +} + +/* Deinit in-memory structures related to this core */ +void cache_mng_core_remove_from_cache(struct ocf_cache *cache, int core_id) +{ + env_free(cache->core_obj[core_id].counters); + cache->core_obj[core_id].counters = NULL; + env_bit_clear(core_id, cache->conf_meta->valid_object_bitmap); + + if (!cache->core_obj[core_id].opened && + --cache->ocf_core_inactive_count == 0) { + env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state); + } + + cache->conf_meta->core_obj_count--; +} + +/** + * @brief Wait for the end of asynchronous cleaning + * + * @param cache OCF cache instance + * @param timeout_ms Timeout for waiting in milliseconds + * @note When timeout is less than zero it means wait forever + * + * @retval 0 cleaning finished + * @retval non-zero timeout and cleaning still in progress + */ +static int _ocf_cleaning_wait_for_finish(struct ocf_cache *cache, + const int32_t timeout_ms) +{ + struct ocf_user_part *curr_part; + ocf_part_id_t part_id; + bool cleaning_active = ocf_cache_is_device_attached(cache); + int64_t _timeout = timeout_ms; + + while (cleaning_active) { + cleaning_active = false; + + OCF_METADATA_LOCK_WR(); + for_each_part(cache, curr_part, part_id) { + if (env_atomic_read(&cache->cleaning[part_id])) { + cleaning_active = true; + break; + } + } + OCF_METADATA_UNLOCK_WR(); + + if (cleaning_active) { + env_msleep(20); + + if (timeout_ms >= 0) { + _timeout -= 20; + if (_timeout <= 0) + break; + } + } + }; + + if (cleaning_active) + return -EBUSY; + else + return 0; +} + +void ocf_mngt_cache_put(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + + if (env_atomic_dec_return(&cache->ref_count) == 0) { + ocf_free_queues(cache); + ocf_metadata_deinit(cache); + env_vfree(cache); + } +} + +int ocf_mngt_cache_get(ocf_ctx_t ocf_ctx, ocf_cache_id_t id, ocf_cache_t *cache) +{ + int error = 0; + struct ocf_cache *instance = NULL; + struct ocf_cache *iter = NULL; + + OCF_CHECK_NULL(ocf_ctx); + OCF_CHECK_NULL(cache); + + *cache = NULL; + + if ((id < OCF_CACHE_ID_MIN) || (id > OCF_CACHE_ID_MAX)) { + /* Cache id out of range */ + return -OCF_ERR_INVAL; + } + + /* Lock caches list */ + env_mutex_lock(&ocf_ctx->lock); + + list_for_each_entry(iter, &ocf_ctx->caches, list) { + if (iter->cache_id == id) { + instance = iter; + break; + } + } + + if (instance) { + /* if cache is either fully initialized or during recovery */ + if (instance->valid_ocf_cache_device_t) { + /* Increase reference counter */ + env_atomic_inc(&instance->ref_count); + } else { + /* Cache not initialized yet */ + instance = NULL; + } + } + + env_mutex_unlock(&ocf_ctx->lock); + + if (!instance) + error = -OCF_ERR_CACHE_NOT_EXIST; + else + *cache = instance; + + return error; +} + +bool ocf_mngt_is_cache_locked(ocf_cache_t cache) +{ + if (env_rwsem_is_locked(&cache->lock)) + return true; + + if (env_atomic_read(&cache->lock_waiter)) + return true; + + return false; 
+} + +void ocf_mngt_cache_unlock(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + env_rwsem_up_write(&cache->lock); + ocf_mngt_cache_put(cache); +} + +void ocf_mngt_cache_read_unlock(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + env_rwsem_up_read(&cache->lock); + ocf_mngt_cache_put(cache); +} + +int _ocf_mngt_cache_lock(ocf_cache_t cache, bool read) +{ + int ret; + + /* Increment reference counter */ + env_atomic_inc(&cache->ref_count); + + env_atomic_inc(&cache->lock_waiter); + if (read) + ret = env_rwsem_down_read_interruptible(&cache->lock); + else + ret = env_rwsem_down_write_interruptible(&cache->lock); + env_atomic_dec(&cache->lock_waiter); + + if (ret) { + ocf_mngt_cache_put(cache); + return ret; + } + + if (env_bit_test(ocf_cache_state_stopping, &cache->cache_state)) { + /* Cache already stooping, do not allow any operation */ + ret = -OCF_ERR_CACHE_NOT_EXIST; + goto unlock; + } + + /* Return, when asynchronous cleaning is finished */ + if (_ocf_cleaning_wait_for_finish(cache, 60 * 1000)) { + /* Because of some reasons, asynchronous cleaning still active, + * cannot continue + */ + ret = -OCF_ERR_CACHE_IN_USE; + goto unlock; + } + + return 0; + +unlock: + if (read) + ocf_mngt_cache_read_unlock(cache); + else + ocf_mngt_cache_unlock(cache); + + return ret; +} + +int ocf_mngt_cache_lock(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return _ocf_mngt_cache_lock(cache, false); +} + +int ocf_mngt_cache_read_lock(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return _ocf_mngt_cache_lock(cache, true); +} + +/* if cache is either fully initialized or during recovery */ +static ocf_cache_t _ocf_mngt_cache_try_get(ocf_cache_t cache) +{ + if (!!cache->valid_ocf_cache_device_t) { + /* Increase reference counter */ + env_atomic_inc(&cache->ref_count); + return cache; + } + + return NULL; +} + +static int _ocf_mngt_cache_get_list_cpy(ocf_ctx_t ocf_ctx, ocf_cache_t **list, + uint32_t *size) +{ + int result = 0; + uint32_t count = 0, i = 0; + struct ocf_cache *iter, *this; + + *list = NULL; + *size = 0; + + env_mutex_lock(&ocf_ctx->lock); + + list_for_each_entry(iter, &ocf_ctx->caches, list) { + count++; + } + + if (!count) + goto END; + + *list = env_vmalloc(sizeof((*list)[0]) * count); + if (*list == NULL) { + result = -ENOMEM; + goto END; + } + + list_for_each_entry(iter, &ocf_ctx->caches, list) { + this = _ocf_mngt_cache_try_get(iter); + + if (this) { + (*list)[i] = this; + i++; + } + } + + if (i) { + /* Update size if cache list */ + *size = i; + } else { + env_vfree(*list); + *list = NULL; + } + +END: + env_mutex_unlock(&ocf_ctx->lock); + return result; +} + +int ocf_mngt_cache_visit(ocf_ctx_t ocf_ctx, ocf_mngt_cache_visitor_t visitor, + void *cntx) +{ + ocf_cache_t *list; + uint32_t size, i; + int result; + + OCF_CHECK_NULL(ocf_ctx); + OCF_CHECK_NULL(visitor); + + result = _ocf_mngt_cache_get_list_cpy(ocf_ctx, &list, &size); + if (result) + return result; + + if (size == 0) + return 0; + + /* Iterate over caches */ + for (i = 0; i < size; i++) { + ocf_cache_t this = list[i]; + + result = visitor(this, cntx); + + if (result) + break; + } + + /* Put caches */ + for (i = 0; i < size; i++) + ocf_mngt_cache_put(list[i]); + + env_vfree(list); + + return result; +} + +int ocf_mngt_cache_visit_reverse(ocf_ctx_t ocf_ctx, + ocf_mngt_cache_visitor_t visitor, void *cntx) +{ + ocf_cache_t *list; + uint32_t size, i; + int result; + + OCF_CHECK_NULL(ocf_ctx); + OCF_CHECK_NULL(visitor); + + result = _ocf_mngt_cache_get_list_cpy(ocf_ctx, &list, &size); + if (result) + return result; + + if (size == 
0) + return 0; + + /* Iterate over caches */ + for (i = size; i; i--) { + ocf_cache_t this = list[i - 1]; + + result = visitor(this, cntx); + + if (result) + break; + } + + /* Put caches */ + for (i = 0; i < size; i++) + ocf_mngt_cache_put(list[i]); + + env_vfree(list); + + return result; +} + +void ocf_mngt_wait_for_io_finish(ocf_cache_t cache) +{ + uint32_t rq_active = 0; + + do { + rq_active = ocf_rq_get_allocated(cache); + if (rq_active) + env_msleep(500); + } while (rq_active); +} + diff --git a/src/mngt/ocf_mngt_common.h b/src/mngt/ocf_mngt_common.h new file mode 100644 index 0000000..60b3a3f --- /dev/null +++ b/src/mngt/ocf_mngt_common.h @@ -0,0 +1,35 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + + +#ifndef __OCF_MNGT_COMMON_H__ +#define __OCF_MNGT_COMMON_H__ + +int cache_mng_core_close(ocf_cache_t cache, ocf_core_id_t core_id); + +void cache_mng_core_remove_from_meta(struct ocf_cache *cache, int core_id); + +void cache_mng_core_remove_from_cache(struct ocf_cache *cache, int core_id); + +void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id); + +void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache, + int core_id); + +int _ocf_cleaning_thread(void *priv); + +int cache_mng_thread_io_requests(void *data); + +bool ocf_mngt_cache_is_dirty(ocf_cache_t cache); + +void ocf_mngt_wait_for_io_finish(ocf_cache_t cache); + +int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache, + ocf_part_id_t part_id, const char *name, uint32_t min_size, + uint32_t max_size, uint8_t priority, bool valid); + +bool ocf_mngt_is_cache_locked(ocf_cache_t cache); + +#endif /* __OCF_MNGT_COMMON_H__ */ diff --git a/src/mngt/ocf_mngt_core.c b/src/mngt/ocf_mngt_core.c new file mode 100644 index 0000000..a13a98e --- /dev/null +++ b/src/mngt/ocf_mngt_core.c @@ -0,0 +1,480 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_mngt_common.h" +#include "../ocf_priv.h" +#include "../metadata/metadata.h" +#include "../engine/cache_engine.h" +#include "../utils/utils_device.h" +#include "../ocf_stats_priv.h" +#include "../ocf_def_priv.h" + +static ocf_seq_no_t _ocf_mngt_get_core_seq_no(ocf_cache_t cache) +{ + if (cache->conf_meta->curr_core_seq_no == OCF_SEQ_NO_MAX) + return OCF_SEQ_NO_INVALID; + + return ++cache->conf_meta->curr_core_seq_no; +} + +static int _ocf_mngt_cache_try_add_core(ocf_cache_t cache, ocf_core_t *core, + struct ocf_mngt_core_config *cfg) +{ + int result = 0; + struct ocf_core *core_obj; + ocf_data_obj_t obj; + + core_obj = &cache->core_obj[cfg->core_id]; + obj = &core_obj->obj; + + if (ocf_ctx_get_data_obj_type_id(cache->owner, obj->type) != + cfg->data_obj_type) { + result = -OCF_ERR_INVAL_DATA_OBJ_TYPE; + goto error_out; + } + + result = ocf_data_obj_open(obj); + if (result) + goto error_out; + + if (!ocf_data_obj_get_length(obj)) { + result = -OCF_ERR_CORE_NOT_AVAIL; + goto error_after_open; + } + + cache->core_obj[cfg->core_id].opened = true; + + if (!(--cache->ocf_core_inactive_count)) + env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state); + + *core = core_obj; + return 0; + +error_after_open: + ocf_data_obj_close(obj); +error_out: + *core = NULL; + return result; +} + +static int _ocf_mngt_cache_add_core(ocf_cache_t cache, ocf_core_t *core, + struct ocf_mngt_core_config *cfg) +{ + int result = 0; + struct ocf_core *core_obj; + ocf_data_obj_t obj; + ocf_seq_no_t core_sequence_no; + ocf_cleaning_t 
clean_type; + uint64_t length; + + core_obj = &cache->core_obj[cfg->core_id]; + obj = &core_obj->obj; + + core_obj->obj.cache = cache; + + /* Set uuid */ + ocf_uuid_core_set(cache, core_obj, &cfg->uuid); + + obj->type = ocf_ctx_get_data_obj_type(cache->owner, cfg->data_obj_type); + if (!obj->type) { + result = -OCF_ERR_INVAL_DATA_OBJ_TYPE; + goto error_out; + } + + if (cfg->user_metadata.data && cfg->user_metadata.size > 0) { + result = ocf_core_set_user_metadata_raw(core_obj, + cfg->user_metadata.data, + cfg->user_metadata.size); + if (result) + goto error_out; + } + + result = ocf_data_obj_open(obj); + if (result) + goto error_out; + + length = ocf_data_obj_get_length(obj); + if (!length) { + result = -OCF_ERR_CORE_NOT_AVAIL; + goto error_after_open; + } + cache->core_conf_meta[cfg->core_id].length = length; + + clean_type = cache->conf_meta->cleaning_policy_type; + if (ocf_cache_is_device_attached(cache) && + cleaning_policy_ops[clean_type].add_core) { + result = cleaning_policy_ops[clean_type].add_core(cache, + cfg->core_id); + if (result) + goto error_after_open; + } + + /* When adding new core to cache, allocate stat counters */ + core_obj->counters = + env_zalloc(sizeof(*core_obj->counters), ENV_MEM_NORMAL); + if (!core_obj->counters) { + result = -OCF_ERR_NO_MEM; + goto error_after_clean_pol; + } + /* When adding new core to cache, reset all core/cache statistics */ + ocf_stats_init(core_obj); + env_atomic_set(&cache->core_runtime_meta[cfg->core_id]. + cached_clines, 0); + env_atomic_set(&cache->core_runtime_meta[cfg->core_id]. + dirty_clines, 0); + env_atomic64_set(&cache->core_runtime_meta[cfg->core_id]. + dirty_since, 0); + + /* In metadata mark data this core was added into cache */ + env_bit_set(cfg->core_id, cache->conf_meta->valid_object_bitmap); + cache->core_conf_meta[cfg->core_id].added = true; + cache->core_obj[cfg->core_id].opened = true; + + /* Set default cache parameters for sequential */ + cache->core_conf_meta[cfg->core_id].seq_cutoff_policy = + ocf_seq_cutoff_policy_default; + cache->core_conf_meta[cfg->core_id].seq_cutoff_threshold = + cfg->seq_cutoff_threshold; + + /* Add core sequence number for atomic metadata matching */ + core_sequence_no = _ocf_mngt_get_core_seq_no(cache); + if (core_sequence_no == OCF_SEQ_NO_INVALID) { + result = -OCF_ERR_TOO_MANY_CORES; + goto error_after_counters_allocation; + } + cache->core_conf_meta[cfg->core_id].seq_no = core_sequence_no; + + /* Update super-block with core device addition */ + if (ocf_metadata_flush_superblock(cache)) { + result = -OCF_ERR_WRITE_CACHE; + goto error_after_counters_allocation; + } + + /* Increase value of added cores */ + cache->conf_meta->core_obj_count++; + + *core = core_obj; + return 0; + +error_after_counters_allocation: + env_bit_clear(cfg->core_id, cache->conf_meta->valid_object_bitmap); + cache->core_conf_meta[cfg->core_id].added = false; + cache->core_obj[cfg->core_id].opened = false; + + /* An error when flushing metadata, try restore for safety reason + * previous metadata sate on cache device. + * But if that fails too, we are scr**ed... or maybe: + * TODO: Handle situation when we can't flush metadata by + * trying to flush all the dirty data and switching to non-wb + * cache mode. 
+ */ + ocf_metadata_flush_superblock(cache); + + env_free(core_obj->counters); + core_obj->counters = NULL; + +error_after_clean_pol: + if (cleaning_policy_ops[clean_type].remove_core) + cleaning_policy_ops[clean_type].remove_core(cache, cfg->core_id); + +error_after_open: + ocf_data_obj_close(obj); +error_out: + ocf_uuid_core_clear(cache, core_obj); + *core = NULL; + return result; +} + +static unsigned long _ffz(unsigned long word) +{ + asm("rep; bsf %1,%0" + : "=r" (word) + : "r" (~word)); + return word; +} + +static unsigned long _ocf_mngt_find_first_free_core(const unsigned long *bitmap, + unsigned long size) +{ + unsigned long i; + unsigned long ret = size; + + /* check core 0 availability */ + bool zero_core_free = !(*bitmap & 0x1UL); + + /* check if any core id is free except 0 */ + for (i = 0; i * sizeof(unsigned long) * 8 < size; i++) { + unsigned long long ignore_mask = (i == 0) ? 1UL : 0UL; + if (~(bitmap[i] | ignore_mask)) { + ret = MIN(size, i * sizeof(unsigned long) * 8 + + _ffz(bitmap[i] | ignore_mask)); + break; + } + } + + /* return 0 only if no other core is free */ + if (ret == size && zero_core_free) + return 0; + + return ret; +} + +static int __ocf_mngt_lookup_core_uuid(ocf_cache_t cache, + struct ocf_mngt_core_config *cfg) +{ + int i; + + for (i = 0; i < OCF_CORE_MAX; i++) { + ocf_core_t core = &cache->core_obj[i]; + + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + + if (cache->core_obj[i].opened) + continue; + + if (ocf_ctx_get_data_obj_type_id(cache->owner, core->obj.type) + != cfg->data_obj_type) { + continue; + } + + if (!env_strncmp(core->obj.uuid.data, cfg->uuid.data, + min(core->obj.uuid.size, + cfg->uuid.size))) + return i; + } + + return OCF_CORE_MAX; +} + +static int __ocf_mngt_try_find_core_id(ocf_cache_t cache, + struct ocf_mngt_core_config *cfg, ocf_core_id_t tmp_core_id) +{ + if (tmp_core_id == OCF_CORE_MAX) { + /* FIXME: uuid.data could be not NULL-terminated ANSI string */ + ocf_cache_log(cache, log_err, "Core with uuid %s not found in " + "cache metadata\n", (char*) cfg->uuid.data); + return -OCF_ERR_CORE_NOT_AVAIL; + } + + if (cfg->core_id != tmp_core_id) { + ocf_cache_log(cache, log_err, + "Given core id doesn't match with metadata\n"); + return -OCF_ERR_CORE_NOT_AVAIL; + } + + + cfg->core_id = tmp_core_id; + return 0; +} + +static int __ocf_mngt_find_core_id(ocf_cache_t cache, + struct ocf_mngt_core_config *cfg, ocf_core_id_t tmp_core_id) +{ + if (tmp_core_id != OCF_CORE_MAX) { + ocf_cache_log(cache, log_err, + "Core ID already added as inactive with id:" + " %hu.\n", tmp_core_id); + return -OCF_ERR_CORE_NOT_AVAIL; + } + + if (cfg->core_id == OCF_CORE_MAX) { + ocf_cache_log(cache, log_debug, "Core ID is unspecified - " + "will set first available number\n"); + + /* Core is unspecified */ + cfg->core_id = _ocf_mngt_find_first_free_core( + cache->conf_meta->valid_object_bitmap, + OCF_CORE_MAX); + /* no need to check if find_first_zero_bit failed and + * *core_id == MAX_CORE_OBJS_PER_CACHE, as above there is check + * for core_obj_count being greater or equal to + * MAX_CORE_OBJS_PER_CACHE + */ + } else if (cfg->core_id < OCF_CORE_MAX) { + /* check if id is not used already */ + if (env_bit_test(cfg->core_id, + cache->conf_meta->valid_object_bitmap)) { + ocf_cache_log(cache, log_debug, + "Core ID already allocated: %d.\n", + cfg->core_id); + return -OCF_ERR_CORE_NOT_AVAIL; + } + } else { + ocf_cache_log(cache, log_err, + "Core ID exceeds maximum of %d.\n", + OCF_CORE_MAX); + return -OCF_ERR_CORE_NOT_AVAIL; + } + + return 
0; +} + +static int _ocf_mngt_find_core_id(ocf_cache_t cache, + struct ocf_mngt_core_config *cfg) +{ + int result; + ocf_core_id_t tmp_core_id; + + if (cache->conf_meta->core_obj_count >= OCF_CORE_MAX) + return -OCF_ERR_TOO_MANY_CORES; + + tmp_core_id = __ocf_mngt_lookup_core_uuid(cache, cfg); + + if (cfg->try_add) + result = __ocf_mngt_try_find_core_id(cache, cfg, tmp_core_id); + else + result = __ocf_mngt_find_core_id(cache, cfg, tmp_core_id); + + return result; +} + +int ocf_mngt_cache_add_core_nolock(ocf_cache_t cache, ocf_core_t *core, + struct ocf_mngt_core_config *cfg) +{ + int result; + char core_name[OCF_CORE_NAME_SIZE]; + + OCF_CHECK_NULL(cache); + OCF_CHECK_NULL(core); + + result = _ocf_mngt_find_core_id(cache, cfg); + if (result) + return result; + + if (cfg->name) { + result = env_strncpy(core_name, sizeof(core_name), cfg->name, + cfg->name_size); + if (result) + return result; + } else { + result = snprintf(core_name, sizeof(core_name), "%hu", + cfg->core_id); + if (result < 0) + return result; + } + + result = ocf_core_set_name(&cache->core_obj[cfg->core_id], core_name, + sizeof(core_name)); + if (result) + return result; + + ocf_cache_log(cache, log_debug, "Inserting core %s\n", core_name); + + if (cfg->try_add) + result = _ocf_mngt_cache_try_add_core(cache, core, cfg); + else + result = _ocf_mngt_cache_add_core(cache, core, cfg); + + if (!result) { + ocf_core_log(*core, log_info, "Successfully added\n"); + } else { + if (result == -OCF_ERR_CORE_NOT_AVAIL) { + ocf_cache_log(cache, log_err, "Core %s is zero size\n", + core_name); + } + ocf_cache_log(cache, log_err, "Adding core %s failed\n", + core_name); + } + + return result; +} + +int ocf_mngt_cache_add_core(ocf_cache_t cache, ocf_core_t *core, + struct ocf_mngt_core_config *cfg) +{ + int result; + + OCF_CHECK_NULL(cache); + + result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + result = ocf_mngt_cache_add_core_nolock(cache, core, cfg); + + ocf_mngt_cache_unlock(cache); + + return result; +} + +static int _ocf_mngt_cache_remove_core(ocf_core_t core, bool detach) +{ + struct ocf_cache *cache = core->obj.cache; + ocf_core_id_t core_id = ocf_core_get_id(core); + int status; + + if (detach) { + status = cache_mng_core_close(cache, core_id); + if (!status) { + cache->ocf_core_inactive_count++; + env_bit_set(ocf_cache_state_incomplete, + &cache->cache_state); + } + return status; + } + + /* Deinit everything*/ + if (ocf_cache_is_device_attached(cache)) { + cache_mng_core_deinit_attached_meta(cache, core_id); + cache_mng_core_remove_from_cleaning_pol(cache, core_id); + } + cache_mng_core_remove_from_meta(cache, core_id); + cache_mng_core_remove_from_cache(cache, core_id); + cache_mng_core_close(cache, core_id); + + /* Update super-block with core device removal */ + ocf_metadata_flush_superblock(cache); + + return 0; +} + +int ocf_mngt_cache_remove_core_nolock(ocf_cache_t cache, ocf_core_id_t core_id, + bool detach) +{ + int result; + ocf_core_t core; + const char *core_name; + + OCF_CHECK_NULL(cache); + + result = ocf_core_get(cache, core_id, &core); + if (result < 0) + return -OCF_ERR_CORE_NOT_AVAIL; + + ocf_core_log(core, log_debug, "Removing core\n"); + + core_name = ocf_core_get_name(core); + + result = _ocf_mngt_cache_remove_core(core, detach); + if (!result) { + ocf_cache_log(cache, log_info, "Core %s successfully removed\n", + core_name); + } else { + ocf_cache_log(cache, log_err, "Removing core %s failed\n", + core_name); + } + + return result; +} + +int ocf_mngt_cache_remove_core(ocf_cache_t cache, 
ocf_core_id_t core_id, + bool detach) +{ + int result; + + OCF_CHECK_NULL(cache); + + result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + result = ocf_mngt_cache_remove_core_nolock(cache, core_id, detach); + + ocf_mngt_cache_unlock(cache); + + return result; +} diff --git a/src/mngt/ocf_mngt_core_pool.c b/src/mngt/ocf_mngt_core_pool.c new file mode 100644 index 0000000..4b7c31d --- /dev/null +++ b/src/mngt/ocf_mngt_core_pool.c @@ -0,0 +1,123 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + + +#include "ocf/ocf.h" +#include "ocf_mngt_common.h" +#include "../ocf_priv.h" +#include "../ocf_core_priv.h" +#include "../ocf_ctx_priv.h" + +void ocf_mngt_core_pool_init(ocf_ctx_t ctx) +{ + OCF_CHECK_NULL(ctx); + INIT_LIST_HEAD(&ctx->core_pool.core_pool_head); +} + +int ocf_mngt_core_pool_get_count(ocf_ctx_t ctx) +{ + int count; + OCF_CHECK_NULL(ctx); + env_mutex_lock(&ctx->lock); + count = ctx->core_pool.core_pool_count; + env_mutex_unlock(&ctx->lock); + return count; +} + +int ocf_mngt_core_pool_add(ocf_ctx_t ctx, ocf_uuid_t uuid, uint8_t type) +{ + ocf_data_obj_t obj; + + int result = 0; + + OCF_CHECK_NULL(ctx); + + result = ocf_ctx_data_obj_create(ctx, &obj, uuid, type); + if (result) + return result; + + result = ocf_data_obj_open(obj); + if (result) { + ocf_data_obj_deinit(obj); + return result; + } + + env_mutex_lock(&ctx->lock); + list_add(&obj->core_pool_item, &ctx->core_pool.core_pool_head); + ctx->core_pool.core_pool_count++; + env_mutex_unlock(&ctx->lock); + return result; +} + +int ocf_mngt_core_pool_visit(ocf_ctx_t ctx, + int (*visitor)(ocf_uuid_t, void *), void *visitor_ctx) +{ + int result = 0; + ocf_data_obj_t sobj; + + OCF_CHECK_NULL(ctx); + OCF_CHECK_NULL(visitor); + + env_mutex_lock(&ctx->lock); + list_for_each_entry(sobj, &ctx->core_pool.core_pool_head, + core_pool_item) { + result = visitor(&sobj->uuid, visitor_ctx); + if (result) + break; + } + env_mutex_unlock(&ctx->lock); + return result; +} + +ocf_data_obj_t ocf_mngt_core_pool_lookup(ocf_ctx_t ctx, ocf_uuid_t uuid, + ocf_data_obj_type_t type) +{ + ocf_data_obj_t sobj; + + OCF_CHECK_NULL(ctx); + OCF_CHECK_NULL(uuid); + OCF_CHECK_NULL(uuid->data); + + list_for_each_entry(sobj, &ctx->core_pool.core_pool_head, + core_pool_item) { + if (sobj->type == type && !env_strncmp(sobj->uuid.data, + uuid->data, min(sobj->uuid.size, uuid->size))) { + return sobj; + } + } + + return NULL; +} + +void ocf_mngt_core_pool_remove(ocf_ctx_t ctx, ocf_data_obj_t obj) +{ + OCF_CHECK_NULL(ctx); + OCF_CHECK_NULL(obj); + env_mutex_lock(&ctx->lock); + ctx->core_pool.core_pool_count--; + list_del(&obj->core_pool_item); + env_mutex_unlock(&ctx->lock); + ocf_data_obj_deinit(obj); +} + +void ocf_mngt_core_pool_close_and_remove(ocf_ctx_t ctx, ocf_data_obj_t obj) +{ + OCF_CHECK_NULL(ctx); + OCF_CHECK_NULL(obj); + ocf_data_obj_close(obj); + ocf_mngt_core_pool_remove(ctx, obj); +} + +void ocf_mngt_core_pool_deinit(ocf_ctx_t ctx) +{ + ocf_data_obj_t sobj, tobj; + + OCF_CHECK_NULL(ctx); + + list_for_each_entry_safe(sobj, tobj, &ctx->core_pool.core_pool_head, + core_pool_item) { + ocf_mngt_core_pool_close_and_remove(ctx, sobj); + } +} diff --git a/src/mngt/ocf_mngt_flush.c b/src/mngt/ocf_mngt_flush.c new file mode 100644 index 0000000..89bcb9c --- /dev/null +++ b/src/mngt/ocf_mngt_flush.c @@ -0,0 +1,803 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_mngt_common.h" +#include "../ocf_priv.h" 
+#include "../metadata/metadata.h" +#include "../cleaning/cleaning.h" +#include "../engine/cache_engine.h" +#include "../utils/utils_cleaner.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_part.h" +#include "../ocf_def_priv.h" + +static inline void _ocf_mngt_begin_flush(struct ocf_cache *cache) +{ + env_mutex_lock(&cache->flush_mutex); + + env_atomic_set(&cache->flush_started, 1); + + env_waitqueue_wait(cache->pending_dirty_wq, + !env_atomic_read(&cache->pending_dirty_requests)); +} + +static inline void _ocf_mngt_end_flush(struct ocf_cache *cache) +{ + env_atomic_set(&cache->flush_started, 0); + + env_mutex_unlock(&cache->flush_mutex); +} + +bool ocf_mngt_cache_is_dirty(ocf_cache_t cache) +{ + uint32_t i; + + for (i = 0; i < OCF_CORE_MAX; ++i) { + if (!cache->core_conf_meta[i].added) + continue; + + if (env_atomic_read(&(cache->core_runtime_meta[i]. + dirty_clines))) { + return true; + } + } + + return false; +} + +/************************FLUSH CORE CODE**************************************/ +/* Returns: + * 0 if OK and tbl & num is filled: + * * tbl - table with sectors&cacheline + * * num - number of items in this table. + * other value means error. + * NOTE: + * Table is not sorted. + */ +static int _ocf_mngt_get_sectors(struct ocf_cache *cache, int core_id, + struct flush_data **tbl, uint32_t *num) +{ + uint64_t core_line; + ocf_core_id_t i_core_id; + struct flush_data *p; + uint32_t i, j, dirty = 0; + + dirty = env_atomic_read(&cache->core_runtime_meta[core_id]. + dirty_clines); + if (!dirty) { + *num = 0; + *tbl = NULL; + return 0; + } + + p = env_vmalloc(dirty * sizeof(**tbl)); + if (!p) + return -OCF_ERR_NO_MEM; + + for (i = 0, j = 0; i < cache->device->collision_table_entries; i++) { + ocf_metadata_get_core_info(cache, i, &i_core_id, &core_line); + + if (i_core_id != core_id) + continue; + + if (!metadata_test_valid_any(cache, i)) + continue; + + if (!metadata_test_dirty(cache, i)) + continue; + + if (ocf_cache_line_is_used(cache, i)) + continue; + + /* It's core_id cacheline and it's valid and it's dirty! */ + p[j].cache_line = i; + p[j].core_line = core_line; + p[j].core_id = i_core_id; + j++; + /* stop if all cachelines were found */ + if (j == dirty) + break; + } + + ocf_core_log(&cache->core_obj[core_id], log_debug, + "%u dirty cache lines to clean\n", j); + + if (dirty != j) { + ocf_cache_log(cache, log_debug, "Wrong number of dirty " + "blocks for flushing core %s (%u!=%u)\n", + cache->core_obj[core_id].name, j, dirty); + } + + + *tbl = p; + *num = j; + return 0; +} + +static void _ocf_mngt_free_sectors(void *tbl) +{ + env_vfree(tbl); +} + +static int _ocf_mngt_get_flush_containers(ocf_cache_t cache, + struct flush_container **fctbl, uint32_t *fcnum) +{ + struct flush_container *fc; + struct flush_container *curr; + uint32_t *core_revmap; + uint32_t num; + uint64_t core_line; + ocf_core_id_t core_id; + uint32_t i, j, dirty = 0; + int step = 0; + + /* + * TODO: Create containers for each physical device, not for + * each core. Cores can be partitions of single device. 
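+	 * With the current per-core layout, cores sharing one physical disk
+	 * are flushed as separate containers.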
+ */ + + num = cache->conf_meta->core_obj_count; + if (num == 0) { + *fcnum = 0; + return 0; + } + + core_revmap = env_vzalloc(sizeof(*core_revmap) * OCF_CORE_MAX); + if (!core_revmap) + return -OCF_ERR_NO_MEM; + + /* TODO: Alloc flush_containers and data tables in single allocation */ + fc = env_vzalloc(sizeof(**fctbl) * num); + if (!fc) { + env_vfree(core_revmap); + return -OCF_ERR_NO_MEM; + } + + for (i = 0, j = 0; i < OCF_CORE_MAX; i++) { + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + + fc[j].core_id = i; + core_revmap[i] = j; + + /* Check for dirty blocks */ + fc[j].count = env_atomic_read(&cache-> + core_runtime_meta[i].dirty_clines); + dirty += fc[j].count; + + if (fc[j].count) { + fc[j].flush_data = env_vmalloc(fc[j].count * + sizeof(*fc[j].flush_data)); + } + + if (++j == cache->conf_meta->core_obj_count) + break; + } + + if (!dirty) { + env_vfree(core_revmap); + env_vfree(fc); + *fcnum = 0; + return 0; + } + + for (i = 0, j = 0; i < cache->device->collision_table_entries; i++) { + ocf_metadata_get_core_info(cache, i, &core_id, &core_line); + + if (!metadata_test_valid_any(cache, i)) + continue; + + if (!metadata_test_dirty(cache, i)) + continue; + + if (ocf_cache_line_is_used(cache, i)) + continue; + + curr = &fc[core_revmap[core_id]]; + + ENV_BUG_ON(curr->iter >= curr->count); + + /* It's core_id cacheline and it's valid and it's dirty! */ + curr->flush_data[curr->iter].cache_line = i; + curr->flush_data[curr->iter].core_line = core_line; + curr->flush_data[curr->iter].core_id = core_id; + curr->iter++; + + j++; + /* stop if all cachelines were found */ + if (j == dirty) + break; + + OCF_COND_RESCHED(step, 1000000) + } + + if (dirty != j) { + ocf_cache_log(cache, log_debug, "Wrong number of dirty " + "blocks (%u!=%u)\n", j, dirty); + for (i = 0; i < num; i++) + fc[i].count = fc[i].iter; + } + + for (i = 0; i < num; i++) + fc[i].iter = 0; + + env_vfree(core_revmap); + *fctbl = fc; + *fcnum = num; + return 0; +} + +static void _ocf_mngt_free_flush_containers(struct flush_container *fctbl, + uint32_t num) +{ + int i; + + for (i = 0; i < num; i++) + env_vfree(fctbl[i].flush_data); + env_vfree(fctbl); +} + +/* + * OCF will try to guess disk speed etc. and adjust flushing block + * size accordingly, however these bounds shall be respected regardless + * of disk speed, cache line size configured etc. 
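+ * Both limits below are expressed in cache lines, i.e. 4 MiB and 100 MiB
+ * worth of data for the configured cache line size.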
+ */ +#define OCF_MNG_FLUSH_MIN (4*MiB / ocf_line_size(cache)) +#define OCF_MNG_FLUSH_MAX (100*MiB / ocf_line_size(cache)) + +static void _ocf_mngt_flush_portion(struct flush_container *fc) +{ + ocf_cache_t cache = fc->cache; + uint64_t flush_portion_div; + uint32_t curr_count; + + flush_portion_div = env_ticks_to_msecs(fc->ticks2 - fc->ticks1); + if (unlikely(!flush_portion_div)) + flush_portion_div = 1; + + fc->flush_portion = fc->flush_portion * 1000 / flush_portion_div; + fc->flush_portion &= ~0x3ffULL; + + /* regardless those calculations, limit flush portion to be + * between OCF_MNG_FLUSH_MIN and OCF_MNG_FLUSH_MAX + */ + fc->flush_portion = MIN(fc->flush_portion, OCF_MNG_FLUSH_MAX); + fc->flush_portion = MAX(fc->flush_portion, OCF_MNG_FLUSH_MIN); + + curr_count = MIN(fc->count - fc->iter, fc->flush_portion); + + ocf_cleaner_do_flush_data_async(fc->cache, + &fc->flush_data[fc->iter], + curr_count, &fc->attribs); + + fc->iter += curr_count; +} + +static void _ocf_mngt_flush_end(void *private_data, int error) +{ + struct flush_container *fc = private_data; + + fc->ticks2 = env_get_tick_count(); + + env_atomic_cmpxchg(fc->error, 0, error); + + env_atomic_set(&fc->completed, 1); + env_atomic_inc(fc->progress); + env_waitqueue_wake_up(fc->wq); +} + +static int _ocf_mngt_flush_containers(ocf_cache_t cache, + struct flush_container *fctbl, uint32_t fcnum, + bool allow_interruption) +{ + uint32_t fc_to_flush; + env_waitqueue wq; + env_atomic progress; /* incremented each time flushing of a portion of a + container is completed */ + env_atomic error; + ocf_core_t core; + bool interrupt = false; + int i; + + if (fcnum == 0) + return 0; + + env_waitqueue_init(&wq); + + /* Sort data. Smallest sectors first (0...n). */ + ocf_cleaner_sort_flush_containers(fctbl, fcnum); + + env_atomic_set(&error, 0); + + for (i = 0; i < fcnum; i++) { + fctbl[i].attribs.cache_line_lock = true; + fctbl[i].attribs.metadata_locked = true; + fctbl[i].attribs.cmpl_context = &fctbl[i]; + fctbl[i].attribs.cmpl_fn = _ocf_mngt_flush_end; + fctbl[i].attribs.io_queue = 0; + fctbl[i].cache = cache; + fctbl[i].progress = &progress; + fctbl[i].error = &error; + fctbl[i].wq = &wq; + fctbl[i].flush_portion = OCF_MNG_FLUSH_MIN; + fctbl[i].ticks1 = 0; + fctbl[i].ticks2 = UINT_MAX; + env_atomic_set(&fctbl[i].completed, 1); + } + + for (fc_to_flush = fcnum; fc_to_flush > 0;) { + env_atomic_set(&progress, 0); + for (i = 0; i < fcnum; i++) { + if (!env_atomic_read(&fctbl[i].completed)) + continue; + + core = &cache->core_obj[fctbl[i].core_id]; + env_atomic_set(&core->flushed, fctbl[i].iter); + env_atomic_set(&fctbl[i].completed, 0); + + if (fctbl[i].iter == fctbl[i].count || interrupt || + env_atomic_read(&error)) { + fc_to_flush--; + continue; + } + + _ocf_mngt_flush_portion(&fctbl[i]); + } + if (fc_to_flush) { + ocf_metadata_unlock(cache, OCF_METADATA_WR); + env_cond_resched(); + env_waitqueue_wait(wq, env_atomic_read(&progress)); + ocf_metadata_lock(cache, OCF_METADATA_WR); + } + if (cache->flushing_interrupted && !interrupt) { + if (allow_interruption) { + interrupt = true; + ocf_cache_log(cache, log_info, + "Flushing interrupted by " + "user\n"); + } else { + ocf_cache_log(cache, log_err, + "Cannot interrupt flushing\n"); + } + } + } + + return interrupt ? 
-OCF_ERR_FLUSHING_INTERRUPTED : + env_atomic_read(&error); +} + +static int _ocf_mngt_flush_core(ocf_core_t core, bool allow_interruption) +{ + ocf_core_id_t core_id = ocf_core_get_id(core); + ocf_cache_t cache = core->obj.cache; + struct flush_container fc; + int ret; + + ocf_metadata_lock(cache, OCF_METADATA_WR); + + ret = _ocf_mngt_get_sectors(cache, core_id, + &fc.flush_data, &fc.count); + if (ret) { + ocf_core_log(core, log_err, "Flushing operation aborted, " + "no memory\n"); + goto out; + } + + fc.core_id = core_id; + fc.iter = 0; + + ret = _ocf_mngt_flush_containers(cache, &fc, 1, allow_interruption); + + _ocf_mngt_free_sectors(fc.flush_data); + +out: + ocf_metadata_unlock(cache, OCF_METADATA_WR); + return ret; +} + +static int _ocf_mngt_flush_all_cores(ocf_cache_t cache, bool allow_interruption) +{ + struct flush_container *fctbl = NULL; + uint32_t fcnum = 0; + int ret; + + ocf_metadata_lock(cache, OCF_METADATA_WR); + + /* Get all 'dirty' sectors for all cores */ + ret = _ocf_mngt_get_flush_containers(cache, &fctbl, &fcnum); + if (ret) { + ocf_cache_log(cache, log_err, "Flushing operation aborted, " + "no memory\n"); + goto out; + } + + ret = _ocf_mngt_flush_containers(cache, fctbl, fcnum, + allow_interruption); + + _ocf_mngt_free_flush_containers(fctbl, fcnum); + +out: + ocf_metadata_unlock(cache, OCF_METADATA_WR); + return ret; +} + +/** + * Flush all the dirty data stored on cache (all the cores attached to it) + * @param cache cache instance to which operation applies + * @param allow_interruption whenever to allow interruption of flushing process. + * if set to 0, all requests to interrupt flushing will be ignored + */ +static int _ocf_mng_cache_flush_nolock(ocf_cache_t cache, bool interruption) +{ + int result = 0; + int i, j; + + env_atomic_set(&cache->flush_in_progress, 1); + cache->flushing_interrupted = 0; + do { + env_cond_resched(); + result = _ocf_mngt_flush_all_cores(cache, interruption); + if (result) { + /* Cleaning error */ + break; + } + } while (ocf_mngt_cache_is_dirty(cache)); + + env_atomic_set(&cache->flush_in_progress, 0); + for (i = 0, j = 0; i < OCF_CORE_MAX; i++) { + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + + env_atomic_set(&cache->core_obj[i].flushed, 0); + + if (++j == cache->conf_meta->core_obj_count) + break; + } + + return result; +} + +int ocf_mngt_cache_flush_nolock(ocf_cache_t cache, bool interruption) +{ + int result = 0; + + OCF_CHECK_NULL(cache); + + if (ocf_cache_is_incomplete(cache)) { + ocf_cache_log(cache, log_err, "Cannot flush cache - " + "cache is in incomplete state\n"); + return -OCF_ERR_CACHE_IN_INCOMPLETE_STATE; + } + + ocf_cache_log(cache, log_info, "Flushing cache\n"); + + _ocf_mngt_begin_flush(cache); + + result = _ocf_mng_cache_flush_nolock(cache, interruption); + + _ocf_mngt_end_flush(cache); + + if (!result) + ocf_cache_log(cache, log_info, "Flushing cache completed\n"); + + return result; +} + +static int _ocf_mng_core_flush_nolock(ocf_core_t core, bool interruption) +{ + struct ocf_cache *cache = core->obj.cache; + ocf_core_id_t core_id = ocf_core_get_id(core); + int ret; + + cache->flushing_interrupted = 0; + do { + env_cond_resched(); + ret = _ocf_mngt_flush_core(core, interruption); + if (ret == -OCF_ERR_FLUSHING_INTERRUPTED || + ret == -OCF_ERR_WRITE_CORE) { + break; + } + } while (env_atomic_read(&cache->core_runtime_meta[core_id]. 
+ dirty_clines)); + + env_atomic_set(&core->flushed, 0); + + return ret; +} + +int ocf_mngt_core_flush_nolock(ocf_cache_t cache, ocf_core_id_t id, + bool interruption) +{ + ocf_core_t core; + int ret = 0; + + OCF_CHECK_NULL(cache); + + ret = ocf_core_get(cache, id, &core); + if (ret < 0) + return -OCF_ERR_CORE_NOT_AVAIL; + + if (!core->opened) { + ocf_core_log(core, log_err, "Cannot flush - core is in " + "inactive state\n"); + return -OCF_ERR_CORE_IN_INACTIVE_STATE; + } + + ocf_core_log(core, log_info, "Flushing\n"); + + _ocf_mngt_begin_flush(cache); + + ret = _ocf_mng_core_flush_nolock(core, interruption); + + _ocf_mngt_end_flush(cache); + + if (!ret) + ocf_cache_log(cache, log_info, "Flushing completed\n"); + + return ret; +} + +int ocf_mngt_cache_flush(ocf_cache_t cache, bool interruption) +{ + int result = ocf_mngt_cache_read_lock(cache); + + if (result) + return result; + + if (!ocf_cache_is_device_attached(cache)) { + result = -OCF_ERR_INVAL; + goto unlock; + } + + result = ocf_mngt_cache_flush_nolock(cache, interruption); + +unlock: + ocf_mngt_cache_read_unlock(cache); + return result; +} + +int ocf_mngt_core_flush(ocf_cache_t cache, ocf_core_id_t id, bool interruption) +{ + int result; + + /* lock read only */ + result = ocf_mngt_cache_read_lock(cache); + if (result) + return result; + + if (!ocf_cache_is_device_attached(cache)) { + result = -OCF_ERR_INVAL; + goto unlock; + } + + result = ocf_mngt_core_flush_nolock(cache, id, interruption); + +unlock: + ocf_mngt_cache_read_unlock(cache); + return result; +} + +int ocf_mngt_core_purge(ocf_cache_t cache, ocf_core_id_t core_id, bool interruption) +{ + int result = 0; + uint64_t core_size = ~0ULL; + ocf_core_t core; + + OCF_CHECK_NULL(cache); + + result = ocf_mngt_cache_read_lock(cache); + if (result) + return result; + + result = ocf_core_get(cache, core_id, &core); + if (result < 0) { + ocf_mngt_cache_unlock(cache); + return -OCF_ERR_CORE_NOT_AVAIL; + } + + core_size = ocf_data_obj_get_length(&cache->core_obj[core_id].obj); + core_size = core_size ?: ~0ULL; + + _ocf_mngt_begin_flush(cache); + + ocf_core_log(core, log_info, "Purging\n"); + + result = _ocf_mng_core_flush_nolock(core, interruption); + + if (result) + goto err; + + OCF_METADATA_LOCK_WR(); + result = ocf_metadata_sparse_range(cache, core_id, 0, + core_size); + OCF_METADATA_UNLOCK_WR(); + +err: + _ocf_mngt_end_flush(cache); + + ocf_mngt_cache_read_unlock(cache); + + return result; +} + +int ocf_mngt_cache_purge(ocf_cache_t cache, bool interruption) +{ + int result = 0; + + result = ocf_mngt_cache_read_lock(cache); + if (result) + return result; + + _ocf_mngt_begin_flush(cache); + + ocf_cache_log(cache, log_info, "Purging\n"); + + result = _ocf_mng_cache_flush_nolock(cache, interruption); + + if (result) + goto err; + + OCF_METADATA_LOCK_WR(); + result = ocf_metadata_sparse_range(cache, OCF_CORE_ID_INVALID, 0, + ~0ULL); + OCF_METADATA_UNLOCK_WR(); + +err: + _ocf_mngt_end_flush(cache); + + ocf_mngt_cache_read_unlock(cache); + + return result; +} + +int ocf_mngt_cache_flush_interrupt(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + ocf_cache_log(cache, log_alert, "Flushing interrupt\n"); + cache->flushing_interrupted = 1; + return 0; +} + +int ocf_mngt_cache_cleaning_set_policy(ocf_cache_t cache, ocf_cleaning_t type) +{ + + ocf_cleaning_t old_type; + int ret; + + if (type < 0 || type >= ocf_cleaning_max) + return -OCF_ERR_INVAL; + + + ret = ocf_mngt_cache_lock(cache); + if (ret) + return ret; + + old_type = cache->conf_meta->cleaning_policy_type; + + if (type == old_type) { 
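+		/* Requested policy is already active - log it and bail out. */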
+ ocf_cache_log(cache, log_info, "Cleaning policy %s is already " + "set\n", cleaning_policy_ops[old_type].name); + goto out; + } + + ocf_metadata_lock(cache, OCF_METADATA_WR); + + if (cleaning_policy_ops[old_type].deinitialize) + cleaning_policy_ops[old_type].deinitialize(cache); + + if (cleaning_policy_ops[type].initialize) { + if (cleaning_policy_ops[type].initialize(cache, 1)) { + /* + * If initialization of new cleaning policy failed, + * we set cleaning policy to nop. + */ + type = ocf_cleaning_nop; + ret = -OCF_ERR_INVAL; + } + } + + cache->conf_meta->cleaning_policy_type = type; + + if (type != old_type) { + /* + * If operation was successfull or cleaning policy changed, + * we need to flush superblock. + */ + if (ocf_metadata_flush_superblock(cache)) { + ocf_cache_log(cache, log_err, + "Failed to flush superblock! Changes " + "in cache config are not persistent!\n"); + } + } + + ocf_cache_log(cache, log_info, "Changing cleaning policy from " + "%s to %s\n", cleaning_policy_ops[old_type].name, + cleaning_policy_ops[type].name); + + ocf_metadata_unlock(cache, OCF_METADATA_WR); + +out: + ocf_mngt_cache_unlock(cache); + + return ret; +} + +int ocf_mngt_cache_cleaning_get_policy(ocf_cache_t cache, ocf_cleaning_t *type) +{ + int ret; + + ret = ocf_mngt_cache_read_lock(cache); + if (ret) + return ret; + + *type = cache->conf_meta->cleaning_policy_type; + + ocf_mngt_cache_read_unlock(cache); + + return 0; +} + +int ocf_mngt_cache_cleaning_set_param(ocf_cache_t cache, ocf_cleaning_t type, + uint32_t param_id, uint32_t param_value) +{ + int ret; + + if (type < 0 || type >= ocf_cleaning_max) + return -OCF_ERR_INVAL; + + if (!cleaning_policy_ops[type].set_cleaning_param) + return -OCF_ERR_INVAL; + + ret = ocf_mngt_cache_lock(cache); + if (ret) + return ret; + + ocf_metadata_lock(cache, OCF_METADATA_WR); + + ret = cleaning_policy_ops[type].set_cleaning_param(cache, + param_id, param_value); + + if (ret == 0) { + /* + * If operation was successfull or cleaning policy changed, + * we need to flush superblock. + */ + if (ocf_metadata_flush_superblock(cache)) { + ocf_cache_log(cache, log_err, + "Failed to flush superblock! 
Changes " + "in cache config are not persistent!\n"); + } + } + + ocf_metadata_unlock(cache, OCF_METADATA_WR); + + ocf_mngt_cache_unlock(cache); + + return ret; +} + +int ocf_mngt_cache_cleaning_get_param(ocf_cache_t cache, ocf_cleaning_t type, + uint32_t param_id, uint32_t *param_value) +{ + int ret; + + if (type < 0 || type >= ocf_cleaning_max) + return -OCF_ERR_INVAL; + + if (!cleaning_policy_ops[type].get_cleaning_param) + return -OCF_ERR_INVAL; + + ret = ocf_mngt_cache_read_lock(cache); + if (ret) + return ret; + + ret = cleaning_policy_ops[type].get_cleaning_param(cache, + param_id, param_value); + + ocf_mngt_cache_read_unlock(cache); + + return ret; +} diff --git a/src/mngt/ocf_mngt_io_class.c b/src/mngt/ocf_mngt_io_class.c new file mode 100644 index 0000000..d1ca81b --- /dev/null +++ b/src/mngt/ocf_mngt_io_class.c @@ -0,0 +1,273 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_mngt_common.h" +#include "../ocf_priv.h" +#include "../metadata/metadata.h" +#include "../engine/cache_engine.h" +#include "../utils/utils_part.h" +#include "../eviction/ops.h" +#include "ocf_env.h" + +static uint64_t _ocf_mngt_count_parts_min_size(struct ocf_cache *cache) +{ + struct ocf_user_part *part; + ocf_part_id_t part_id; + uint64_t count = 0; + + for_each_part(cache, part, part_id) { + if (ocf_part_is_valid(part)) + count += part->config->min_size; + } + + return count; +} + +int ocf_mngt_add_partition_to_cache(struct ocf_cache *cache, + ocf_part_id_t part_id, const char *name, uint32_t min_size, + uint32_t max_size, uint8_t priority, bool valid) +{ + uint32_t size; + + if (!name) + return -OCF_ERR_INVAL; + + if (part_id >= OCF_IO_CLASS_MAX) + return -OCF_ERR_INVAL; + + if (cache->user_parts[part_id].config->flags.valid) + return -OCF_ERR_INVAL; + + if (max_size > PARTITION_SIZE_MAX) + return -OCF_ERR_INVAL; + + if (env_strnlen(name, OCF_IO_CLASS_NAME_MAX) >= + OCF_IO_CLASS_NAME_MAX) { + ocf_cache_log(cache, log_info, + "Name of the partition is too long\n"); + return -OCF_ERR_INVAL; + } + + size = sizeof(cache->user_parts[part_id].config->name); + if (env_strncpy(cache->user_parts[part_id].config->name, size, name, size)) + return -OCF_ERR_INVAL; + + cache->user_parts[part_id].config->min_size = min_size; + cache->user_parts[part_id].config->max_size = max_size; + cache->user_parts[part_id].config->priority = priority; + cache->user_parts[part_id].config->cache_mode = ocf_cache_mode_max; + + ocf_part_set_valid(cache, part_id, valid); + ocf_lst_add(&cache->lst_part, part_id); + ocf_part_sort(cache); + + cache->user_parts[part_id].config->flags.added = 1; + + return 0; +} + +static int _ocf_mngt_set_partition_size(struct ocf_cache *cache, + ocf_part_id_t part_id, uint32_t min, uint32_t max) +{ + struct ocf_user_part *part = &cache->user_parts[part_id]; + + if (min > max) + return -OCF_ERR_INVAL; + + if (_ocf_mngt_count_parts_min_size(cache) + min + >= cache->device->collision_table_entries) { + /* Illegal configuration in which sum of all min_sizes exceeds + * cache size. 
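+		 * The collision table has one entry per cache line, so
+		 * collision_table_entries is used here as the cache size
+		 * expressed in cache lines.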
+ */ + return -OCF_ERR_INVAL; + } + + if (max > PARTITION_SIZE_MAX) + max = PARTITION_SIZE_MAX; + + part->config->min_size = min; + part->config->max_size = max; + + return 0; +} + +static int _ocf_mngt_io_class_configure(ocf_cache_t cache, + const struct ocf_mngt_io_class_config *cfg) +{ + int result = -1; + struct ocf_user_part *dest_part; + + ocf_part_id_t part_id = cfg->class_id; + const char *name = cfg->name; + int16_t prio = cfg->prio; + ocf_cache_mode_t cache_mode = cfg->cache_mode; + uint32_t min = cfg->min_size; + uint32_t max = cfg->max_size; + + OCF_CHECK_NULL(cache->device); + + OCF_METADATA_LOCK_WR(); + + dest_part = &cache->user_parts[part_id]; + + if (!ocf_part_is_added(dest_part)) { + ocf_cache_log(cache, log_info, "Setting IO class, id: %u, " + "name: '%s' [ ERROR ]\n", part_id, dest_part->config->name); + OCF_METADATA_UNLOCK_WR(); + return -OCF_ERR_INVAL; + } + + if (part_id == PARTITION_DEFAULT) { + /* Special behavior for default partition */ + + if (!name[0]) { + /* Removing of default partition is not allowed */ + ocf_cache_log(cache, log_info, + "Cannot remove unclassified IO class, " + "id: %u [ ERROR ]\n", part_id); + + OCF_METADATA_UNLOCK_WR(); + + return 0; + } + + /* Try set partition size */ + if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) { + ocf_cache_log(cache, log_info, + "Setting IO class size, id: %u, name: '%s' " + "[ ERROR ]\n", part_id, dest_part->config->name); + OCF_METADATA_UNLOCK_WR(); + return -OCF_ERR_INVAL; + } + ocf_part_set_prio(cache, dest_part, prio); + ocf_part_sort(cache); + dest_part->config->cache_mode = cache_mode; + + ocf_cache_log(cache, log_info, + "Updating Unclassified IO class, id: " + "%u [ OK ]\n", part_id); + + OCF_METADATA_UNLOCK_WR(); + + return 0; + } + + if (name[0]) { + /* Setting */ + result = env_strncpy(dest_part->config->name, sizeof(dest_part->config->name), name, + sizeof(dest_part->config->name)); + if (result) { + OCF_METADATA_UNLOCK_WR(); + return result; + } + + /* Try set partition size */ + if (_ocf_mngt_set_partition_size(cache, part_id, min, max)) { + ocf_cache_log(cache, log_info, + "Setting IO class size, id: %u, name: '%s' " + "[ ERROR ]\n", part_id, dest_part->config->name); + OCF_METADATA_UNLOCK_WR(); + return -OCF_ERR_INVAL; + } + + if (ocf_part_is_valid(dest_part)) { + /* Updating existing */ + ocf_cache_log(cache, log_info, "Updating existing IO " + "class, id: %u, name: '%s' [ OK ]\n", + part_id, dest_part->config->name); + + } else { + /* Adding new */ + ocf_part_set_valid(cache, part_id, true); + + ocf_cache_log(cache, log_info, "Adding new IO class, " + "id: %u, name: '%s' [ OK ]\n", part_id, + dest_part->config->name); + } + + ocf_part_set_prio(cache, dest_part, prio); + dest_part->config->cache_mode = cache_mode; + + result = 0; + + } else { + /* Clearing */ + + if (ocf_part_is_valid(dest_part)) { + /* Removing */ + + result = 0; + + ocf_part_set_valid(cache, part_id, false); + + ocf_cache_log(cache, log_info, + "Removing IO class, id: %u [ %s ]\n", + part_id, result ? "ERROR" : "OK"); + + } else { + /* Does not exist */ + result = -OCF_ERR_IO_CLASS_NOT_EXIST; + } + } + + ocf_part_sort(cache); + + OCF_METADATA_UNLOCK_WR(); + + return result; +} + +static int _ocf_mngt_io_class_validate_cfg(ocf_cache_t cache, + const struct ocf_mngt_io_class_config *cfg) +{ + if (cfg->class_id >= OCF_IO_CLASS_MAX) + return -OCF_ERR_INVAL; + + /* TODO(r.baldyga): ocf_cache_mode_max is allowed for compatibility + * with OCF 3.1 kernel adapter (upgrade in flight) and casadm. 
+ * Forbid ocf_cache_mode_max after fixing these problems. + */ + if (cfg->cache_mode < ocf_cache_mode_none || + cfg->cache_mode > ocf_cache_mode_max) { + return -OCF_ERR_INVAL; + } + + if (!ocf_part_is_name_valid(cfg->name)) { + ocf_cache_log(cache, log_info, + "The name of the partition is not valid\n"); + return -OCF_ERR_INVAL; + } + + if (!ocf_part_is_prio_valid(cfg->prio)) { + ocf_cache_log(cache, log_info, + "Invalid value of the partition priority\n"); + return -OCF_ERR_INVAL; + } + + return 0; +} + +int ocf_mngt_io_class_configure(ocf_cache_t cache, + const struct ocf_mngt_io_class_config *cfg) +{ + int result; + + OCF_CHECK_NULL(cache); + + result = _ocf_mngt_io_class_validate_cfg(cache, cfg); + if (result) + return result; + + result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + result = _ocf_mngt_io_class_configure(cache, cfg); + + ocf_mngt_cache_unlock(cache); + return 0; +} + diff --git a/src/mngt/ocf_mngt_misc.c b/src/mngt/ocf_mngt_misc.c new file mode 100644 index 0000000..002b23c --- /dev/null +++ b/src/mngt/ocf_mngt_misc.c @@ -0,0 +1,29 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_mngt_common.h" +#include "../ocf_priv.h" +#include "../metadata/metadata.h" +#include "../engine/cache_engine.h" +#include "../ocf_ctx_priv.h" + +uint32_t ocf_mngt_cache_get_count(ocf_ctx_t ctx) +{ + struct ocf_cache *cache; + uint32_t count = 0; + + OCF_CHECK_NULL(ctx); + + env_mutex_lock(&ctx->lock); + + /* currently, there are no macros in list.h to get list size.*/ + list_for_each_entry(cache, &ctx->caches, list) + count++; + + env_mutex_unlock(&ctx->lock); + + return count; +} diff --git a/src/ocf_cache.c b/src/ocf_cache.c new file mode 100644 index 0000000..da45173 --- /dev/null +++ b/src/ocf_cache.c @@ -0,0 +1,219 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "metadata/metadata.h" +#include "engine/cache_engine.h" +#include "utils/utils_cache_line.h" +#include "ocf_priv.h" +#include "ocf_cache_priv.h" + +ocf_data_obj_t ocf_cache_get_data_object(ocf_cache_t cache) +{ + return ocf_cache_is_device_attached(cache) ? &cache->device->obj : NULL; +} + +ocf_cache_id_t ocf_cache_get_id(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return cache->cache_id; +} + +int ocf_cache_set_name(ocf_cache_t cache, const char *src, size_t src_size) +{ + OCF_CHECK_NULL(cache); + return env_strncpy(cache->name, sizeof(cache->name), src, src_size); +} + +const char *ocf_cache_get_name(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return cache->name; +} + +bool ocf_cache_is_incomplete(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return env_bit_test(ocf_cache_state_incomplete, &cache->cache_state); +} + +bool ocf_cache_is_running(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return env_bit_test(ocf_cache_state_running, &cache->cache_state); +} + +bool ocf_cache_is_device_attached(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return env_atomic_read(&(cache)->attached); +} + +ocf_cache_mode_t ocf_cache_get_mode(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return cache->conf_meta->cache_mode; +} + +static uint32_t _calc_dirty_for(uint64_t dirty_since) +{ + return dirty_since ? 
+ (env_ticks_to_msecs(env_get_tick_count() - dirty_since) / 1000) + : 0; +} + +int ocf_cache_get_info(ocf_cache_t cache, struct ocf_cache_info *info) +{ + uint32_t i; + uint32_t cache_occupancy_total = 0; + uint32_t dirty_blocks_total = 0; + uint32_t initial_dirty_blocks_total = 0; + uint32_t flushed_total = 0; + uint32_t curr_dirty_cnt; + uint64_t dirty_since = 0; + uint32_t init_dirty_cnt; + uint64_t core_dirty_since; + uint32_t dirty_blocks_inactive = 0; + uint32_t cache_occupancy_inactive = 0; + int result; + + OCF_CHECK_NULL(cache); + + if (!info) + return -OCF_ERR_INVAL; + + result = ocf_mngt_cache_read_lock(cache); + if (result) + return result; + + ENV_BUG_ON(env_memset(info, sizeof(*info), 0)); + + info->attached = ocf_cache_is_device_attached(cache); + if (info->attached) { + info->data_obj_type = ocf_ctx_get_data_obj_type_id(cache->owner, + cache->device->obj.type); + info->size = cache->conf_meta->cachelines; + } + info->core_count = cache->conf_meta->core_obj_count; + + info->cache_mode = ocf_cache_get_mode(cache); + + /* iterate through all possibly valid core objcts, as list of + * valid objects may be not continuous + */ + for (i = 0; i != OCF_CORE_MAX; ++i) { + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + + /* If current dirty blocks exceeds saved initial dirty + * blocks then update the latter + */ + curr_dirty_cnt = env_atomic_read(&cache-> + core_runtime_meta[i].dirty_clines); + init_dirty_cnt = env_atomic_read(&cache-> + core_runtime_meta[i].initial_dirty_clines); + if (init_dirty_cnt && + (curr_dirty_cnt > init_dirty_cnt)) { + env_atomic_set( + &cache->core_runtime_meta[i]. + initial_dirty_clines, + env_atomic_read(&cache-> + core_runtime_meta[i].dirty_clines)); + } + cache_occupancy_total += env_atomic_read(&cache-> + core_runtime_meta[i].cached_clines); + + dirty_blocks_total += env_atomic_read(&(cache-> + core_runtime_meta[i].dirty_clines)); + initial_dirty_blocks_total += env_atomic_read(&(cache-> + core_runtime_meta[i].initial_dirty_clines)); + + if (!cache->core_obj[i].opened) { + cache_occupancy_inactive += env_atomic_read(&cache-> + core_runtime_meta[i].cached_clines); + + dirty_blocks_inactive += env_atomic_read(&(cache-> + core_runtime_meta[i].dirty_clines)); + } + core_dirty_since = env_atomic64_read(&cache-> + core_runtime_meta[i].dirty_since); + if (core_dirty_since) { + dirty_since = (dirty_since ? + MIN(dirty_since, core_dirty_since) : + core_dirty_since); + } + + flushed_total += env_atomic_read( + &cache->core_obj[i].flushed); + } + + info->dirty = dirty_blocks_total; + info->dirty_initial = initial_dirty_blocks_total; + info->occupancy = cache_occupancy_total; + info->dirty_for = _calc_dirty_for(dirty_since); + info->metadata_end_offset = ocf_cache_is_device_attached(cache) ? + cache->device->metadata_offset_line : 0; + + info->state = cache->cache_state; + info->inactive.occupancy = cache_occupancy_inactive; + info->inactive.dirty = dirty_blocks_inactive; + info->flushed = (env_atomic_read(&cache->flush_in_progress)) ? + flushed_total : 0; + + info->fallback_pt.status = ocf_fallback_pt_is_on(cache); + info->fallback_pt.error_counter = + env_atomic_read(&cache->fallback_pt_error_counter); + + info->eviction_policy = cache->conf_meta->eviction_policy_type; + info->cleaning_policy = cache->conf_meta->cleaning_policy_type; + info->metadata_footprint = ocf_cache_is_device_attached(cache) ? 
+ ocf_metadata_size_of(cache) : 0; + info->cache_line_size = ocf_line_size(cache); + + ocf_mngt_cache_read_unlock(cache); + + return 0; +} + +const struct ocf_data_obj_uuid *ocf_cache_get_uuid(ocf_cache_t cache) +{ + if (!ocf_cache_is_device_attached(cache)) + return NULL; + + return ocf_data_obj_get_uuid(ocf_cache_get_data_object(cache)); +} + +uint8_t ocf_cache_get_type_id(ocf_cache_t cache) +{ + if (!ocf_cache_is_device_attached(cache)) + return 0xff; + + return ocf_ctx_get_data_obj_type_id(ocf_cache_get_ctx(cache), + ocf_data_obj_get_type(ocf_cache_get_data_object(cache))); +} + +ocf_cache_line_size_t ocf_cache_get_line_size(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return ocf_line_size(cache); +} + +uint64_t ocf_cache_bytes_2_lines(ocf_cache_t cache, uint64_t bytes) +{ + OCF_CHECK_NULL(cache); + return ocf_bytes_2_lines(cache, bytes); +} + +uint32_t ocf_cache_get_core_count(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return cache->conf_meta->core_obj_count; +} + +ocf_ctx_t ocf_cache_get_ctx(ocf_cache_t cache) +{ + OCF_CHECK_NULL(cache); + return cache->owner; +} diff --git a/src/ocf_cache_priv.h b/src/ocf_cache_priv.h new file mode 100644 index 0000000..3cbc899 --- /dev/null +++ b/src/ocf_cache_priv.h @@ -0,0 +1,223 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_CACHE_PRIV_H__ +#define __OCF_CACHE_PRIV_H__ + +#include "ocf/ocf.h" +#include "ocf_env.h" +#include "ocf_data_obj_priv.h" +#include "ocf_core_priv.h" +#include "metadata/metadata_structs.h" +#include "metadata/metadata_partition_structs.h" +#include "metadata/metadata_updater_priv.h" +#include "utils/utils_list.h" +#include "ocf_stats_priv.h" +#include "cleaning/cleaning.h" +#include "ocf_logger_priv.h" + +#define DIRTY_FLUSHED 1 +#define DIRTY_NOT_FLUSHED 0 + +struct ocf_metadata_uuid { + uint32_t size; + uint8_t data[OCF_DATA_OBJ_UUID_MAX_SIZE]; +} __packed; + +#define OCF_CORE_USER_DATA_SIZE 64 + +struct ocf_core_meta_config { + uint8_t type; + + /* This bit means that object was added into cache */ + uint32_t added : 1; + + /* Core sequence number used to correlate cache lines with cores + * when recovering from atomic device */ + ocf_seq_no_t seq_no; + + /* Sequential cutoff threshold (in bytes) */ + uint32_t seq_cutoff_threshold; + + /* Sequential cutoff policy */ + ocf_seq_cutoff_policy seq_cutoff_policy; + + /* core object size in bytes */ + uint64_t length; + + uint8_t user_data[OCF_CORE_USER_DATA_SIZE]; +}; + +struct ocf_core_meta_runtime { + /* Number of blocks from that objects that currently are cached + * on the caching device. + */ + env_atomic cached_clines; + env_atomic dirty_clines; + env_atomic initial_dirty_clines; + + env_atomic64 dirty_since; + + struct { + /* clines within lru list (?) */ + env_atomic cached_clines; + /* dirty clines assigned to this specific partition within + * cache device + */ + env_atomic dirty_clines; + } part_counters[OCF_IO_CLASS_MAX]; +}; + +/** + * @brief Initialization mode of cache instance + */ +enum ocf_mngt_cache_init_mode { + /** + * @brief Set up an SSD as new caching device + */ + ocf_init_mode_init, + + /** + * @brief Set up an SSD as new caching device without saving cache + * metadata on SSD. 
+ * + * When using this initialization mode, after shutdown, loading cache + * is not possible + */ + ocf_init_mode_metadata_volatile, + + /** + * @brief Load pre-existing SSD cache state and set all parameters + * to previous configurations + */ + ocf_init_mode_load, +}; + +/* Cache device */ +struct ocf_cache_device { + struct ocf_data_obj obj; + + ocf_cache_line_t metadata_offset_line; + + /* Hash Table contains contains pointer to the entry in + * Collision Table so it actually contains collision Table + * indexes. + * Invalid entry is collision_table_entries. + */ + unsigned int hash_table_entries; + unsigned int collision_table_entries; + + int metadata_error; + /*!< This field indicates that an error during metadata IO + * occurred + */ + + uint64_t metadata_offset; + + struct ocf_part *freelist_part; + + struct { + struct ocf_cache_concurrency *cache; + } concurrency; + + enum ocf_mngt_cache_init_mode init_mode; + + struct ocf_superblock_runtime *runtime_meta; +}; + +struct ocf_cache { + ocf_ctx_t owner; + + struct list_head list; + /* set to make valid */ + uint8_t valid_ocf_cache_device_t; + /* unset running to not serve any more I/O requests */ + unsigned long cache_state; + + env_atomic ref_count; + + struct ocf_superblock_config *conf_meta; + + struct ocf_cache_device *device; + + struct ocf_lst lst_part; + struct ocf_user_part user_parts[OCF_IO_CLASS_MAX + 1]; + + struct ocf_metadata metadata; + + ocf_eviction_t eviction_policy_init; + + int cache_id; + + char name[OCF_CACHE_NAME_SIZE]; + + env_atomic pending_requests; + + env_atomic pending_cache_requests; + env_waitqueue pending_cache_wq; + + env_atomic pending_dirty_requests; + env_waitqueue pending_dirty_wq; + + uint32_t fallback_pt_error_threshold; + env_atomic fallback_pt_error_counter; + + env_atomic pending_read_misses_list_blocked; + env_atomic pending_read_misses_list_count; + + env_atomic last_access_ms; + + env_atomic pending_eviction_clines; + + struct ocf_queue *io_queues; + uint32_t io_queues_no; + + uint16_t ocf_core_inactive_count; + struct ocf_core core_obj[OCF_CORE_MAX]; + struct ocf_core_meta_config *core_conf_meta; + struct ocf_core_meta_runtime *core_runtime_meta; + + env_atomic flush_in_progress; + env_atomic flush_started; + + /* 1 if cache device attached, 0 otherwise */ + env_atomic attached; + + env_atomic cleaning[OCF_IO_CLASS_MAX]; + + struct ocf_cleaner cleaner; + struct ocf_metadata_updater metadata_updater; + + env_rwsem lock; + env_atomic lock_waiter; + /*!< most of the time this variable is set to 0, unless user requested + *!< interruption of flushing process via ioctl/ + */ + int flushing_interrupted; + env_mutex flush_mutex; + + struct { + uint32_t max_queue_size; + uint32_t queue_unblock_size; + } backfill; + + bool pt_unaligned_io; + + bool use_submit_io_fast; + + void *cleaning_policy_context; +}; + +#define ocf_cache_log_prefix(cache, lvl, prefix, fmt, ...) \ + ocf_log_prefix(ocf_cache_get_ctx(cache), lvl, "[Cache %s] ", \ + prefix fmt, ocf_cache_get_name(cache), ##__VA_ARGS__) + +#define ocf_cache_log(cache, lvl, fmt, ...) 
\ + ocf_cache_log_prefix(cache, lvl, "", fmt, ##__VA_ARGS__) + +#define ocf_cache_log_rl(cache) \ + ocf_log_rl(ocf_cache_get_ctx(cache)) + +#endif /* __OCF_CACHE_PRIV_H__ */ diff --git a/src/ocf_core.c b/src/ocf_core.c new file mode 100644 index 0000000..007bcce --- /dev/null +++ b/src/ocf_core.c @@ -0,0 +1,652 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_priv.h" +#include "ocf_core_priv.h" +#include "ocf_io_priv.h" +#include "metadata/metadata.h" +#include "engine/cache_engine.h" +#include "utils/utils_rq.h" +#include "utils/utils_part.h" +#include "utils/utils_device.h" +#include "ocf_request.h" + +ocf_cache_t ocf_core_get_cache(ocf_core_t core) +{ + OCF_CHECK_NULL(core); + return core->obj.cache; +} + +ocf_data_obj_t ocf_core_get_data_object(ocf_core_t core) +{ + OCF_CHECK_NULL(core); + return &core->obj; +} + +ocf_core_id_t ocf_core_get_id(ocf_core_t core) +{ + struct ocf_cache *cache; + ocf_core_id_t core_id; + + OCF_CHECK_NULL(core); + + cache = core->obj.cache; + core_id = core - cache->core_obj; + + return core_id; +} + +int ocf_core_set_name(ocf_core_t core, const char *src, size_t src_size) +{ + OCF_CHECK_NULL(core); + OCF_CHECK_NULL(src); + + return env_strncpy(core->name, sizeof(core->name), src, src_size); +} + +const char *ocf_core_get_name(ocf_core_t core) +{ + OCF_CHECK_NULL(core); + + return core->name; +} + +ocf_core_state_t ocf_core_get_state(ocf_core_t core) +{ + OCF_CHECK_NULL(core); + + return core->opened ? + ocf_core_state_active : ocf_core_state_inactive; +} + +bool ocf_core_is_valid(ocf_cache_t cache, ocf_core_id_t id) +{ + OCF_CHECK_NULL(cache); + + if (id > OCF_CORE_ID_MAX || id < OCF_CORE_ID_MIN) + return false; + + if (!env_bit_test(id, cache->conf_meta->valid_object_bitmap)) + return false; + + return true; +} + +int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core) +{ + OCF_CHECK_NULL(cache); + + if (!ocf_core_is_valid(cache, id)) + return -OCF_ERR_CORE_NOT_AVAIL; + + *core = &cache->core_obj[id]; + return 0; +} + +int ocf_core_set_uuid(ocf_core_t core, const struct ocf_data_obj_uuid *uuid) +{ + struct ocf_cache *cache; + struct ocf_data_obj_uuid *current_uuid; + int result; + int diff; + + OCF_CHECK_NULL(core); + OCF_CHECK_NULL(uuid); + OCF_CHECK_NULL(uuid->data); + + cache = core->obj.cache; + current_uuid = &ocf_core_get_data_object(core)->uuid; + + result = env_memcmp(current_uuid->data, current_uuid->size, + uuid->data, uuid->size, &diff); + if (result) + return result; + + if (!diff) { + /* UUIDs are identical */ + return 0; + } + + result = ocf_uuid_core_set(cache, core, uuid); + if (result) + return result; + + result = ocf_metadata_flush_superblock(cache); + if (result) { + result = -OCF_ERR_WRITE_CACHE; + } + + return result; +} + +static inline void inc_dirty_rq_counter(struct ocf_core_io *core_io, + ocf_cache_t cache) +{ + core_io->dirty = 1; + env_atomic_inc(&cache->pending_dirty_requests); +} + +static inline void dec_counter_if_rq_was_dirty(struct ocf_core_io *core_io, + ocf_cache_t cache) +{ + int pending_dirty_rq_count; + + if (!core_io->dirty) + return; + + pending_dirty_rq_count = + env_atomic_dec_return(&cache->pending_dirty_requests); + + ENV_BUG_ON(pending_dirty_rq_count < 0); + + core_io->dirty = 0; + + if (!pending_dirty_rq_count) + env_waitqueue_wake_up(&cache->pending_dirty_wq); +} + +/* *** CORE IO *** */ + +static inline struct ocf_core_io *ocf_io_to_core_io(struct ocf_io *io) +{ + return container_of(io, struct 
ocf_core_io, base); +} + +static void ocf_core_io_get(struct ocf_io *io) +{ + struct ocf_core_io *core_io; + int value; + + OCF_CHECK_NULL(io); + + core_io = ocf_io_to_core_io(io); + value = env_atomic_inc_return(&core_io->ref_counter); + + ENV_BUG_ON(value < 1); +} + +static void ocf_core_io_put(struct ocf_io *io) +{ + struct ocf_core_io *core_io; + ocf_cache_t cache; + int value; + + OCF_CHECK_NULL(io); + + core_io = ocf_io_to_core_io(io); + value = env_atomic_dec_return(&core_io->ref_counter); + + ENV_BUG_ON(value < 0); + + if (value) + return; + + cache = ocf_core_get_cache(core_io->core); + + core_io->data = NULL; + env_allocator_del(cache->owner->resources.core_io_allocator, core_io); +} + +static int ocf_core_io_set_data(struct ocf_io *io, + ctx_data_t *data, uint32_t offset) +{ + struct ocf_core_io *core_io; + + OCF_CHECK_NULL(io); + + if (!data || offset) + return -EINVAL; + + core_io = ocf_io_to_core_io(io); + core_io->data = data; + + return 0; +} + +static ctx_data_t *ocf_core_io_get_data(struct ocf_io *io) +{ + struct ocf_core_io *core_io; + + OCF_CHECK_NULL(io); + + core_io = ocf_io_to_core_io(io); + return core_io->data; +} + +uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core) +{ + uint32_t core_id = ocf_core_get_id(core); + ocf_cache_t cache = ocf_core_get_cache(core); + + return cache->core_conf_meta[core_id].seq_cutoff_threshold; +} + +ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core) +{ + uint32_t core_id = ocf_core_get_id(core); + ocf_cache_t cache = ocf_core_get_cache(core); + + return cache->core_conf_meta[core_id].seq_cutoff_policy; +} + +const struct ocf_io_ops ocf_core_io_ops = { + .set_data = ocf_core_io_set_data, + .get_data = ocf_core_io_get_data, + .get = ocf_core_io_get, + .put = ocf_core_io_put, +}; + +int ocf_core_set_user_metadata_raw(ocf_core_t core, void *data, size_t size) +{ + ocf_cache_t cache = ocf_core_get_cache(core); + uint32_t core_id = ocf_core_get_id(core); + + if (size > OCF_CORE_USER_DATA_SIZE) + return -EINVAL; + + env_memcpy(cache->core_conf_meta[core_id].user_data, + OCF_CORE_USER_DATA_SIZE, data, size); + + return 0; +} + +int ocf_core_set_user_metadata(ocf_core_t core, void *data, size_t size) +{ + ocf_cache_t cache; + int ret; + + OCF_CHECK_NULL(core); + OCF_CHECK_NULL(data); + + cache = ocf_core_get_cache(core); + + ret = ocf_core_set_user_metadata_raw(core, data, size); + if (ret) + return ret; + + ret = ocf_metadata_flush_superblock(cache); + if (ret) + return -OCF_ERR_WRITE_CACHE; + + return 0; +} + +int ocf_core_get_user_metadata(ocf_core_t core, void *data, size_t size) +{ + uint32_t core_id; + ocf_cache_t cache; + + OCF_CHECK_NULL(core); + + core_id = ocf_core_get_id(core); + cache = ocf_core_get_cache(core); + + if (size > sizeof(cache->core_conf_meta[core_id].user_data)) + return -EINVAL; + + env_memcpy(data, size, cache->core_conf_meta[core_id].user_data, + OCF_CORE_USER_DATA_SIZE); + + return 0; +} + +/* *** OCF API *** */ + +static inline int ocf_validate_io(struct ocf_core_io *core_io) +{ + ocf_cache_t cache = ocf_core_get_cache(core_io->core); + struct ocf_io *io = &core_io->base; + + if (!io->obj) + return -EINVAL; + + if (!io->ops) + return -EINVAL; + + if (io->addr >= ocf_data_obj_get_length(io->obj)) + return -EINVAL; + + if (io->addr + io->bytes > ocf_data_obj_get_length(io->obj)) + return -EINVAL; + + if (io->class >= OCF_IO_CLASS_MAX) + return -EINVAL; + + if (io->dir != OCF_READ && io->dir != OCF_WRITE) + return -EINVAL; + + if (io->io_queue >= cache->io_queues_no) + return -EINVAL; + + 
if (!io->end) + return -EINVAL; + + return 0; +} + +static void ocf_req_complete(struct ocf_request *req, int error) +{ + /* Complete IO */ + ocf_io_end(req->io, error); + + dec_counter_if_rq_was_dirty(ocf_io_to_core_io(req->io), req->cache); + + /* Invalidate OCF IO, it is not valid after completion */ + ocf_core_io_put(req->io); + req->io = NULL; +} + +struct ocf_io *ocf_new_io(ocf_core_t core) +{ + ocf_cache_t cache; + struct ocf_core_io *core_io; + + OCF_CHECK_NULL(core); + + cache = ocf_core_get_cache(core); + if (!cache) + return NULL; + + core_io = env_allocator_new( + cache->owner->resources.core_io_allocator); + if (!core_io) + return NULL; + + core_io->base.obj = ocf_core_get_data_object(core); + core_io->base.ops = &ocf_core_io_ops; + core_io->core = core; + + env_atomic_set(&core_io->ref_counter, 1); + + return &core_io->base; +} + +int ocf_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode) +{ + struct ocf_core_io *core_io; + ocf_req_cache_mode_t req_cache_mode; + ocf_core_t core; + ocf_cache_t cache; + int ret; + + if (!io) + return -EINVAL; + + core_io = ocf_io_to_core_io(io); + + ret = ocf_validate_io(core_io); + if (ret < 0) + return ret; + + core = core_io->core; + cache = ocf_core_get_cache(core); + + if (unlikely(!env_bit_test(ocf_cache_state_running, + &cache->cache_state))) { + ocf_io_end(io, -EIO); + return 0; + } + + /* TODO: instead of casting ocf_cache_mode_t to ocf_req_cache_mode_t + we can resolve IO interface here and get rid of the latter. */ + req_cache_mode = cache_mode; + + if (cache_mode == ocf_cache_mode_none) + req_cache_mode = ocf_get_effective_cache_mode(cache, core, io); + + if (req_cache_mode == ocf_req_cache_mode_wb) { + inc_dirty_rq_counter(core_io, cache); + + //Double cache mode check prevents sending WB request + //while flushing is performed. 
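+		//The counter is incremented before the mode is re-read, so an
+		//in-flight WB request is always accounted for; if the re-read
+		//mode is no longer WB the counter is dropped again below.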
+ req_cache_mode = ocf_get_effective_cache_mode(cache, core, io); + if (req_cache_mode != ocf_req_cache_mode_wb) + dec_counter_if_rq_was_dirty(core_io, cache); + } + + if (cache->conf_meta->valid_parts_no <= 1) + io->class = 0; + + core_io->req = ocf_rq_new(cache, ocf_core_get_id(core), + io->addr, io->bytes, io->dir); + if (!core_io->req) { + dec_counter_if_rq_was_dirty(core_io, cache); + io->end(io, -ENOMEM); + return 0; + } + + if (core_io->req->d2c) + req_cache_mode = ocf_req_cache_mode_d2c; + + core_io->req->io_queue = io->io_queue; + core_io->req->part_id = ocf_part_class2id(cache, io->class); + core_io->req->data = core_io->data; + core_io->req->complete = ocf_req_complete; + core_io->req->io = io; + + ocf_seq_cutoff_update(core, core_io->req); + + ocf_core_update_stats(core, io); + + ocf_core_io_get(io); + ret = ocf_engine_hndl_rq(core_io->req, req_cache_mode); + if (ret) { + dec_counter_if_rq_was_dirty(core_io, cache); + ocf_rq_put(core_io->req); + io->end(io, ret); + } + + return 0; +} + +int ocf_submit_io_fast(struct ocf_io *io) +{ + struct ocf_core_io *core_io; + ocf_req_cache_mode_t req_cache_mode; + struct ocf_request *req; + ocf_core_t core; + ocf_cache_t cache; + int fast; + int ret; + + if (!io) + return -EINVAL; + + core_io = ocf_io_to_core_io(io); + + ret = ocf_validate_io(core_io); + if (ret < 0) + return ret; + + core = core_io->core; + cache = ocf_core_get_cache(core); + + if (unlikely(!env_bit_test(ocf_cache_state_running, + &cache->cache_state))) { + ocf_io_end(io, -EIO); + return 0; + } + + req_cache_mode = ocf_get_effective_cache_mode(cache, core, io); + if (req_cache_mode == ocf_req_cache_mode_wb) { + inc_dirty_rq_counter(core_io, cache); + + //Double cache mode check prevents sending WB request + //while flushing is performed. 
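+		//Same ordering as in ocf_submit_io_mode(): increment the
+		//dirty counter first, then re-read the effective mode.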
+ req_cache_mode = ocf_get_effective_cache_mode(cache, core, io); + if (req_cache_mode != ocf_req_cache_mode_wb) + dec_counter_if_rq_was_dirty(core_io, cache); + } + + switch (req_cache_mode) { + case ocf_req_cache_mode_pt: + return -EIO; + case ocf_req_cache_mode_wb: + req_cache_mode = ocf_req_cache_mode_fast; + break; + default: + if (cache->use_submit_io_fast) + break; + if (io->dir == OCF_WRITE) + return -EIO; + + req_cache_mode = ocf_req_cache_mode_fast; + } + + if (cache->conf_meta->valid_parts_no <= 1) + io->class = 0; + + core_io->req = ocf_rq_new_extended(cache, ocf_core_get_id(core), + io->addr, io->bytes, io->dir); + // We need additional pointer to req in case completion arrives before + // we leave this function and core_io is freed + req = core_io->req; + + if (!req) { + dec_counter_if_rq_was_dirty(core_io, cache); + io->end(io, -ENOMEM); + return 0; + } + if (req->d2c) { + dec_counter_if_rq_was_dirty(core_io, cache); + ocf_rq_put(req); + return -EIO; + } + + req->io_queue = io->io_queue; + req->part_id = ocf_part_class2id(cache, io->class); + req->data = core_io->data; + req->complete = ocf_req_complete; + req->io = io; + + ocf_core_update_stats(core, io); + ocf_core_io_get(io); + + fast = ocf_engine_hndl_fast_rq(req, req_cache_mode); + if (fast != OCF_FAST_PATH_NO) { + ocf_seq_cutoff_update(core, req); + return 0; + } + + dec_counter_if_rq_was_dirty(core_io, cache); + + ocf_core_io_put(io); + ocf_rq_put(req); + return -EIO; +} + +int ocf_submit_flush(struct ocf_io *io) +{ + struct ocf_core_io *core_io; + ocf_core_t core; + ocf_cache_t cache; + int ret; + + if (!io) + return -EINVAL; + + core_io = ocf_io_to_core_io(io); + + ret = ocf_validate_io(core_io); + if (ret < 0) + return ret; + + core = core_io->core; + cache = ocf_core_get_cache(core); + + if (unlikely(!env_bit_test(ocf_cache_state_running, + &cache->cache_state))) { + ocf_io_end(io, -EIO); + return 0; + } + + core_io->req = ocf_rq_new(cache, ocf_core_get_id(core), + io->addr, io->bytes, io->dir); + if (!core_io->req) { + ocf_io_end(io, -ENOMEM); + return 0; + } + + core_io->req->io_queue = io->io_queue; + core_io->req->complete = ocf_req_complete; + core_io->req->io = io; + core_io->req->data = core_io->data; + + ocf_core_io_get(io); + ocf_engine_hndl_ops_rq(core_io->req); + + return 0; +} + +int ocf_submit_discard(struct ocf_io *io) +{ + struct ocf_core_io *core_io; + ocf_core_t core; + ocf_cache_t cache; + int ret; + + if (!io) + return -EINVAL; + + core_io = ocf_io_to_core_io(io); + + ret = ocf_validate_io(core_io); + if (ret < 0) + return ret; + + core = core_io->core; + cache = ocf_core_get_cache(core); + + if (unlikely(!env_bit_test(ocf_cache_state_running, + &cache->cache_state))) { + ocf_io_end(io, -EIO); + return 0; + } + + core_io->req = ocf_rq_new_discard(cache, ocf_core_get_id(core), + io->addr, io->bytes, OCF_WRITE); + if (!core_io->req) { + ocf_io_end(io, -ENOMEM); + return 0; + } + + core_io->req->io_queue = io->io_queue; + core_io->req->complete = ocf_req_complete; + core_io->req->io = io; + core_io->req->data = core_io->data; + + ocf_core_io_get(io); + ocf_engine_hndl_discard_rq(core_io->req); + + return 0; +} + +int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx, + bool only_opened) +{ + ocf_core_id_t id; + int result = 0; + + OCF_CHECK_NULL(cache); + + if (!visitor) + return -OCF_ERR_INVAL; + + for (id = 0; id < OCF_CORE_MAX; id++) { + if (!env_bit_test(id, cache->conf_meta->valid_object_bitmap)) + continue; + + if (only_opened && !cache->core_obj[id].opened) + continue; 
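+		/* A non-zero return value from the visitor stops the
+		 * iteration and is propagated to the caller.
+		 */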
+ + result = visitor(&cache->core_obj[id], cntx); + if (result) + break; + } + + return result; +} + diff --git a/src/ocf_core_priv.h b/src/ocf_core_priv.h new file mode 100644 index 0000000..e1c48df --- /dev/null +++ b/src/ocf_core_priv.h @@ -0,0 +1,56 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_CORE_PRIV_H__ +#define __OCF_CORE_PRIV_H__ + +#include "ocf/ocf.h" +#include "ocf_env.h" +#include "ocf_data_obj_priv.h" + +struct ocf_core_io { + struct ocf_io base; + ocf_core_t core; + + env_atomic ref_counter; + + bool dirty; + /*!< Indicates if io leaves dirty data */ + + struct ocf_request *req; + ctx_data_t *data; +}; + +struct ocf_core { + char name[OCF_CORE_NAME_SIZE]; + + struct ocf_data_obj obj; + + struct { + uint64_t last; + uint64_t bytes; + int rw; + } seq_cutoff; + + env_atomic flushed; + + /* This bit means that object is open*/ + uint32_t opened : 1; + + struct ocf_counters_core *counters; +}; + +bool ocf_core_is_valid(ocf_cache_t cache, ocf_core_id_t id); + +int ocf_core_set_user_metadata_raw(ocf_core_t core, void *data, size_t size); + +#define ocf_core_log_prefix(core, lvl, prefix, fmt, ...) \ + ocf_cache_log_prefix(ocf_core_get_cache(core), lvl, "[Core %s] ", \ + prefix fmt, ocf_core_get_name(core), ##__VA_ARGS__) + +#define ocf_core_log(core, lvl, fmt, ...) \ + ocf_core_log_prefix(core, lvl, "", fmt, ##__VA_ARGS__) + +#endif /* __OCF_CORE_PRIV_H__ */ diff --git a/src/ocf_ctx.c b/src/ocf_ctx.c new file mode 100644 index 0000000..2a4e324 --- /dev/null +++ b/src/ocf_ctx.c @@ -0,0 +1,196 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_ctx_priv.h" +#include "ocf_priv.h" +#include "ocf_data_obj_priv.h" +#include "ocf_utils.h" +#include "ocf_logger_priv.h" + +/* + * + */ +int ocf_ctx_register_data_obj_type(ocf_ctx_t ctx, uint8_t type_id, + const struct ocf_data_obj_properties *properties) +{ + int result = 0; + + if (!ctx || !properties) + return -EINVAL; + + env_mutex_lock(&ctx->lock); + + if (type_id >= OCF_DATA_OBJ_TYPE_MAX || ctx->data_obj_type[type_id]) { + env_mutex_unlock(&ctx->lock); + result = -EINVAL; + goto err; + } + + ocf_data_obj_type_init(&ctx->data_obj_type[type_id], properties); + if (!ctx->data_obj_type[type_id]) + result = -EINVAL; + + env_mutex_unlock(&ctx->lock); + + if (result) + goto err; + + ocf_log(ctx, log_debug, "'%s' data object operations registered\n", + properties->name); + return 0; + +err: + ocf_log(ctx, log_err, "Failed to register data object operations '%s'", + properties->name); + return result; +} + +/* + * + */ +void ocf_ctx_unregister_data_obj_type(ocf_ctx_t ctx, uint8_t type_id) +{ + OCF_CHECK_NULL(ctx); + + env_mutex_lock(&ctx->lock); + + if (type_id < OCF_DATA_OBJ_TYPE_MAX && ctx->data_obj_type[type_id]) { + ocf_data_obj_type_deinit(ctx->data_obj_type[type_id]); + ctx->data_obj_type[type_id] = NULL; + } + + env_mutex_unlock(&ctx->lock); +} + +/* + * + */ +ocf_data_obj_type_t ocf_ctx_get_data_obj_type(ocf_ctx_t ctx, uint8_t type_id) +{ + OCF_CHECK_NULL(ctx); + + if (type_id >= OCF_DATA_OBJ_TYPE_MAX) + return NULL; + + return ctx->data_obj_type[type_id]; +} + +/* + * + */ +int ocf_ctx_get_data_obj_type_id(ocf_ctx_t ctx, ocf_data_obj_type_t type) +{ + int i; + + OCF_CHECK_NULL(ctx); + + for (i = 0; i < OCF_DATA_OBJ_TYPE_MAX; ++i) { + if (ctx->data_obj_type[i] == type) + return i; + } + + return -1; +} + +/* + * + */ +int ocf_ctx_data_obj_create(ocf_ctx_t ctx, 
ocf_data_obj_t *obj, + struct ocf_data_obj_uuid *uuid, uint8_t type_id) +{ + OCF_CHECK_NULL(ctx); + + if (type_id >= OCF_DATA_OBJ_TYPE_MAX) + return -EINVAL; + + return ocf_data_obj_create(obj, ctx->data_obj_type[type_id], uuid); +} + +/* + * + */ +int ocf_ctx_set_logger(ocf_ctx_t ctx, const struct ocf_logger *logger) +{ + int ret = 0; + + OCF_CHECK_NULL(ctx); + OCF_CHECK_NULL(logger); + + env_mutex_lock(&ctx->lock); + + if (ctx->logger) { + ret = -EINVAL; + goto out; + } + + if (logger->open) { + ret = logger->open(logger); + if (ret) + goto out; + } + + ctx->logger = logger; + +out: + env_mutex_unlock(&ctx->lock); + return ret; +} + +/* + * + */ +int ocf_ctx_init(ocf_ctx_t *ctx, const struct ocf_ctx_ops *ops) +{ + struct ocf_ctx *ocf_ctx; + + OCF_CHECK_NULL(ctx); + OCF_CHECK_NULL(ops); + + ocf_ctx = env_zalloc(sizeof(*ocf_ctx), ENV_MEM_NORMAL); + if (!ocf_ctx) + return -ENOMEM; + + INIT_LIST_HEAD(&ocf_ctx->caches); + if (env_mutex_init(&ocf_ctx->lock)) { + env_free(ocf_ctx); + return -ENOMEM; + } + ocf_ctx->ctx_ops = ops; + + if (ocf_utils_init(ocf_ctx)) { + env_free(ocf_ctx); + return -ENOMEM; + } + + *ctx = ocf_ctx; + + return 0; +} + +/* + * + */ +int ocf_ctx_exit(ocf_ctx_t ctx) +{ + int result = 0; + + OCF_CHECK_NULL(ctx); + + /* Check if caches are setup */ + env_mutex_lock(&ctx->lock); + if (!list_empty(&ctx->caches)) + result = -EEXIST; + env_mutex_unlock(&ctx->lock); + if (result) + return result; + + ocf_utils_deinit(ctx); + if (ctx->logger && ctx->logger->close) + ctx->logger->close(ctx->logger); + env_free(ctx); + + return 0; +} diff --git a/src/ocf_ctx_priv.h b/src/ocf_ctx_priv.h new file mode 100644 index 0000000..7317ed6 --- /dev/null +++ b/src/ocf_ctx_priv.h @@ -0,0 +1,189 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_CTX_PRIV_H__ +#define __OCF_CTX_PRIV_H__ + +#include "ocf_env.h" +#include "ocf/ocf_logger.h" +#include "ocf_logger_priv.h" + +#define OCF_DATA_OBJ_TYPE_MAX 8 + +/** + * @brief OCF main control structure + */ +struct ocf_ctx { + const struct ocf_ctx_ops *ctx_ops; + const struct ocf_logger *logger; + struct ocf_data_obj_type *data_obj_type[OCF_DATA_OBJ_TYPE_MAX]; + env_mutex lock; + struct list_head caches; + struct { + struct list_head core_pool_head; + int core_pool_count; + } core_pool; + + struct { + struct ocf_rq_allocator *rq; + env_allocator *core_io_allocator; + } resources; +}; + +#define ocf_log_prefix(ctx, lvl, prefix, fmt, ...) \ + ocf_log_raw(ctx->logger, lvl, OCF_PREFIX_SHORT prefix fmt, ##__VA_ARGS__) + +#define ocf_log(ctx, lvl, fmt, ...) 
\ + ocf_log_prefix(ctx, lvl, "", fmt, ##__VA_ARGS__) + +#define ocf_log_rl(ctx) \ + ocf_log_raw_rl(ctx->logger, __func__) + +#define ocf_log_stack_trace(ctx) \ + ocf_log_stack_trace_raw(ctx->logger) + +/** + * @name Environment data buffer operations wrappers + * @{ + */ +static inline void *ctx_data_alloc(ocf_ctx_t ctx, uint32_t pages) +{ + return ctx->ctx_ops->data_alloc(pages); +} + +static inline void ctx_data_free(ocf_ctx_t ctx, ctx_data_t *data) +{ + ctx->ctx_ops->data_free(data); +} + +static inline int ctx_data_mlock(ocf_ctx_t ctx, ctx_data_t *data) +{ + return ctx->ctx_ops->data_mlock(data); +} + +static inline void ctx_data_munlock(ocf_ctx_t ctx, ctx_data_t *data) +{ + ctx->ctx_ops->data_munlock(data); +} + +static inline uint32_t ctx_data_rd(ocf_ctx_t ctx, void *dst, + ctx_data_t *src, uint32_t size) +{ + return ctx->ctx_ops->data_rd(dst, src, size); +} + +static inline uint32_t ctx_data_wr(ocf_ctx_t ctx, ctx_data_t *dst, + const void *src, uint32_t size) +{ + return ctx->ctx_ops->data_wr(dst, src, size); +} + +static inline void ctx_data_rd_check(ocf_ctx_t ctx, void *dst, + ctx_data_t *src, uint32_t size) +{ + uint32_t read = ctx_data_rd(ctx, dst, src, size); + + ENV_BUG_ON(read != size); +} + +static inline void ctx_data_wr_check(ocf_ctx_t ctx, ctx_data_t *dst, + const void *src, uint32_t size) +{ + uint32_t written = ctx_data_wr(ctx, dst, src, size); + + ENV_BUG_ON(written != size); +} + +static inline uint32_t ctx_data_zero(ocf_ctx_t ctx, ctx_data_t *dst, + uint32_t size) +{ + return ctx->ctx_ops->data_zero(dst, size); +} + +static inline void ctx_data_zero_check(ocf_ctx_t ctx, ctx_data_t *dst, + uint32_t size) +{ + uint32_t zerored = ctx_data_zero(ctx, dst, size); + + ENV_BUG_ON(zerored != size); +} + +static inline uint32_t ctx_data_seek(ocf_ctx_t ctx, ctx_data_t *dst, + ctx_data_seek_t seek, uint32_t size) +{ + return ctx->ctx_ops->data_seek(dst, seek, size); +} + +static inline void ctx_data_seek_check(ocf_ctx_t ctx, ctx_data_t *dst, + ctx_data_seek_t seek, uint32_t size) +{ + uint32_t bytes = ctx_data_seek(ctx, dst, seek, size); + + ENV_BUG_ON(bytes != size); +} + +static inline uint64_t ctx_data_cpy(ocf_ctx_t ctx, ctx_data_t *dst, ctx_data_t *src, + uint64_t to, uint64_t from, uint64_t bytes) +{ + return ctx->ctx_ops->data_cpy(dst, src, to, from, bytes); +} + +static inline void ctx_data_secure_erase(ocf_ctx_t ctx, ctx_data_t *dst) +{ + return ctx->ctx_ops->data_secure_erase(dst); +} + +static inline int ctx_queue_init(ocf_ctx_t ctx, ocf_queue_t queue) +{ + return ctx->ctx_ops->queue_init(queue); +} + +static inline void ctx_queue_kick(ocf_ctx_t ctx, ocf_queue_t queue, + bool allow_sync) +{ + if (allow_sync && ctx->ctx_ops->queue_kick_sync) + ctx->ctx_ops->queue_kick_sync(queue); + else + ctx->ctx_ops->queue_kick(queue); +} + +static inline void ctx_queue_stop(ocf_ctx_t ctx, ocf_queue_t queue) +{ + ctx->ctx_ops->queue_stop(queue); +} + +static inline int ctx_cleaner_init(ocf_ctx_t ctx, ocf_cleaner_t cleaner) +{ + return ctx->ctx_ops->cleaner_init(cleaner); +} + +static inline void ctx_cleaner_stop(ocf_ctx_t ctx, ocf_cleaner_t cleaner) +{ + ctx->ctx_ops->cleaner_stop(cleaner); +} + +static inline int ctx_metadata_updater_init(ocf_ctx_t ctx, + ocf_metadata_updater_t mu) +{ + return ctx->ctx_ops->metadata_updater_init(mu); +} + +static inline void ctx_metadata_updater_kick(ocf_ctx_t ctx, + ocf_metadata_updater_t mu) +{ + ctx->ctx_ops->metadata_updater_kick(mu); +} + +static inline void ctx_metadata_updater_stop(ocf_ctx_t ctx, + ocf_metadata_updater_t mu) +{ + 
ctx->ctx_ops->metadata_updater_stop(mu); +} + +/** + * @} + */ + +#endif /* __OCF_CTX_PRIV_H__ */ diff --git a/src/ocf_data_obj.c b/src/ocf_data_obj.c new file mode 100644 index 0000000..92058ef --- /dev/null +++ b/src/ocf_data_obj.c @@ -0,0 +1,247 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_priv.h" +#include "ocf_data_obj_priv.h" +#include "ocf_io_priv.h" +#include "ocf_env.h" + +/* + * This is io allocator dedicated for bottom devices. + * Out IO structure looks like this: + * --------------> +-------------------------+ + * | OCF is aware | | + * | of this part. | struct ocf_io_meta | + * | | | + * | +-------------------------+ <---------------- + * | | | Bottom adapter | + * | | struct ocf_io | is aware of | + * | | | this part. | + * --------------> +-------------------------+ | + * | | | + * | Bottom adapter specific | | + * | data structure. | | + * | | | + * +-------------------------+ <---------------- + */ + +#define OCF_IO_ALLOCATOR_TOTAL_SIZE(size) \ + (sizeof(struct ocf_io_meta) + sizeof(struct ocf_io) + size) + +static env_allocator *ocf_io_allocator_create(uint32_t size, const char *name) +{ + return env_allocator_create(OCF_IO_ALLOCATOR_TOTAL_SIZE(size), name); +} + +static void ocf_io_allocator_destroy(env_allocator *allocator) +{ + env_allocator_destroy(allocator); +} + +static struct ocf_io *ocf_io_allocator_new(env_allocator *allocator) +{ + void *data = env_allocator_new(allocator); + + return data ? (data + sizeof(struct ocf_io_meta)) : NULL; +} + +static void ocf_io_allocator_del(env_allocator *allocator, struct ocf_io *io) +{ + if (!io) + return; + + env_allocator_del(allocator, (void *)io - sizeof(struct ocf_io_meta)); +} + +/* + * Data object type + */ + +int ocf_data_obj_type_init(struct ocf_data_obj_type **type, + const struct ocf_data_obj_properties *properties) +{ + const struct ocf_data_obj_ops *ops = &properties->ops; + struct ocf_data_obj_type *new_type; + int ret; + + if (!ops->new_io || !ops->submit_io || !ops->open || !ops->close || + !ops->get_max_io_size || !ops->get_length) { + return -EINVAL; + } + + if (properties->caps.atomic_writes && !ops->submit_metadata) + return -EINVAL; + + new_type = env_zalloc(sizeof(**type), ENV_MEM_NORMAL); + if (!new_type) + return -OCF_ERR_NO_MEM; + + new_type->allocator = ocf_io_allocator_create( + properties->io_context_size, properties->name); + if (!new_type->allocator) { + ret = -ENOMEM; + goto err; + } + + new_type->properties = properties; + + *type = new_type; + + return 0; + +err: + env_free(new_type); + return ret; +} + +void ocf_data_obj_type_deinit(struct ocf_data_obj_type *type) +{ + ocf_io_allocator_destroy(type->allocator); + env_free(type); +} + +/* + * Data object + */ + +ocf_data_obj_type_t ocf_data_obj_get_type(ocf_data_obj_t obj) +{ + OCF_CHECK_NULL(obj); + + return obj->type; +} + +void *ocf_data_obj_get_priv(ocf_data_obj_t obj) +{ + OCF_CHECK_NULL(obj); + + return obj->priv; +} + +void ocf_data_obj_set_priv(ocf_data_obj_t obj, void *priv) +{ + OCF_CHECK_NULL(obj); + + obj->priv = priv; +} + +const struct ocf_data_obj_uuid *ocf_data_obj_get_uuid( + ocf_data_obj_t obj) +{ + OCF_CHECK_NULL(obj); + + return &obj->uuid; +} + +uint64_t ocf_data_obj_get_length(ocf_data_obj_t obj) +{ + OCF_CHECK_NULL(obj); + + return obj->type->properties->ops.get_length(obj); +} + + +ocf_cache_t ocf_data_obj_get_cache(ocf_data_obj_t obj) +{ + OCF_CHECK_NULL(obj); + + return obj->cache; +} + +int 
ocf_data_obj_init(ocf_data_obj_t obj, ocf_data_obj_type_t type, + struct ocf_data_obj_uuid *uuid, bool uuid_copy) +{ + if (!obj || !type) + return -OCF_ERR_INVAL; + + obj->type = type; + + if (!uuid) { + obj->uuid_copy = false; + return 0; + } + + obj->uuid_copy = uuid_copy; + + if (uuid_copy) { + obj->uuid.data = env_strdup(uuid->data, ENV_MEM_NORMAL); + if (!obj->uuid.data) + return -OCF_ERR_NO_MEM; + } else { + obj->uuid.data = uuid->data; + } + + obj->uuid.size = uuid->size; + + return 0; +} + +void ocf_data_obj_deinit(ocf_data_obj_t obj) +{ + OCF_CHECK_NULL(obj); + + if (obj->uuid_copy && obj->uuid.data) + env_free(obj->uuid.data); +} + +int ocf_data_obj_create(ocf_data_obj_t *obj, ocf_data_obj_type_t type, + struct ocf_data_obj_uuid *uuid) +{ + ocf_data_obj_t tmp_obj; + int ret; + + OCF_CHECK_NULL(obj); + + tmp_obj = env_zalloc(sizeof(*tmp_obj), ENV_MEM_NORMAL); + if (!tmp_obj) + return -OCF_ERR_NO_MEM; + + ret = ocf_data_obj_init(tmp_obj, type, uuid, true); + if (ret) { + env_free(tmp_obj); + return ret; + } + + *obj = tmp_obj; + + return 0; +} + +void ocf_data_obj_destroy(ocf_data_obj_t obj) +{ + OCF_CHECK_NULL(obj); + + ocf_data_obj_deinit(obj); + env_free(obj); +} + +struct ocf_io *ocf_data_obj_new_io(ocf_data_obj_t obj) +{ + struct ocf_io *io; + + OCF_CHECK_NULL(obj); + + io = ocf_io_allocator_new(obj->type->allocator); + if (!io) + return NULL; + + io->obj = obj; + + return io; +} + +void ocf_data_obj_del_io(struct ocf_io* io) +{ + OCF_CHECK_NULL(io); + + ocf_io_allocator_del(io->obj->type->allocator, io); +} + +void *ocf_data_obj_get_data_from_io(struct ocf_io* io) +{ + return (void *)io + sizeof(struct ocf_io); +} + diff --git a/src/ocf_data_obj_priv.h b/src/ocf_data_obj_priv.h new file mode 100644 index 0000000..bee0464 --- /dev/null +++ b/src/ocf_data_obj_priv.h @@ -0,0 +1,115 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_DATA_OBJ_PRIV_H__ +#define __OCF_DATA_OBJ_PRIV_H__ + +#include "ocf_env.h" +#include "ocf_io_priv.h" + +struct ocf_data_obj_type { + const struct ocf_data_obj_properties *properties; + env_allocator *allocator; +}; + +struct ocf_data_obj { + ocf_data_obj_type_t type; + struct ocf_data_obj_uuid uuid; + bool uuid_copy; + void *priv; + ocf_cache_t cache; + struct list_head core_pool_item; + struct { + unsigned discard_zeroes:1; + /* true if reading discarded pages returns 0 */ + } features; +}; + +int ocf_data_obj_type_init(struct ocf_data_obj_type **type, + const struct ocf_data_obj_properties *properties); + +void ocf_data_obj_type_deinit(struct ocf_data_obj_type *type); + +static inline struct ocf_io *ocf_dobj_new_io(ocf_data_obj_t obj) +{ + ENV_BUG_ON(!obj->type->properties->ops.new_io); + + return obj->type->properties->ops.new_io(obj); +} + +static inline void ocf_dobj_submit_io(struct ocf_io *io) +{ + ENV_BUG_ON(!io->obj->type->properties->ops.submit_io); + + io->obj->type->properties->ops.submit_io(io); +} + +static inline void ocf_dobj_submit_flush(struct ocf_io *io) +{ + ENV_BUG_ON(!io->obj->type->properties->ops.submit_flush); + /* + * TODO(rbaldyga): Maybe we should supply function for checking + * submit_flush availability and return -ENOTSUPP here? 
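+	 * Note that the ENV_BUG_ON above already asserts that submit_flush
+	 * is provided, so the NULL check below only acts as a safety net in
+	 * environments where ENV_BUG_ON is not fatal.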
+ */ + if (!io->obj->type->properties->ops.submit_flush) + ocf_io_end(io, 0); + else + io->obj->type->properties->ops.submit_flush(io); +} + +static inline void ocf_dobj_submit_discard(struct ocf_io *io) +{ + ENV_BUG_ON(!io->obj->type->properties->ops.submit_discard); + /* + * TODO(rbaldyga): Maybe we should supply function for checking + * submit_discard availability and return -ENOTSUPP here? + */ + if (!io->obj->type->properties->ops.submit_discard) + ocf_io_end(io, 0); + else + io->obj->type->properties->ops.submit_discard(io); +} + +static inline void ocf_dobj_submit_metadata(struct ocf_io *io) +{ + ENV_BUG_ON(!io->obj->type->properties->ops.submit_metadata); + + io->obj->type->properties->ops.submit_metadata(io); +} + +static inline void ocf_dobj_submit_write_zeroes(struct ocf_io *io) +{ + ENV_BUG_ON(!io->obj->type->properties->ops.submit_write_zeroes); + + io->obj->type->properties->ops.submit_write_zeroes(io); +} + +static inline int ocf_data_obj_open(ocf_data_obj_t obj) +{ + ENV_BUG_ON(!obj->type->properties->ops.open); + + return obj->type->properties->ops.open(obj); +} + +static inline void ocf_data_obj_close(ocf_data_obj_t obj) +{ + ENV_BUG_ON(!obj->type->properties->ops.close); + + obj->type->properties->ops.close(obj); +} + +static inline unsigned int ocf_data_obj_get_max_io_size(ocf_data_obj_t obj) +{ + ENV_BUG_ON(!obj->type->properties->ops.get_max_io_size); + + return obj->type->properties->ops.get_max_io_size(obj); +} + +static inline int ocf_data_obj_is_atomic(ocf_data_obj_t obj) +{ + return obj->type->properties->caps.atomic_writes; +} + +#endif /*__OCF_DATA_OBJ_PRIV_H__ */ diff --git a/src/ocf_def_priv.h b/src/ocf_def_priv.h new file mode 100644 index 0000000..de2ef36 --- /dev/null +++ b/src/ocf_def_priv.h @@ -0,0 +1,58 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_DEF_PRIV_H__ +#define __OCF_DEF_PRIV_H__ + +#include "ocf/ocf.h" +#include "ocf_env.h" + +#define BYTES_TO_SECTORS(x) ((x) >> ENV_SECTOR_SHIFT) +#define SECTORS_TO_BYTES(x) ((x) << ENV_SECTOR_SHIFT) + +#define BYTES_TO_PAGES(x) ((((uint64_t)x) + (PAGE_SIZE - 1)) / PAGE_SIZE) +#define PAGES_TO_BYTES(x) (((uint64_t)x) * PAGE_SIZE) + +#ifndef DIV_ROUND_UP +# define DIV_ROUND_UP(x, y) \ + ({ \ + __typeof__ (x) __x = (x); \ + __typeof__ (y) __y = (y); \ + (__x + __y - 1) / __y; \ + }) +#endif + +#ifndef MAX +# define MAX(x,y) \ + ({ \ + __typeof__ (x) __x = (x); \ + __typeof__ (y) __y = (y); \ + __x > __y ? __x : __y; \ + }) +#endif + +#ifndef MIN +#define MIN(x,y) \ + ({ \ + __typeof__ (x) __x = (x); \ + __typeof__ (y) __y = (y); \ + __x < __y ? 
__x : __y; \ + }) +#endif + +#define METADATA_VERSION() ((OCF_VERSION_MAIN << 16) + \ + (OCF_VERSION_MAJOR << 8) + OCF_VERSION_MINOR) + +/* call conditional reschedule every 'iterations' calls */ +#define OCF_COND_RESCHED(cnt, iterations) \ + if (unlikely(++(cnt) == (iterations))) { \ + env_cond_resched(); \ + (cnt) = 0; \ + } + +/* call conditional reschedule with default interval */ +#define OCF_COND_RESCHED_DEFAULT(cnt) OCF_COND_RESCHED(cnt, 1000000) + +#endif diff --git a/src/ocf_io_class.c b/src/ocf_io_class.c new file mode 100644 index 0000000..d67a61b --- /dev/null +++ b/src/ocf_io_class.c @@ -0,0 +1,83 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_priv.h" +#include "metadata/metadata.h" +#include "engine/cache_engine.h" +#include "utils/utils_part.h" + +int ocf_io_class_get_info(ocf_cache_t cache, uint32_t io_class, + struct ocf_io_class_info *info) +{ + ocf_part_id_t part_id; + int result; + + OCF_CHECK_NULL(cache); + + if (!info) + return -OCF_ERR_INVAL; + + if (io_class >= OCF_IO_CLASS_MAX) + return -OCF_ERR_INVAL; + part_id = io_class; + + result = ocf_mngt_cache_read_lock(cache); + if (result) + return result; + + if (!ocf_part_is_valid(&cache->user_parts[part_id])) { + /* Partition does not exist */ + result = -OCF_ERR_IO_CLASS_NOT_EXIST; + goto unlock; + } + + if (env_strncpy(info->name, sizeof(info->name), + cache->user_parts[part_id].config->name, + sizeof(cache->user_parts[part_id].config->name))) { + result = -OCF_ERR_INVAL; + goto unlock; + } + + info->priority = cache->user_parts[part_id].config->priority; + info->curr_size = ocf_cache_is_device_attached(cache) ? + cache->user_parts[part_id].runtime->curr_size : 0; + info->min_size = cache->user_parts[part_id].config->min_size; + info->max_size = cache->user_parts[part_id].config->max_size; + + info->eviction_policy_type = cache->conf_meta->eviction_policy_type; + info->cleaning_policy_type = cache->conf_meta->cleaning_policy_type; + + info->cache_mode = cache->user_parts[part_id].config->cache_mode; + +unlock: + ocf_mngt_cache_read_unlock(cache); + + return result; +} + +int ocf_io_class_visit(ocf_cache_t cache, ocf_io_class_visitor_t visitor, + void *cntx) +{ + struct ocf_user_part *part; + ocf_part_id_t part_id; + int result = 0; + + OCF_CHECK_NULL(cache); + + if (!visitor) + return -OCF_ERR_INVAL; + + for_each_part(cache, part, part_id) { + if (!ocf_part_is_valid(part)) + continue; + + result = visitor(cache, part_id, cntx); + if (result) + break; + } + + return result; +} diff --git a/src/ocf_io_priv.h b/src/ocf_io_priv.h new file mode 100644 index 0000000..a95ca5c --- /dev/null +++ b/src/ocf_io_priv.h @@ -0,0 +1,34 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_IO_PRIV_H__ +#define __OCF_IO_PRIV_H__ + +#include "ocf_request.h" + +struct ocf_io_meta { + struct ocf_request *req; +}; + +static inline void ocf_io_start(struct ocf_io *io) +{ + /* + * We want to call start() callback only once, so after calling + * we set it to NULL to prevent multiple calls. 
+ */ + if (io->start) { + io->start(io); + io->start = NULL; + } +} + +static inline void ocf_io_end(struct ocf_io *io, int error) +{ + if (io->end) + io->end(io, error); + +} + +#endif /* __OCF_IO_PRIV_H__ */ diff --git a/src/ocf_logger.c b/src/ocf_logger.c new file mode 100644 index 0000000..6250ab2 --- /dev/null +++ b/src/ocf_logger.c @@ -0,0 +1,45 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf_env.h" +#include "ocf/ocf_logger.h" +#include "ocf_logger_priv.h" + +/* + * + */ +__attribute__((format(printf, 3, 4))) +int ocf_log_raw(const struct ocf_logger *logger, ocf_logger_lvl_t lvl, + const char *fmt, ...) +{ + va_list args; + int ret; + + if (!logger->printf) + return -ENOTSUP; + + va_start(args, fmt); + ret = logger->printf(logger, lvl, fmt, args); + va_end(args); + + return ret; +} + +int ocf_log_raw_rl(const struct ocf_logger *logger, const char *func_name) +{ + if (!logger->printf_rl) + return -ENOTSUP; + + return logger->printf_rl(func_name); +} + +/* + * + */ +int ocf_log_stack_trace_raw(const struct ocf_logger *logger) +{ + return !logger->dump_stack ? -ENOTSUP : + logger->dump_stack(logger); +} diff --git a/src/ocf_logger_priv.h b/src/ocf_logger_priv.h new file mode 100644 index 0000000..5ce9c0c --- /dev/null +++ b/src/ocf_logger_priv.h @@ -0,0 +1,18 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_LOGGER_PRIV_H__ +#define __OCF_LOGGER_PRIV_H__ + +__attribute__((format(printf, 3, 4))) +int ocf_log_raw(const struct ocf_logger *logger, ocf_logger_lvl_t lvl, + const char *fmt, ...); + +int ocf_log_raw_rl(const struct ocf_logger *logger, const char *func_name); + +int ocf_log_stack_trace_raw(const struct ocf_logger *logger); + + +#endif /* __OCF_LOGGER_PRIV_H__ */ diff --git a/src/ocf_metadata.c b/src/ocf_metadata.c new file mode 100644 index 0000000..a3bf7f4 --- /dev/null +++ b/src/ocf_metadata.c @@ -0,0 +1,102 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#include "ocf_priv.h" +#include "ocf_cache_priv.h" +#include "utils/utils_cache_line.h" + +static inline ocf_cache_line_t ocf_atomic_addr2line( + struct ocf_cache *cache, uint64_t addr) +{ + addr -= cache->device->metadata_offset; + addr = ocf_bytes_2_lines(cache, addr); + return ocf_metadata_map_phy2lg(cache, addr); +} + +static inline uint8_t ocf_atomic_addr2pos(struct ocf_cache *cache, + uint64_t addr) +{ + addr -= cache->device->metadata_offset; + addr = BYTES_TO_SECTORS(addr); + addr %= ocf_line_sectors(cache); + + return addr; +} + +int ocf_metadata_get_atomic_entry(ocf_cache_t cache, + uint64_t addr, struct ocf_atomic_metadata *entry) +{ + OCF_CHECK_NULL(cache); + OCF_CHECK_NULL(entry); + + if (addr > ocf_data_obj_get_length(&cache->device->obj)) + return -EFAULT; + + if (addr < cache->device->metadata_offset) { + /* Metadata IO of OCF */ + ENV_BUG_ON(env_memset(entry, sizeof(*entry), 0)); + } else { + ocf_cache_line_t line = ocf_atomic_addr2line(cache, addr); + uint8_t pos = ocf_atomic_addr2pos(cache, addr); + ocf_core_id_t core_id = OCF_CORE_MAX; + uint64_t core_line = 0; + + ocf_metadata_get_core_info(cache, line, &core_id, &core_line); + + entry->core_seq_no = cache->core_conf_meta[core_id].seq_no; + entry->core_line = core_line; + + entry->valid = metadata_test_valid_one(cache, line, pos); + entry->dirty = metadata_test_dirty_one(cache, line, pos); + } + + return 0; +} + +int 
ocf_metadata_check_invalid_before(ocf_cache_t cache, uint64_t addr) +{ + ocf_cache_line_t line; + uint8_t pos; + int i; + + OCF_CHECK_NULL(cache); + + line = ocf_atomic_addr2line(cache, addr); + pos = ocf_atomic_addr2pos(cache, addr); + + if (!pos || addr < cache->device->metadata_offset) + return 0; + + for (i = 0; i < pos; i++) { + if (metadata_test_valid_one(cache, line, i)) + return 0; + } + + return i; +} + +int ocf_metadata_check_invalid_after(ocf_cache_t cache, uint64_t addr, + uint32_t bytes) +{ + ocf_cache_line_t line; + uint8_t pos; + int i, count = 0; + + OCF_CHECK_NULL(cache); + + line = ocf_atomic_addr2line(cache, addr + bytes); + pos = ocf_atomic_addr2pos(cache, addr + bytes); + + if (!pos || addr < cache->device->metadata_offset) + return 0; + + for (i = pos; i < ocf_line_sectors(cache); i++) { + if (metadata_test_valid_one(cache, line, i)) + return 0; + + count++; + } + + return count; +} diff --git a/src/ocf_priv.h b/src/ocf_priv.h new file mode 100644 index 0000000..53874ec --- /dev/null +++ b/src/ocf_priv.h @@ -0,0 +1,12 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#ifndef __OCF_PRIV_H__ +#define __OCF_PRIV_H__ + +#include "ocf_env.h" + +#define OCF_CHECK_NULL(p) ENV_BUG_ON(!(p)) + +#endif /* __OCF_PRIV_H__ */ diff --git a/src/ocf_queue.c b/src/ocf_queue.c new file mode 100644 index 0000000..2552774 --- /dev/null +++ b/src/ocf_queue.c @@ -0,0 +1,159 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#include "ocf/ocf.h" +#include "ocf/ocf_queue.h" +#include "ocf_priv.h" +#include "ocf_queue_priv.h" +#include "ocf_cache_priv.h" +#include "ocf_ctx_priv.h" +#include "ocf_request.h" +#include "mngt/ocf_mngt_common.h" +#include "engine/cache_engine.h" +#include "ocf_def_priv.h" + +int ocf_alloc_queues(struct ocf_cache *cache) +{ + ENV_BUG_ON(!cache->io_queues_no); + + cache->io_queues = env_zalloc( + sizeof(*cache->io_queues) * cache->io_queues_no, ENV_MEM_NORMAL); + if (!cache->io_queues) + return -ENOMEM; + + return 0; +} + +void ocf_free_queues(struct ocf_cache *cache) +{ + env_free(cache->io_queues); + cache->io_queues_no = 0; + cache->io_queues = NULL; +} + +static void ocf_init_queue(struct ocf_queue *q) +{ + env_atomic_set(&q->io_no, 0); + env_spinlock_init(&q->io_list_lock); + INIT_LIST_HEAD(&q->io_list); +} + +int ocf_start_queues(struct ocf_cache *cache) +{ + int id, result = 0; + struct ocf_queue *q; + + for (id = 0; id < cache->io_queues_no; id++) { + q = &cache->io_queues[id]; + q->cache = cache; + q->id = id; + ocf_init_queue(q); + result = ctx_queue_init(cache->owner, q); + if (result) + break; + } + if (result) { + while (id) { + ctx_queue_stop(cache->owner, + &cache->io_queues[--id]); + } + } + + return result; +} + +void ocf_stop_queues(struct ocf_cache *dev) +{ + int i; + struct ocf_queue *curr; + + ocf_mngt_wait_for_io_finish(dev); + + /* Stop IO threads. */ + for (i = 0 ; i < dev->io_queues_no; i++) { + curr = &dev->io_queues[i]; + ctx_queue_stop(dev->owner, curr); + } +} + +void ocf_io_handle(struct ocf_io *io, void *opaque) +{ + struct ocf_request *req = opaque; + + OCF_CHECK_NULL(req); + + if (req->rw == OCF_WRITE) + req->io_if->write(req); + else + req->io_if->read(req); +} + +void ocf_queue_run(ocf_queue_t q) +{ + struct ocf_request *io_req = NULL; + struct ocf_cache *cache; + unsigned char step = 0; + + OCF_CHECK_NULL(q); + + cache = q->cache; + + while (env_atomic_read(&q->io_no) > 0) { + /* Make sure a request is dequeued. 
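+ * ocf_engine_pop_rq() may still return NULL while io_no is positive
+ * (e.g. when another context drained the list first); in that case the
+ * loop simply retries.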
*/ + io_req = ocf_engine_pop_rq(cache, q); + + if (!io_req) + continue; + + if (io_req->io && io_req->io->handle) + io_req->io->handle(io_req->io, io_req); + else + ocf_io_handle(io_req->io, io_req); + + /* Voluntary preemption every few requests. + * Prevents soft-lockups if preemption is disabled */ + OCF_COND_RESCHED(step, 128); + } +} + +void ocf_queue_set_priv(ocf_queue_t q, void *priv) +{ + OCF_CHECK_NULL(q); + q->priv = priv; +} + +void *ocf_queue_get_priv(ocf_queue_t q) +{ + OCF_CHECK_NULL(q); + return q->priv; +} + +uint32_t ocf_queue_pending_io(ocf_queue_t q) +{ + OCF_CHECK_NULL(q); + return env_atomic_read(&q->io_no); +} + +ocf_cache_t ocf_queue_get_cache(ocf_queue_t q) +{ + OCF_CHECK_NULL(q); + return q->cache; +} + +uint32_t ocf_queue_get_id(ocf_queue_t q) +{ + OCF_CHECK_NULL(q); + return q->id; +} + +int ocf_cache_get_queue(ocf_cache_t cache, unsigned id, ocf_queue_t *q) +{ + OCF_CHECK_NULL(cache); + + if (!q || id >= cache->io_queues_no) + return -OCF_ERR_INVAL; + + *q = &cache->io_queues[id]; + return 0; +} diff --git a/src/ocf_queue_priv.h b/src/ocf_queue_priv.h new file mode 100644 index 0000000..a067675 --- /dev/null +++ b/src/ocf_queue_priv.h @@ -0,0 +1,31 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef OCF_QUEUE_PRIV_H_ +#define OCF_QUEUE_PRIV_H_ + +#include "ocf_env.h" + +struct ocf_queue { + struct ocf_cache *cache; + uint32_t id; + + env_atomic io_no; + + struct list_head io_list; + env_spinlock io_list_lock; + + void *priv; +}; + +int ocf_alloc_queues(struct ocf_cache *cache); + +int ocf_start_queues(struct ocf_cache *cache); + +void ocf_stop_queues(struct ocf_cache *cache); + +void ocf_free_queues(struct ocf_cache *cache); + +#endif diff --git a/src/ocf_request.h b/src/ocf_request.h new file mode 100644 index 0000000..f9d5251 --- /dev/null +++ b/src/ocf_request.h @@ -0,0 +1,202 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_REQUEST_H__ +#define __OCF_REQUEST_H__ + +#include "ocf_env.h" + +struct ocf_req_info { + /* Number of hits, invalid, misses. */ + unsigned int hit_no; + unsigned int invalid_no; + + uint32_t dirty_all; + /*!< Number of dirty line in request*/ + + uint32_t dirty_any; + /*!< Indicates that at least one request is dirty */ + + uint32_t seq_req : 1; + /*!< Sequential cache request flag. */ + + uint32_t seq_cutoff : 1; + /*!< Sequential cut off set for this request */ + + uint32_t flush_metadata : 1; + /*!< This bit tells if metadata flushing is required */ + + uint32_t eviction_error : 1; + /*!< Eviction error flag */ + + uint32_t re_part : 1; + /*!< This bit indicate that in the request some cache lines + * has to be moved to another partition + */ + + uint32_t core_error : 1; + /*!< Error occured during I/O on core device */ + + uint32_t cleaner_cache_line_lock : 1; + /*!< Cleaner flag - acquire cache line lock */ + + uint32_t internal : 1; + /**!< this is an internal request */ +}; + +struct ocf_map_info { + /* If HIT -> pointer to hash_key and coll_idx */ + unsigned int hash_key; + unsigned int coll_idx; + + uint64_t core_line; + + ocf_core_id_t core_id; + /*!< Core id for multi-core requests */ + + uint16_t status : 8; + /*!< Traverse or mapping status - HIT, MISS, etc... 
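+ * (filled in by the engine's traverse/map step; helpers such as
+ * ocf_purge_map_info() skip entries whose status is LOOKUP_MISS)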
*/ + + uint16_t rd_locked : 1; + /*!< Indicates if cache line is locked for READ access */ + + uint16_t wr_locked : 1; + /*!< Indicates if cache line is locked for WRITE access */ + + uint16_t invalid : 1; + /*!< This bit indicates that mapping is invalid */ + + uint16_t re_part : 1; + /*!< This bit indicates if cache line need to be moved to the + * new partition + */ + + uint16_t flush : 1; + /*!< This bit indicates if cache line need to be flushed */ + + uint8_t start_flush; + /*!< If rq need flush, contain first sector of range to flush */ + + uint8_t stop_flush; + /*!< If rq need flush, contain last sector of range to flush */ +}; + +/** + * @brief OCF discard request info + */ +struct ocf_req_discard_info { + sector_t sector; + /*!< The start sector for discard request */ + + sector_t nr_sects; + /*!< Number of sectors to be discarded */ + + sector_t handled; + /*!< Number of processed sector during discard operation */ +}; + +/** + * @brief OCF IO request + */ +struct ocf_request { + env_atomic ref_count; + /*!< Reference usage count, once OCF request reaches zero it + * will be de-initialed. Get/Put method are intended to modify + * reference counter + */ + + env_atomic lock_remaining; + /*!< This filed indicates how many cache lines in the request + * map left to be locked + */ + + env_atomic req_remaining; + /*!< In case of IO this field indicates how many IO left to + * accomplish IO + */ + + env_atomic master_remaining; + /*!< Atomic counter for core device */ + + struct ocf_cache *cache; + /*!< Handle to cache instance */ + + const struct ocf_io_if *io_if; + /*!< IO interface */ + + void (*resume)(struct ocf_request *rq); + /*!< OCF request resume callback */ + + ocf_core_id_t core_id; + /*!< This file indicates core id of request */ + + ocf_part_id_t part_id; + /*!< Targeted partition of requests */ + + void *priv; + /*!< Filed for private data, context */ + + void *master_io_req; + /*!< Core device request context (core private info) */ + + ctx_data_t *data; + /*!< Request data*/ + + ctx_data_t *cp_data; + /*!< Copy of request data */ + + uint64_t byte_position; + /*!< LBA byte position of request in code domain */ + + uint64_t core_line_first; + /*! First core line */ + + uint64_t core_line_last; + /*! Last core line */ + + uint32_t byte_length; + /*!< Byte length of OCF reuqest */ + + uint32_t core_line_count; + /*! Core line count */ + + uint32_t alloc_core_line_count; + /*! 
Core line count for which request was initially allocated */ + + uint32_t io_queue; + /*!< I/O queue id for which request should be submitted */ + + int error; + /*!< This filed indicates an error for OCF request */ + + int rw; + /*!< Indicator of IO direction - Read/Write */ + + struct list_head list; + /*!< List item for OCF IO thread workers */ + + struct ocf_req_info info; + /*!< Detailed request info */ + + uint8_t d2c; + /**!< request affects metadata cachelines (is not direct-to-core) */ + + uint8_t master_io_req_type; + /*!< Core device request context type */ + + void (*complete)(struct ocf_request *ocf_req, int error); + /*!< Request completion funstion */ + + struct ocf_io *io; + /*!< OCF IO associated with request */ + + struct ocf_req_discard_info discard; + + struct ocf_map_info *map; + + struct ocf_map_info __map[]; +}; + +#endif diff --git a/src/ocf_stats.c b/src/ocf_stats.c new file mode 100644 index 0000000..cc03087 --- /dev/null +++ b/src/ocf_stats.c @@ -0,0 +1,399 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_priv.h" +#include "metadata/metadata.h" +#include "engine/cache_engine.h" +#include "utils/utils_part.h" +#include "utils/utils_cache_line.h" + +#ifdef OCF_DEBUG_STATS +static void ocf_stats_debug_init(struct ocf_counters_debug *stats) +{ + int i; + + for (i = 0; i < IO_PACKET_NO; i++) { + env_atomic64_set(&stats->read_size[i], 0); + env_atomic64_set(&stats->write_size[i], 0); + } + + for (i = 0; i < IO_ALIGN_NO; i++) { + env_atomic64_set(&stats->read_align[i], 0); + env_atomic64_set(&stats->write_align[i], 0); + } +} +#endif + +static void ocf_stats_req_init(struct ocf_counters_req *stats) +{ + env_atomic64_set(&stats->full_miss, 0); + env_atomic64_set(&stats->partial_miss, 0); + env_atomic64_set(&stats->total, 0); + env_atomic64_set(&stats->pass_through, 0); +} + +static void ocf_stats_block_init(struct ocf_counters_block *stats) +{ + env_atomic64_set(&stats->read_bytes, 0); + env_atomic64_set(&stats->write_bytes, 0); +} + +static void ocf_stats_part_init(struct ocf_counters_part *stats) +{ + ocf_stats_req_init(&stats->read_reqs); + ocf_stats_req_init(&stats->write_reqs); + + ocf_stats_block_init(&stats->blocks); +} + +static void ocf_stats_error_init(struct ocf_counters_error *stats) +{ + env_atomic_set(&stats->read, 0); + env_atomic_set(&stats->write, 0); +} + +void ocf_stats_init(ocf_core_t core) +{ + int i; + struct ocf_counters_core *exp_obj_stats; + + exp_obj_stats = core->counters; + + ocf_stats_block_init(&exp_obj_stats->core_blocks); + ocf_stats_block_init(&exp_obj_stats->cache_blocks); + + ocf_stats_error_init(&exp_obj_stats->cache_errors); + ocf_stats_error_init(&exp_obj_stats->core_errors); + + for (i = 0; i != OCF_IO_CLASS_MAX; i++) + ocf_stats_part_init(&exp_obj_stats->part_counters[i]); + +#ifdef OCF_DEBUG_STATS + ocf_stats_debug_init(&exp_obj_stats->debug_stats); +#endif +} + +/******************************************************************** + * Function that resets stats, debug and breakdown counters. 
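+ * ocf_stats_initialize() below acquires the cache management lock internally
+ * before any counters are touched.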
+ * If reset is set the following stats won't be reset: + * - cache_occupancy + * - queue_length + * - debug_counters_read_reqs_issued_seq_hits + * - debug_counters_read_reqs_issued_not_seq_hits + * - debug_counters_read_reqs_issued_read_miss_schedule + * - debug_counters_write_reqs_thread + * - debug_counters_write_reqs_issued_only_hdd + * - debug_counters_write_reqs_issued_both_devs + *********************************************************************/ +int ocf_stats_initialize(ocf_cache_t cache, ocf_core_id_t core_id) +{ + ocf_core_t core; + ocf_core_id_t id; + int result; + + result = ocf_mngt_cache_lock(cache); + if (result) + return result; + + if (core_id != OCF_CORE_ID_INVALID) { + result = ocf_core_get(cache, core_id, &core); + if (!result) + ocf_stats_init(core); + + ocf_mngt_cache_unlock(cache); + return result; + } + + for (id = 0; id < OCF_CORE_MAX; id++) { + if (!env_bit_test(id, cache->conf_meta->valid_object_bitmap)) + continue; + + ocf_stats_init(&cache->core_obj[id]); + } + + ocf_mngt_cache_unlock(cache); + + return 0; +} + +static void copy_req_stats(struct ocf_stats_req *dest, + const struct ocf_counters_req *from) +{ + dest->partial_miss = env_atomic64_read(&from->partial_miss); + dest->full_miss = env_atomic64_read(&from->full_miss); + dest->total = env_atomic64_read(&from->total); + dest->pass_through = env_atomic64_read(&from->pass_through); +} + +static void accum_req_stats(struct ocf_stats_req *dest, + const struct ocf_counters_req *from) +{ + dest->partial_miss += env_atomic64_read(&from->partial_miss); + dest->full_miss += env_atomic64_read(&from->full_miss); + dest->total += env_atomic64_read(&from->total); + dest->pass_through += env_atomic64_read(&from->pass_through); +} + +static void copy_block_stats(struct ocf_stats_block *dest, + const struct ocf_counters_block *from) +{ + dest->read = env_atomic64_read(&from->read_bytes); + dest->write = env_atomic64_read(&from->write_bytes); +} + +static void accum_block_stats(struct ocf_stats_block *dest, + const struct ocf_counters_block *from) +{ + dest->read += env_atomic64_read(&from->read_bytes); + dest->write += env_atomic64_read(&from->write_bytes); +} + +static void copy_error_stats(struct ocf_stats_error *dest, + const struct ocf_counters_error *from) +{ + dest->read = env_atomic_read(&from->read); + dest->write = env_atomic_read(&from->write); +} + +#ifdef OCF_DEBUG_STATS +static void copy_debug_stats(struct ocf_stats_core_debug *dest, + const struct ocf_counters_debug *from) +{ + int i; + + for (i = 0; i < IO_PACKET_NO; i++) { + dest->read_size[i] = env_atomic64_read(&from->read_size[i]); + dest->write_size[i] = env_atomic64_read(&from->write_size[i]); + } + + for (i = 0; i < IO_ALIGN_NO; i++) { + dest->read_align[i] = env_atomic64_read(&from->read_align[i]); + dest->write_align[i] = env_atomic64_read(&from->write_align[i]); + } +} +#endif + +int ocf_io_class_get_stats(ocf_core_t core, uint32_t io_class, + struct ocf_stats_io_class *stats) +{ + int result; + uint32_t part_id; + uint32_t i; + uint32_t cache_occupancy_total = 0; + struct ocf_counters_part *part_stat; + ocf_core_id_t core_id; + ocf_cache_t cache; + + OCF_CHECK_NULL(core); + + core_id = ocf_core_get_id(core); + cache = ocf_core_get_cache(core); + + if (!stats) + return -OCF_ERR_INVAL; + + result = ocf_mngt_cache_read_lock(cache); + if (result) + return result; + + if (io_class >= OCF_IO_CLASS_MAX) { + result = -OCF_ERR_INVAL; + goto unlock; + } + + part_id = io_class; + + if (!ocf_part_is_valid(&cache->user_parts[part_id])) { + /* Partition 
does not exist */ + result = -OCF_ERR_IO_CLASS_NOT_EXIST; + goto unlock; + } + + for (i = 0; i != OCF_CORE_MAX; ++i) { + if (!env_bit_test(i, cache->conf_meta-> + valid_object_bitmap)) { + continue; + } + cache_occupancy_total += env_atomic_read(&cache-> + core_runtime_meta[i].cached_clines); + } + + part_stat = &core->counters->part_counters[part_id]; + + stats->occupancy_clines = env_atomic_read(&cache-> + core_runtime_meta[core_id].part_counters[part_id]. + cached_clines); + stats->dirty_clines = env_atomic_read(&cache-> + core_runtime_meta[core_id].part_counters[part_id]. + dirty_clines); + + stats->free_clines = cache->conf_meta->cachelines - + cache_occupancy_total; + + copy_req_stats(&stats->read_reqs, &part_stat->read_reqs); + copy_req_stats(&stats->write_reqs, &part_stat->write_reqs); + + copy_block_stats(&stats->blocks, &part_stat->blocks); + +unlock: + ocf_mngt_cache_read_unlock(cache); + return result; +} + +static uint32_t _calc_dirty_for(uint64_t dirty_since) +{ + return dirty_since ? + (env_ticks_to_msecs(env_get_tick_count() - dirty_since) / 1000) + : 0; +} + +int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats) +{ + int result; + uint32_t i; + ocf_core_id_t core_id; + ocf_cache_t cache; + struct ocf_counters_core *core_stats = NULL; + struct ocf_counters_part *curr = NULL; + + OCF_CHECK_NULL(core); + + core_id = ocf_core_get_id(core); + cache = ocf_core_get_cache(core); + + if (!stats) + return -OCF_ERR_INVAL; + + result = ocf_mngt_cache_read_lock(cache); + if (result) + return result; + + core_stats = core->counters; + + ENV_BUG_ON(env_memset(stats, sizeof(*stats), 0)); + + stats->core_size_bytes = ocf_data_obj_get_length( + &cache->core_obj[core_id].obj); + stats->core_size = ocf_bytes_2_lines_round_up(cache, + stats->core_size_bytes); + stats->seq_cutoff_threshold = ocf_core_get_seq_cutoff_threshold(core); + stats->seq_cutoff_policy = ocf_core_get_seq_cutoff_policy(core); + + + env_atomic_read(&cache->core_runtime_meta[core_id].cached_clines); + + copy_block_stats(&stats->core_obj, &core_stats->core_blocks); + copy_block_stats(&stats->cache_obj, &core_stats->cache_blocks); + + copy_error_stats(&stats->core_errors, + &core_stats->core_errors); + copy_error_stats(&stats->cache_errors, + &core_stats->cache_errors); + +#ifdef OCF_DEBUG_STATS + copy_debug_stats(&stats->debug_stat, + &core_stats->debug_stats); +#endif + + for (i = 0; i != OCF_IO_CLASS_MAX; i++) { + curr = &core_stats->part_counters[i]; + + accum_req_stats(&stats->read_reqs, + &curr->read_reqs); + accum_req_stats(&stats->write_reqs, + &curr->write_reqs); + + accum_block_stats(&stats->core, &curr->blocks); + + stats->cache_occupancy += env_atomic_read(&cache-> + core_runtime_meta[core_id].part_counters[i]. + cached_clines); + stats->dirty += env_atomic_read(&cache-> + core_runtime_meta[core_id].part_counters[i]. 
+ dirty_clines); + } + + stats->flushed = env_atomic_read(&core->flushed); + + stats->dirty_for = _calc_dirty_for( + env_atomic64_read(&cache->core_runtime_meta[core_id].dirty_since)); + + ocf_mngt_cache_read_unlock(cache); + + return 0; +} + +#ifdef OCF_DEBUG_STATS + +#define IO_ALIGNMENT_SIZE (IO_ALIGN_NO) +#define IO_PACKET_SIZE ((IO_PACKET_NO) - 1) + +static uint32_t io_alignment[IO_ALIGNMENT_SIZE] = { + 512, 1 * KiB, 2 * KiB, 4 * KiB +}; + +static int to_align_idx(uint64_t off) +{ + int i; + + for (i = IO_ALIGNMENT_SIZE - 1; i >= 0; i--) { + if (off % io_alignment[i] == 0) + return i; + } + + return IO_ALIGNMENT_SIZE; +} + +static uint32_t io_packet_size[IO_PACKET_SIZE] = { + 512, 1 * KiB, 2 * KiB, 4 * KiB, 8 * KiB, + 16 * KiB, 32 * KiB, 64 * KiB, 128 * KiB, + 256 * KiB, 512 * KiB +}; + + +static int to_packet_idx(uint32_t len) +{ + int i = 0; + + for (i = 0; i < IO_PACKET_SIZE; i++) { + if (len == io_packet_size[i]) + return i; + } + + return IO_PACKET_SIZE; +} + +void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io) +{ + struct ocf_counters_debug *stats; + int idx; + + OCF_CHECK_NULL(core); + OCF_CHECK_NULL(io); + + core_id = ocf_core_get_id(core); + cache = ocf_core_get_cache(core); + + stats = &core->counters->debug_stats; + + idx = to_packet_idx(io->bytes); + if (io->dir == OCF_WRITE) + env_atomic64_inc(&stats->write_size[idx]); + else + env_atomic64_inc(&stats->read_size[idx]); + + idx = to_align_idx(io->addr); + if (io->dir == OCF_WRITE) + env_atomic64_inc(&stats->write_align[idx]); + else + env_atomic64_inc(&stats->read_align[idx]); +} + +#else + +void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io) {} + +#endif diff --git a/src/ocf_stats_builder.c b/src/ocf_stats_builder.c new file mode 100644 index 0000000..49b46a3 --- /dev/null +++ b/src/ocf_stats_builder.c @@ -0,0 +1,314 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_priv.h" +#include "metadata/metadata.h" +#include "engine/cache_engine.h" +#include "utils/utils_part.h" +#include "utils/utils_cache_line.h" + +#define _ocf_stats_zero(stats) \ + do { \ + if (stats) { \ + typeof(*stats) zero = { { 0 } }; \ + *stats = zero; \ + } \ + } while (0) + +static uint64_t _percentage(uint64_t numerator, uint64_t denominator) +{ + uint64_t result; + if (denominator) { + result = 1000 * numerator / denominator; + } else { + result = 0; + } + return result; +} + +static uint64_t _lines4k(uint64_t size, + ocf_cache_line_size_t cache_line_size) +{ + long unsigned int result; + + result = size * (cache_line_size / 4096); + + return result; +} + +static uint64_t _bytes4k(uint64_t bytes) +{ + return (bytes + 4095UL) >> 12; +} + +static uint64_t _get_cache_occupancy(ocf_cache_t cache) +{ + uint64_t result = 0; + uint32_t i; + + for (i = 0; i != OCF_CORE_MAX; ++i) { + if (!env_bit_test(i, cache->conf_meta->valid_object_bitmap)) + continue; + + result += env_atomic_read( + &cache->core_runtime_meta[i].cached_clines); + } + + return result; +} + +static void _set(struct ocf_stat *stat, uint64_t value, uint64_t denominator) +{ + stat->value = value; + stat->percent = _percentage(value, denominator); +} + +static void _fill_rq(struct ocf_stats_requests *rq, struct ocf_stats_core *s) +{ + uint64_t serviced = s->read_reqs.total + s->write_reqs.total; + uint64_t total = serviced + s->read_reqs.pass_through + + s->write_reqs.pass_through; + uint64_t hit; + + /* Reads Section */ + hit = s->read_reqs.total - (s->read_reqs.full_miss + + 
s->read_reqs.partial_miss); + _set(&rq->rd_hits, hit, total); + _set(&rq->rd_partial_misses, s->read_reqs.partial_miss, total); + _set(&rq->rd_full_misses, s->read_reqs.full_miss, total); + _set(&rq->rd_total, s->read_reqs.total, total); + + /* Write Section */ + hit = s->write_reqs.total - (s->write_reqs.full_miss + + s->write_reqs.partial_miss); + _set(&rq->wr_hits, hit, total); + _set(&rq->wr_partial_misses, s->write_reqs.partial_miss, total); + _set(&rq->wr_full_misses, s->write_reqs.full_miss, total); + _set(&rq->wr_total, s->write_reqs.total, total); + + /* Pass-Through section */ + _set(&rq->rd_pt, s->read_reqs.pass_through, total); + _set(&rq->wr_pt, s->write_reqs.pass_through, total); + + /* Summary */ + _set(&rq->serviced, serviced, total); + _set(&rq->total, total, total); +} + +static void _fill_blocks(struct ocf_stats_blocks *blocks, + struct ocf_stats_core *s) +{ + uint64_t rd, wr, total; + + /* Core data object */ + rd = _bytes4k(s->core_obj.read); + wr = _bytes4k(s->core_obj.write); + total = rd + wr; + _set(&blocks->core_obj_rd, rd, total); + _set(&blocks->core_obj_wr, wr, total); + _set(&blocks->core_obj_total, total, total); + + /* Cache data object */ + rd = _bytes4k(s->cache_obj.read); + wr = _bytes4k(s->cache_obj.write); + total = rd + wr; + _set(&blocks->cache_obj_rd, rd, total); + _set(&blocks->cache_obj_wr, wr, total); + _set(&blocks->cache_obj_total, total, total); + + /* Core (cache volume) */ + rd = _bytes4k(s->core.read); + wr = _bytes4k(s->core.write); + total = rd + wr; + _set(&blocks->volume_rd, rd, total); + _set(&blocks->volume_wr, wr, total); + _set(&blocks->volume_total, total, total); +} + +static void _fill_errors(struct ocf_stats_errors *errors, + struct ocf_stats_core *s) +{ + uint64_t rd, wr, total; + + rd = s->core_errors.read; + wr = s->core_errors.write; + total = rd + wr; + _set(&errors->core_obj_rd, rd, total); + _set(&errors->core_obj_wr, wr, total); + _set(&errors->core_obj_total, total, total); + + rd = s->cache_errors.read; + wr = s->cache_errors.write; + total = rd + wr; + _set(&errors->cache_obj_rd, rd, total); + _set(&errors->cache_obj_wr, wr, total); + _set(&errors->cache_obj_total, total, total); + + total = s->core_errors.read + s->core_errors.write + + s->cache_errors.read + s->cache_errors.write; + + _set(&errors->total, total, total); +} + +int ocf_stats_collect_core(ocf_core_t core, + struct ocf_stats_usage *usage, + struct ocf_stats_requests *rq, + struct ocf_stats_blocks *blocks, + struct ocf_stats_errors *errors) +{ + ocf_cache_t cache; + uint64_t cache_occupancy, cache_size, cache_line_size; + struct ocf_stats_core s; + int result; + + OCF_CHECK_NULL(core); + + result = ocf_core_get_stats(core, &s); + if (result) + return result; + + cache = ocf_core_get_cache(core); + cache_line_size = ocf_cache_get_line_size(cache); + cache_size = cache->conf_meta->cachelines; + cache_occupancy = _get_cache_occupancy(cache); + + _ocf_stats_zero(usage); + _ocf_stats_zero(rq); + _ocf_stats_zero(blocks); + _ocf_stats_zero(errors); + + if (usage) { + _set(&usage->occupancy, + _lines4k(s.cache_occupancy, cache_line_size), + _lines4k(cache_size, cache_line_size)); + + _set(&usage->free, + _lines4k(cache_size - cache_occupancy, cache_line_size), + _lines4k(cache_size, cache_line_size)); + + _set(&usage->clean, + _lines4k(s.cache_occupancy - s.dirty, cache_line_size), + _lines4k(s.cache_occupancy, cache_line_size)); + + _set(&usage->dirty, + _lines4k(s.dirty, cache_line_size), + _lines4k(s.cache_occupancy, cache_line_size)); + } + + if (rq) + 
_fill_rq(rq, &s); + + if (blocks) + _fill_blocks(blocks, &s); + + if (errors) + _fill_errors(errors, &s); + + return 0; +} + +static void _accumulate_block(struct ocf_stats_block *to, + const struct ocf_stats_block *from) +{ + to->read += from->read; + to->write += from->write; +} + +static void _accumulate_reqs(struct ocf_stats_req *to, + const struct ocf_stats_req *from) +{ + to->full_miss += from->full_miss; + to->partial_miss += from->partial_miss; + to->total += from->total; + to->pass_through += from->pass_through; +} + +static void _accumulate_errors(struct ocf_stats_error *to, + const struct ocf_stats_error *from) +{ + to->read += from->read; + to->write += from->write; +} + +static int _accumulate_stats(ocf_core_t core, void *cntx) +{ + struct ocf_stats_core stats, *total = cntx; + int result; + + result = ocf_core_get_stats(core, &stats); + if (result) + return result; + + _accumulate_block(&total->cache_obj, &stats.cache_obj); + _accumulate_block(&total->core_obj, &stats.core_obj); + _accumulate_block(&total->core, &stats.core); + + _accumulate_reqs(&total->read_reqs, &stats.read_reqs); + _accumulate_reqs(&total->write_reqs, &stats.write_reqs); + + _accumulate_errors(&total->cache_errors, &stats.cache_errors); + _accumulate_errors(&total->core_errors, &stats.core_errors); + + return 0; +} + +int ocf_stats_collect_cache(ocf_cache_t cache, + struct ocf_stats_usage *usage, + struct ocf_stats_requests *rq, + struct ocf_stats_blocks *blocks, + struct ocf_stats_errors *errors) +{ + uint64_t cache_line_size; + struct ocf_cache_info info; + struct ocf_stats_core s = { 0 }; + int result; + + OCF_CHECK_NULL(cache); + + result = ocf_cache_get_info(cache, &info); + if (result) + return result; + + cache_line_size = ocf_cache_get_line_size(cache); + + _ocf_stats_zero(usage); + _ocf_stats_zero(rq); + _ocf_stats_zero(blocks); + _ocf_stats_zero(errors); + + result = ocf_core_visit(cache, _accumulate_stats, &s, true); + if (result) + return result; + + if (usage) { + _set(&usage->occupancy, + _lines4k(info.occupancy, cache_line_size), + _lines4k(info.size, cache_line_size)); + + _set(&usage->free, + _lines4k(info.size - info.occupancy, cache_line_size), + _lines4k(info.size, cache_line_size)); + + _set(&usage->clean, + _lines4k(info.occupancy - info.dirty, cache_line_size), + _lines4k(info.size, cache_line_size)); + + _set(&usage->dirty, + _lines4k(info.dirty, cache_line_size), + _lines4k(info.size, cache_line_size)); + } + + if (rq) + _fill_rq(rq, &s); + + if (blocks) + _fill_blocks(blocks, &s); + + if (errors) + _fill_errors(errors, &s); + + return 0; +} diff --git a/src/ocf_stats_priv.h b/src/ocf_stats_priv.h new file mode 100644 index 0000000..446173b --- /dev/null +++ b/src/ocf_stats_priv.h @@ -0,0 +1,61 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_STATS_PRIV_H__ +#define __OCF_STATS_PRIV_H__ + +struct ocf_counters_block { + env_atomic64 read_bytes; + env_atomic64 write_bytes; +}; + +struct ocf_counters_error { + env_atomic read; + env_atomic write; +}; + +struct ocf_counters_req { + env_atomic64 partial_miss; + env_atomic64 full_miss; + env_atomic64 total; + env_atomic64 pass_through; +}; + +/** + * statistics appropriate for given io class. 
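+ * One instance exists per core for each IO class (see part_counters[] in
+ * struct ocf_counters_core below); every member is env_atomic64 based, so
+ * the IO path can update the counters without taking extra locks.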
+ */ +struct ocf_counters_part { + struct ocf_counters_req read_reqs; + struct ocf_counters_req write_reqs; + + struct ocf_counters_block blocks; +}; + +#ifdef OCF_DEBUG_STATS +struct ocf_counters_debug { + env_atomic64 write_size[IO_PACKET_NO]; + env_atomic64 read_size[IO_PACKET_NO]; + + env_atomic64 read_align[IO_ALIGN_NO]; + env_atomic64 write_align[IO_ALIGN_NO]; +}; +#endif + +struct ocf_counters_core { + struct ocf_counters_block core_blocks; + struct ocf_counters_block cache_blocks; + + struct ocf_counters_error core_errors; + struct ocf_counters_error cache_errors; + + struct ocf_counters_part part_counters[OCF_IO_CLASS_MAX]; +#ifdef OCF_DEBUG_STATS + struct ocf_counters_debug debug_stats; +#endif +}; + +void ocf_stats_init(ocf_core_t core); + +#endif diff --git a/src/ocf_utils.c b/src/ocf_utils.c new file mode 100644 index 0000000..3df2747 --- /dev/null +++ b/src/ocf_utils.c @@ -0,0 +1,43 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "ocf_cache_priv.h" +#include "utils/utils_rq.h" +#include "ocf_utils.h" +#include "ocf_ctx_priv.h" + +int ocf_utils_init(struct ocf_ctx *ocf_ctx) +{ + int result; + + result = ocf_rq_allocator_init(ocf_ctx); + if (result) + goto ocf_utils_init_ERROR; + + ocf_ctx->resources.core_io_allocator = + env_allocator_create(sizeof(struct ocf_core_io), + "ocf_io"); + if (!ocf_ctx->resources.core_io_allocator) + goto ocf_utils_init_ERROR; + + return 0; + +ocf_utils_init_ERROR: + + ocf_utils_deinit(ocf_ctx); + + return -1; +} + +void ocf_utils_deinit(struct ocf_ctx *ocf_ctx) +{ + ocf_rq_allocator_deinit(ocf_ctx); + + if (ocf_ctx->resources.core_io_allocator) { + env_allocator_destroy(ocf_ctx->resources.core_io_allocator); + ocf_ctx->resources.core_io_allocator = NULL; + } +} diff --git a/src/ocf_utils.h b/src/ocf_utils.h new file mode 100644 index 0000000..96ef656 --- /dev/null +++ b/src/ocf_utils.h @@ -0,0 +1,13 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef OCF_UTILS_H_ +#define OCF_UTILS_H_ + +int ocf_utils_init(struct ocf_ctx *ocf_ctx); + +void ocf_utils_deinit(struct ocf_ctx *ocf_ctx); + +#endif /* OCF_UTILS_H_ */ diff --git a/src/utils/utils_allocator.c b/src/utils/utils_allocator.c new file mode 100644 index 0000000..240f6b0 --- /dev/null +++ b/src/utils/utils_allocator.c @@ -0,0 +1,267 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +#include "ocf/ocf.h" +#include "utils_allocator.h" +#include "../ocf_priv.h" +#include "../ocf_cache_priv.h" +#include "ocf_env.h" + +#define OCF_ALLOCATOR_K_MAX (128 * KiB) + +static int _ocf_realloc_with_cp(void **mem, size_t size, size_t count, + size_t *limit, bool cp) +{ + size_t alloc_size = size * count; + + ENV_BUG_ON(!mem); + ENV_BUG_ON(!limit); + + if (size && count) { + /* Memory reallocation request */ + + if (alloc_size > *limit) { + /* The space is not enough, we need allocate new one */ + + void *new_mem; + + if (alloc_size > OCF_ALLOCATOR_K_MAX) + new_mem = env_vzalloc(alloc_size); + else + new_mem = env_zalloc(alloc_size, ENV_MEM_NOIO); + + if (!new_mem) { + /* Allocation error */ + return -1; + } + + /* Free previous memory */ + if (*mem) { + if (cp) { + /* copy previous content into new allocated + * memory + */ + ENV_BUG_ON(env_memcpy(new_mem, alloc_size, *mem, *limit)); + + } + + if (*limit > OCF_ALLOCATOR_K_MAX) + env_vfree(*mem); + else + env_free(*mem); + } + + /* Update limit */ + *limit 
= alloc_size; + + /* Update memory pointer */ + *mem = new_mem; + + return 0; + } + + /* + * The memory space is enough, no action required. + * Space after allocation set to '0' + */ + if (cp) + ENV_BUG_ON(env_memset(*mem + alloc_size, *limit - alloc_size, 0)); + + return 0; + + } + + if ((size == 0) && (count == 0)) { + + if ((*mem) && (*limit)) { + /* Need to free memory */ + if (*limit > OCF_ALLOCATOR_K_MAX) + env_vfree(*mem); + else + env_free(*mem); + + /* Update limit */ + *((size_t *)limit) = 0; + *mem = NULL; + + return 0; + } + + if ((!*mem) && (*limit == 0)) { + /* No allocation before do nothing */ + return 0; + + } + } + + ENV_BUG(); + return -1; +} + +int ocf_realloc(void **mem, size_t size, size_t count, size_t *limit) +{ + return _ocf_realloc_with_cp(mem, size, count, limit, false); +} + +int ocf_realloc_cp(void **mem, size_t size, size_t count, size_t *limit) +{ + return _ocf_realloc_with_cp(mem, size, count, limit, true); +} + +void ocf_realloc_init(void **mem, size_t *limit) +{ + ENV_BUG_ON(!mem); + ENV_BUG_ON(!limit); + + *mem = NULL; + *((size_t *)limit) = 0; +} + +enum { + ocf_mpool_1, + ocf_mpool_2, + ocf_mpool_4, + ocf_mpool_8, + ocf_mpool_16, + ocf_mpool_32, + ocf_mpool_64, + ocf_mpool_128, + + ocf_mpool_max +}; + +struct ocf_mpool { + struct ocf_cache *cache; + /*!< Cache instance */ + + uint32_t item_size; + /*!< Size of specific item of memory pool */ + + uint32_t hdr_size; + /*!< Header size before items */ + + env_allocator *allocator[ocf_mpool_max]; + /*!< OS handle to memory pool */ + + int flags; + /*!< Allocation flags */ +}; + +#define ALLOCATOR_NAME_MAX 128 + +struct ocf_mpool *ocf_mpool_create(struct ocf_cache *cache, + uint32_t hdr_size, uint32_t size, int flags, int mpool_max, + const char *name_perfix) +{ + uint32_t i; + char name[ALLOCATOR_NAME_MAX] = { '\0' }; + int result; + struct ocf_mpool *mpool; + + OCF_CHECK_NULL(name_perfix); + + mpool = env_zalloc(sizeof(*mpool), ENV_MEM_NORMAL); + if (!mpool) + goto ocf_multi_allocator_create_ERROR; + + mpool->item_size = size; + mpool->hdr_size = hdr_size; + mpool->cache = cache; + mpool->flags = flags; + + for (i = 0; i < min(ocf_mpool_max, mpool_max + 1); i++) { + result = snprintf(name, sizeof(name), "%s_%u", name_perfix, + (1 << i)); + if (result < 0 || result >= sizeof(name)) + goto ocf_multi_allocator_create_ERROR; + + mpool->allocator[i] = env_allocator_create( + hdr_size + (size * (1 << i)), name); + + if (!mpool->allocator[i]) + goto ocf_multi_allocator_create_ERROR; + } + + return mpool; + +ocf_multi_allocator_create_ERROR: + + ocf_mpool_destroy(mpool); + + return NULL; +} + +void ocf_mpool_destroy(struct ocf_mpool *mallocator) +{ + if (mallocator) { + uint32_t i; + + for (i = 0; i < ocf_mpool_max; i++) + if (mallocator->allocator[i]) + env_allocator_destroy(mallocator->allocator[i]); + + env_free(mallocator); + } +} + +static env_allocator *ocf_mpool_get_allocator( + struct ocf_mpool *mallocator, uint32_t count) +{ + unsigned int idx; + + if (unlikely(count == 0)) + return ocf_mpool_1; + + idx = 31 - __builtin_clz(count); + + if (__builtin_ffs(count) <= idx) + idx++; + + if (idx >= ocf_mpool_max) + return NULL; + + return mallocator->allocator[idx]; +} + +void *ocf_mpool_new_f(struct ocf_mpool *mpool, uint32_t count, int flags) +{ + void *items = NULL; + env_allocator *allocator; + + OCF_CHECK_NULL(mpool); + + allocator = ocf_mpool_get_allocator(mpool, count); + + if (allocator) + items = env_allocator_new(allocator); + else + items = env_zalloc(mpool->hdr_size + (mpool->item_size * count), 
flags); + +#ifdef ZERO_OR_NULL_PTR + if (ZERO_OR_NULL_PTR(items)) + return NULL; +#endif + + return items; +} + +void *ocf_mpool_new(struct ocf_mpool *mpool, uint32_t count) +{ + return ocf_mpool_new_f(mpool, count, mpool->flags); +} + +void ocf_mpool_del(struct ocf_mpool *mpool, + void *items, uint32_t count) +{ + env_allocator *allocator; + + OCF_CHECK_NULL(mpool); + + allocator = ocf_mpool_get_allocator(mpool, count); + + if (allocator) + env_allocator_del(allocator, items); + else + env_free(items); +} diff --git a/src/utils/utils_allocator.h b/src/utils/utils_allocator.h new file mode 100644 index 0000000..9542e9b --- /dev/null +++ b/src/utils/utils_allocator.h @@ -0,0 +1,69 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef UTILS_ALLOCATOR_H_ +#define UTILS_ALLOCATOR_H_ + +/** + * @file utils_allocator.h + * @brief OCF memory reallocator + */ + +void ocf_realloc_init(void **mem, size_t *limit); + +int ocf_realloc(void **mem, size_t size, size_t count, size_t *limit); + +int ocf_realloc_cp(void **mem, size_t size, size_t count, size_t *limit); + +/** + * @brief Initialize memory pointer and limit before reallocator usage + * + * @param[inout] mem - Pointer to the memory + * @param[inout] limit - Variable used internally by reallocator and indicates + * last allocation size + */ +#define OCF_REALLOC_INIT(mem, limit) \ + ocf_realloc_init((void **)mem, limit) + +/** + * @brief De-Initialize memory pointer and limit, free memory + * + * @param[inout] mem - Pointer to the memory + * @param[inout] limit - Variable used internally by reallocator and indicates + * last allocation size + */ +#define OCF_REALLOC_DEINIT(mem, limit) \ + ocf_realloc((void **)mem, 0, 0, limit) + +/** + * @brief Reallocate referenced memory if it is required. + * + * @param[inout] mem - Pointer to the memory + * @param[in] size - Size of particular element + * @param[in] count - Counts of element + * @param[inout] limit - Variable used internally by reallocator and indicates + * last allocation size + * + * @return 0 - Reallocation successful, Non zero - Realocation ERROR + */ +#define OCF_REALLOC(mem, size, count, limit) \ + ocf_realloc((void **)mem, size, count, limit) + +/** + * @brief Reallocate referenced memory if it is required and copy old content + * into new memory space, new memory space is set to '0' + * + * @param[inout] mem - Pointer to the memory + * @param[in] size - Size of particular element + * @param[in] count - Counts of element + * @param[inout] limit - Variable used internally by reallocator and indicates + * last allocation size + * + * @return 0 - Reallocation successful, Non zero - Realocation ERROR + */ +#define OCF_REALLOC_CP(mem, size, count, limit) \ + ocf_realloc_cp((void **)mem, size, count, limit) + +#endif /* UTILS_ALLOCATOR_H_ */ diff --git a/src/utils/utils_cache_line.c b/src/utils/utils_cache_line.c new file mode 100644 index 0000000..f56f3ce --- /dev/null +++ b/src/utils/utils_cache_line.c @@ -0,0 +1,177 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "utils_cache_line.h" + +static inline void ocf_cleaning_set_hot_cache_line(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + ocf_cleaning_t cleaning_type = cache->conf_meta->cleaning_policy_type; + + ENV_BUG_ON(cleaning_type >= ocf_cleaning_max); + + if (cleaning_policy_ops[cleaning_type].set_hot_cache_line) { + cleaning_policy_ops[cleaning_type]. 
+ set_hot_cache_line(cache, line); + } +} + +static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, ocf_cache_line_t line, + ocf_core_id_t core_id, ocf_part_id_t part_id) +{ + bool is_valid; + + ENV_BUG_ON(core_id >= OCF_CORE_MAX); + + if (metadata_clear_valid_sec_changed(cache, line, start_bit, end_bit, + &is_valid)) { + /* + * Update the number of cached data for that core object + */ + env_atomic_dec(&cache->core_runtime_meta[core_id]. + cached_clines); + env_atomic_dec(&cache->core_runtime_meta[core_id]. + part_counters[part_id].cached_clines); + } + + /* If we have waiters, do not remove cache line + * for this cache line which will use one, clear + * only valid bits + */ + if (!is_valid && !ocf_cache_line_are_waiters(cache, line)) { + ocf_purge_eviction_policy(cache, line); + ocf_metadata_sparse_cache_line(cache, line); + } +} + +void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx) +{ + ocf_cache_line_t line = rq->map[map_idx].coll_idx; + ocf_part_id_t part_id; + ocf_core_id_t core_id; + + ENV_BUG_ON(!rq); + + part_id = ocf_metadata_get_partition_id(cache, line); + core_id = rq->core_id; + + __set_cache_line_invalid(cache, start_bit, end_bit, line, core_id, + part_id); + + ocf_metadata_flush_mark(cache, rq, map_idx, INVALID, start_bit, + end_bit); +} + +void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, ocf_cache_line_t line) +{ + ocf_part_id_t part_id; + ocf_core_id_t core_id; + + ocf_metadata_get_core_and_part_id(cache, line, &core_id, &part_id); + + __set_cache_line_invalid(cache, start_bit, end_bit, line, core_id, + part_id); +} + +void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx) +{ + ocf_core_id_t core_id = rq->core_id; + ocf_cache_line_t line = rq->map[map_idx].coll_idx; + ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line); + + ENV_BUG_ON(!(core_id < OCF_CORE_MAX)); + + if (metadata_set_valid_sec_changed(cache, line, start_bit, end_bit)) { + /* + * Update the number of cached data for that core object + */ + env_atomic_inc(&cache->core_runtime_meta[core_id]. + cached_clines); + env_atomic_inc(&cache->core_runtime_meta[core_id]. + part_counters[part_id].cached_clines); + } +} + +void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx) +{ + ocf_core_id_t core_id = rq->core_id; + ocf_cache_line_t line = rq->map[map_idx].coll_idx; + ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line); + uint8_t evp_type = cache->conf_meta->eviction_policy_type; + + ENV_BUG_ON(!(core_id < OCF_CORE_MAX)); + + if (metadata_clear_dirty_sec_changed(cache, line, start_bit, end_bit)) { + /* + * Update the number of dirty cached data for that + * core object + */ + if (env_atomic_dec_and_test(&cache->core_runtime_meta[core_id]. + dirty_clines)) { + /* + * If this is last dirty cline reset dirty + * timestamp + */ + env_atomic64_set(&cache->core_runtime_meta[core_id]. + dirty_since, 0); + } + + /* + * decrement dirty clines statistic for given cline + */ + env_atomic_dec(&cache->core_runtime_meta[core_id]. 
+ part_counters[part_id].dirty_clines); + + if (likely(evict_policy_ops[evp_type].clean_cline)) + evict_policy_ops[evp_type].clean_cline(cache, part_id, line); + + ocf_purge_cleaning_policy(cache, line); + } + + ocf_metadata_flush_mark(cache, rq, map_idx, CLEAN, start_bit, end_bit); +} + +void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx) +{ + ocf_core_id_t core_id = rq->core_id; + ocf_cache_line_t line = rq->map[map_idx].coll_idx; + ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line); + uint8_t evp_type = cache->conf_meta->eviction_policy_type; + + ENV_BUG_ON(!(core_id < OCF_CORE_MAX)); + + if (metadata_set_dirty_sec_changed(cache, line, start_bit, end_bit)) { + /* + * If this is first dirty cline set dirty timestamp + */ + env_atomic64_cmpxchg(&cache->core_runtime_meta[core_id]. + dirty_since, 0, env_get_tick_count()); + + /* + * Update the number of dirty cached data for that + * core object + */ + env_atomic_inc(&cache->core_runtime_meta[core_id].dirty_clines); + + /* + * increment dirty clines statistic for given cline + */ + env_atomic_inc(&cache->core_runtime_meta[core_id]. + part_counters[part_id].dirty_clines); + + if (likely(evict_policy_ops[evp_type].dirty_cline)) + evict_policy_ops[evp_type].dirty_cline(cache, part_id, line); + } + + ocf_cleaning_set_hot_cache_line(cache, line); + + ocf_metadata_flush_mark(cache, rq, map_idx, DIRTY, start_bit, end_bit); +} diff --git a/src/utils/utils_cache_line.h b/src/utils/utils_cache_line.h new file mode 100644 index 0000000..2ad5785 --- /dev/null +++ b/src/utils/utils_cache_line.h @@ -0,0 +1,372 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef UTILS_CACHE_LINE_H_ +#define UTILS_CACHE_LINE_H_ + +#include "../metadata/metadata.h" +#include "../eviction/eviction.h" +#include "../eviction/ops.h" +#include "../concurrency/ocf_cache_concurrency.h" +#include "../engine/cache_engine.h" +#include "../ocf_request.h" +#include "../ocf_def_priv.h" + +/** + * @file utils_cache_line.h + * @brief OCF utilities for cache line operations + */ + +static inline ocf_cache_line_size_t ocf_line_size( + struct ocf_cache *cache) +{ + return cache->metadata.settings.size; +} + +static inline uint64_t ocf_line_pages(struct ocf_cache *cache) +{ + return cache->metadata.settings.size / PAGE_SIZE; +} + +static inline uint64_t ocf_line_sectors(struct ocf_cache *cache) +{ + return cache->metadata.settings.sector_count; +} + +static inline uint64_t ocf_line_end_sector(struct ocf_cache *cache) +{ + return cache->metadata.settings.sector_end; +} + +static inline uint64_t ocf_line_start_sector(struct ocf_cache *cache) +{ + return cache->metadata.settings.sector_start; +} + +static inline uint64_t ocf_bytes_round_lines(struct ocf_cache *cache, + uint64_t bytes) +{ + return (bytes + ocf_line_size(cache) - 1) / ocf_line_size(cache); +} + +static inline uint64_t ocf_bytes_2_lines(struct ocf_cache *cache, + uint64_t bytes) +{ + return bytes / ocf_line_size(cache); +} + +static inline uint64_t ocf_bytes_2_lines_round_up( + struct ocf_cache *cache, uint64_t bytes) +{ + return DIV_ROUND_UP(bytes, ocf_line_size(cache)); +} + +static inline uint64_t ocf_lines_2_bytes(struct ocf_cache *cache, + uint64_t lines) +{ + return lines * ocf_line_size(cache); +} + +/** + * @brief Set cache line invalid + * + * @param cache Cache instance + * @param start_bit Start bit of cache line for which state will be set + * @param end_bit End 
bit of cache line for which state will be set + * @param rq OCF request + * @param map_idx Array index to map containing cache line to invalid + */ +void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx); + + +/** + * @brief Set cache line invalid without flush + * + * @param cache Cache instance + * @param start_bit Start bit of cache line for which state will be set + * @param end_bit End bit of cache line for which state will be set + * @param line Cache line to invalid + */ +void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, ocf_cache_line_t line); + +/** + * @brief Set cache line valid + * + * @param cache Cache instance + * @param start_bit Start bit of cache line for which state will be set + * @param end_bit End bit of cache line for which state will be set + * @param rq OCF request + * @param map_idx Array index to map containing cache line to invalid + */ +void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx); + +/** + * @brief Set cache line clean + * + * @param cache Cache instance + * @param start_bit Start bit of cache line for which state will be set + * @param end_bit End bit of cache line for which state will be set + * @param rq OCF request + * @param map_idx Array index to map containing cache line to invalid + */ +void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx); + +/** + * @brief Set cache line dirty + * + * @param cache Cache instance + * @param start_bit Start bit of cache line for which state will be set + * @param end_bit End bit of cache line for which state will be set + * @param rq OCF request + * @param map_idx Array index to map containing cache line to invalid + */ +void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit, + uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx); + +/** + * @brief Remove cache line from cleaning policy + * + * @param cache - cache instance + * @param line - cache line to be removed + * + */ +static inline void ocf_purge_cleaning_policy(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + ocf_cleaning_t clean_type = cache->conf_meta->cleaning_policy_type; + + ENV_BUG_ON(clean_type >= ocf_cleaning_max); + + /* Remove from cleaning policy */ + if (cleaning_policy_ops[clean_type].purge_cache_block != NULL) + cleaning_policy_ops[clean_type].purge_cache_block(cache, line); +} + +/** + * @brief Remove cache line from eviction policy + * + * @param cache - cache instance + * @param line - cache line to be removed + */ +static inline void ocf_purge_eviction_policy(struct ocf_cache *cache, + ocf_cache_line_t line) +{ + ocf_eviction_purge_cache_line(cache, line); +} + +/** + * @brief Set cache line clean and invalid and remove form lists + * + * @param cache Cache instance + * @param start Start bit of range in cache line to purge + * @param end End bit of range in cache line to purge + * @param rq OCF request + * @param map_idx Array index to map containing cache line to purge + */ +static inline void _ocf_purge_cache_line_sec(struct ocf_cache *cache, + uint8_t start, uint8_t stop, struct ocf_request *rq, + uint32_t map_idx) +{ + + set_cache_line_clean(cache, start, stop, rq, map_idx); + + set_cache_line_invalid(cache, start, stop, rq, map_idx); +} + +/** + * @brief Purge cache line (remove completely, form collision, move to free + * 
partition, from cleaning policy and eviction policy) + * + * @param rq - OCF request to purge + */ +static inline void ocf_purge_map_info(struct ocf_request *rq) +{ + uint32_t map_idx = 0; + uint8_t start_bit; + uint8_t end_bit; + struct ocf_map_info *map = rq->map; + struct ocf_cache *cache = rq->cache; + uint32_t count = rq->core_line_count; + + /* Purge range on the basis of map info + * + * | 01234567 | 01234567 | ... | 01234567 | 01234567 | + * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- | + * | first | Middle | last | + */ + + for (map_idx = 0; map_idx < count; map_idx++) { + if (map[map_idx].status == LOOKUP_MISS) + continue; + + start_bit = 0; + end_bit = ocf_line_end_sector(cache); + + if (map_idx == 0) { + /* First */ + + start_bit = BYTES_TO_SECTORS(rq->byte_position) + % ocf_line_sectors(cache); + + } + + if (map_idx == (count - 1)) { + /* Last */ + + end_bit = BYTES_TO_SECTORS(rq->byte_position + + rq->byte_length - 1) % + ocf_line_sectors(cache); + } + + _ocf_purge_cache_line_sec(cache, start_bit, end_bit, rq, + map_idx); + } +} + +static inline void ocf_set_valid_map_info(struct ocf_request *rq) +{ + uint32_t map_idx = 0; + uint8_t start_bit; + uint8_t end_bit; + struct ocf_cache *cache = rq->cache; + uint32_t count = rq->core_line_count; + struct ocf_map_info *map = rq->map; + + /* Set valid bits for sectors on the basis of map info + * + * | 01234567 | 01234567 | ... | 01234567 | 01234567 | + * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- | + * | first | Middle | last | + */ + + for (map_idx = 0; map_idx < count; map_idx++) { + ENV_BUG_ON(map[map_idx].status == LOOKUP_MISS); + + start_bit = 0; + end_bit = ocf_line_end_sector(cache); + + if (map_idx == 0) { + /* First */ + + start_bit = BYTES_TO_SECTORS(rq->byte_position) + % ocf_line_sectors(cache); + } + + if (map_idx == (count - 1)) { + /* Last */ + + end_bit = BYTES_TO_SECTORS(rq->byte_position + + rq->byte_length - 1) + % ocf_line_sectors(cache); + } + + set_cache_line_valid(cache, start_bit, end_bit, rq, map_idx); + } +} + +static inline void ocf_set_dirty_map_info(struct ocf_request *rq) +{ + uint32_t map_idx = 0; + uint8_t start_bit; + uint8_t end_bit; + struct ocf_cache *cache = rq->cache; + uint32_t count = rq->core_line_count; + + /* Set valid bits for sectors on the basis of map info + * + * | 01234567 | 01234567 | ... | 01234567 | 01234567 | + * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- | + * | first | Middle | last | + */ + + for (map_idx = 0; map_idx < count; map_idx++) { + start_bit = 0; + end_bit = ocf_line_end_sector(cache); + + if (map_idx == 0) { + /* First */ + + start_bit = BYTES_TO_SECTORS(rq->byte_position) + % ocf_line_sectors(cache); + } + + if (map_idx == (count - 1)) { + /* Last */ + + end_bit = BYTES_TO_SECTORS(rq->byte_position + + rq->byte_length - 1) % + ocf_line_sectors(cache); + } + + set_cache_line_dirty(cache, start_bit, end_bit, rq, map_idx); + } +} + +static inline void ocf_set_clean_map_info(struct ocf_request *rq) +{ + uint32_t map_idx = 0; + uint8_t start_bit; + uint8_t end_bit; + struct ocf_cache *cache = rq->cache; + uint32_t count = rq->core_line_count; + + /* Set valid bits for sectors on the basis of map info + * + * | 01234567 | 01234567 | ... 
| 01234567 | 01234567 | + * | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- | + * | first | Middle | last | + */ + + for (map_idx = 0; map_idx < count; map_idx++) { + start_bit = 0; + end_bit = ocf_line_end_sector(cache); + + if (map_idx == 0) { + /* First */ + + start_bit = BYTES_TO_SECTORS(rq->byte_position) + % ocf_line_sectors(cache); + } + + if (map_idx == (count - 1)) { + /* Last */ + + end_bit = BYTES_TO_SECTORS(rq->byte_position + + rq->byte_length - 1) % + ocf_line_sectors(cache); + + } + + set_cache_line_clean(cache, start_bit, end_bit, rq, map_idx); + } +} + +/** + * @brief Validate cache line size + * + * @param[in] size Cache line size + * + * @retval true cache line size is valid + * @retval false cache line is invalid + */ +static inline bool ocf_cache_line_size_is_valid(uint64_t size) +{ + switch (size) { + case 4 * KiB: + case 8 * KiB: + case 16 * KiB: + case 32 * KiB: + case 64 * KiB: + return true; + default: + return false; + } +} + +#endif /* UTILS_CACHE_LINE_H_ */ diff --git a/src/utils/utils_cleaner.c b/src/utils/utils_cleaner.c new file mode 100644 index 0000000..0acf859 --- /dev/null +++ b/src/utils/utils_cleaner.c @@ -0,0 +1,1049 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "../metadata/metadata.h" +#include "../engine/cache_engine.h" +#include "../engine/engine_common.h" +#include "../concurrency/ocf_concurrency.h" +#include "utils_cleaner.h" +#include "utils_rq.h" +#include "utils_io.h" +#include "utils_cache_line.h" + +#define OCF_UTILS_CLEANER_DEBUG 0 + +#if 1 == OCF_UTILS_CLEANER_DEBUG +#define OCF_DEBUG_TRACE(cache) \ + ocf_cache_log(cache, log_info, "[Utils][cleaner] %s\n", __func__) + +#define OCF_DEBUG_MSG(cache, msg) \ + ocf_cache_log(cache, log_info, "[Utils][cleaner] %s - %s\n", \ + __func__, msg) + +#define OCF_DEBUG_PARAM(cache, format, ...) \ + ocf_cache_log(cache, log_info, "[Utils][cleaner] %s - "format"\n", \ + __func__, ##__VA_ARGS__) +#else +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_MSG(cache, msg) +#define OCF_DEBUG_PARAM(cache, format, ...) 
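+/* Note: with OCF_UTILS_CLEANER_DEBUG set to 0 above, these stubs compile the cleaner traces out; changing the define to 1 routes OCF_DEBUG_TRACE/MSG/PARAM to ocf_cache_log() at log_info level. */ 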
+#endif + + +struct ocf_cleaner_sync { + env_completion cmpl; + int error; +}; + +/* + * Allocate cleaning request + */ +static struct ocf_request *_ocf_cleaner_alloc_rq(struct ocf_cache *cache, + uint32_t count, const struct ocf_cleaner_attribs *attribs) +{ + struct ocf_request *rq = ocf_rq_new_extended(cache, 0, 0, + count * ocf_line_size(cache), OCF_READ); + int ret; + + if (!rq) + return NULL; + + rq->info.internal = true; + rq->info.cleaner_cache_line_lock = attribs->cache_line_lock; + + /* Allocate pages for cleaning IO */ + rq->data = ctx_data_alloc(cache->owner, + ocf_line_size(cache) / PAGE_SIZE * count); + if (!rq->data) { + ocf_rq_put(rq); + return NULL; + } + + ret = ctx_data_mlock(cache->owner, rq->data); + if (ret) { + ctx_data_free(cache->owner, rq->data); + ocf_rq_put(rq); + return NULL; + } + + rq->io_queue = attribs->io_queue; + + return rq; +} + +enum { + ocf_cleaner_rq_type_master = 1, + ocf_cleaner_rq_type_slave = 2 +}; + +static struct ocf_request *_ocf_cleaner_alloc_master_rq( + struct ocf_cache *cache, uint32_t count, + const struct ocf_cleaner_attribs *attribs) +{ + struct ocf_request *rq = _ocf_cleaner_alloc_rq(cache, count, attribs); + + if (rq) { + /* Set type of cleaning request */ + rq->master_io_req_type = ocf_cleaner_rq_type_master; + + /* In master, save completion context and function */ + rq->priv = attribs->cmpl_context; + rq->master_io_req = attribs->cmpl_fn; + + /* The count of all requests */ + env_atomic_set(&rq->master_remaining, 1); + + OCF_DEBUG_PARAM(cache, "New master request, count = %u", + count); + } + return rq; +} + +static struct ocf_request *_ocf_cleaner_alloc_slave_rq( + struct ocf_request *master, + uint32_t count, const struct ocf_cleaner_attribs *attribs) +{ + struct ocf_request *rq = _ocf_cleaner_alloc_rq( + master->cache, count, attribs); + + if (rq) { + /* Set type of cleaning request */ + rq->master_io_req_type = ocf_cleaner_rq_type_slave; + + /* Slave refers to master request, get its reference counter */ + ocf_rq_get(master); + + /* Slave request contains reference to master */ + rq->master_io_req = master; + + /* One more additional slave request, increase global counter + * of requests count + */ + env_atomic_inc(&master->master_remaining); + + OCF_DEBUG_PARAM(rq->cache, + "New slave request, count = %u,all requests count = %d", + count, env_atomic_read(&master->master_remaining)); + } + return rq; +} + +static void _ocf_cleaner_dealloc_rq(struct ocf_request *rq) +{ + if (ocf_cleaner_rq_type_slave == rq->master_io_req_type) { + /* Slave contains reference to the master request, + * release reference counter + */ + struct ocf_request *master = rq->master_io_req; + + OCF_DEBUG_MSG(rq->cache, "Put master request by slave"); + ocf_rq_put(master); + + OCF_DEBUG_MSG(rq->cache, "Free slave request"); + } else if (ocf_cleaner_rq_type_master == rq->master_io_req_type) { + OCF_DEBUG_MSG(rq->cache, "Free master request"); + } else { + ENV_BUG(); + } + + ctx_data_secure_erase(rq->cache->owner, rq->data); + ctx_data_munlock(rq->cache->owner, rq->data); + ctx_data_free(rq->cache->owner, rq->data); + ocf_rq_put(rq); +} + +/* + * cleaner - Get clean result + */ +static void _ocf_cleaner_set_error(struct ocf_request *rq) +{ + struct ocf_request *master = NULL; + + if (ocf_cleaner_rq_type_master == rq->master_io_req_type) { + master = rq; + } else if (ocf_cleaner_rq_type_slave == rq->master_io_req_type) { + master = rq->master_io_req; + } else { + ENV_BUG(); + return; + } + + master->error = -EIO; +} + +static void 
_ocf_cleaner_complete_rq(struct ocf_request *rq) +{ + struct ocf_request *master = NULL; + ocf_end_t cmpl; + + if (ocf_cleaner_rq_type_master == rq->master_io_req_type) { + OCF_DEBUG_MSG(rq->cache, "Master completion"); + master = rq; + } else if (ocf_cleaner_rq_type_slave == rq->master_io_req_type) { + OCF_DEBUG_MSG(rq->cache, "Slave completion"); + master = rq->master_io_req; + } else { + ENV_BUG(); + return; + } + + OCF_DEBUG_PARAM(rq->cache, "Master requests remaining = %d", + env_atomic_read(&master->master_remaining)); + + if (env_atomic_dec_return(&master->master_remaining)) { + /* Not all requests completed */ + return; + } + + OCF_DEBUG_MSG(rq->cache, "All cleaning request completed"); + + /* Only master contains completion function and completion context */ + cmpl = master->master_io_req; + cmpl(master->priv, master->error); +} + +/* + * cleaner - Cache line lock, function lock cache lines depends on attributes + */ +static int _ocf_cleaner_cache_line_lock(struct ocf_request *rq) +{ + if (!rq->info.cleaner_cache_line_lock) + return OCF_LOCK_ACQUIRED; + + OCF_DEBUG_TRACE(rq->cache); + + return ocf_rq_trylock_rd(rq); +} + +/* + * cleaner - Cache line unlock, function unlock cache lines + * depends on attributes + */ +static void _ocf_cleaner_cache_line_unlock(struct ocf_request *rq) +{ + if (rq->info.cleaner_cache_line_lock) { + OCF_DEBUG_TRACE(rq->cache); + ocf_rq_unlock(rq); + } +} + +static bool _ocf_cleaner_sector_is_dirty(struct ocf_cache *cache, + ocf_cache_line_t line, uint8_t sector) +{ + bool dirty = metadata_test_dirty_one(cache, line, sector); + bool valid = metadata_test_valid_one(cache, line, sector); + + if (!valid && dirty) { + /* not valid but dirty - IMPROPER STATE!!! */ + ENV_BUG(); + } + + return valid ? dirty : false; +} + +static void _ocf_cleaner_finish_rq(struct ocf_request *rq) +{ + /* Handle cache lines unlocks */ + _ocf_cleaner_cache_line_unlock(rq); + + /* Signal completion to the caller of cleaning */ + _ocf_cleaner_complete_rq(rq); + + /* Free allocated resources */ + _ocf_cleaner_dealloc_rq(rq); +} + +static void _ocf_cleaner_flush_cache_io_end(void *priv, int error) +{ + struct ocf_request *rq = priv; + + if (error) { + ocf_metadata_error(rq->cache); + rq->error = error; + } + + OCF_DEBUG_MSG(rq->cache, "Cache flush finished"); + + _ocf_cleaner_finish_rq(rq); +} + +static int _ocf_cleaner_fire_flush_cache(struct ocf_request *rq) +{ + OCF_DEBUG_TRACE(rq->cache); + + ocf_submit_obj_flush(&rq->cache->device->obj, + _ocf_cleaner_flush_cache_io_end, rq); + + return 0; +} + +static const struct ocf_io_if _io_if_flush_cache = { + .read = _ocf_cleaner_fire_flush_cache, + .write = _ocf_cleaner_fire_flush_cache, +}; + +static void _ocf_cleaner_metadata_io_end(void *private_data, int error) +{ + struct ocf_request *rq = private_data; + + if (error) { + ocf_metadata_error(rq->cache); + rq->error = error; + _ocf_cleaner_finish_rq(rq); + return; + } + + OCF_DEBUG_MSG(rq->cache, "Metadata flush finished"); + + rq->io_if = &_io_if_flush_cache; + ocf_engine_push_rq_front(rq, true); +} + +static int _ocf_cleaner_update_metadata(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + const struct ocf_map_info *iter = rq->map; + uint32_t i; + ocf_cache_line_t cache_line; + + OCF_DEBUG_TRACE(rq->cache); + + OCF_METADATA_LOCK_WR(); + /* Update metadata */ + for (i = 0; i < rq->core_line_count; i++, iter++) { + if (iter->status == LOOKUP_MISS) + continue; + + if (iter->invalid) { + /* An error, do not clean */ + continue; + } + + cache_line = iter->coll_idx; + + 
if (!metadata_test_dirty(cache, cache_line)) + continue; + + ocf_metadata_get_core_and_part_id(cache, cache_line, + &rq->core_id, &rq->part_id); + + set_cache_line_clean(cache, 0, ocf_line_end_sector(cache), rq, + i); + } + + ocf_metadata_flush_do_asynch(cache, rq, _ocf_cleaner_metadata_io_end); + OCF_METADATA_UNLOCK_WR(); + + return 0; +} + +static const struct ocf_io_if _io_if_update_metadata = { + .read = _ocf_cleaner_update_metadata, + .write = _ocf_cleaner_update_metadata, +}; + +static void _ocf_cleaner_flush_cores_io_end(struct ocf_map_info *map, + struct ocf_request *rq, int error) +{ + uint32_t i; + struct ocf_map_info *iter = rq->map; + + if (error) { + /* Flush error, set error for all cache line of this core */ + for (i = 0; i < rq->core_line_count; i++, iter++) { + if (iter->status == LOOKUP_MISS) + continue; + + if (iter->core_id == map->core_id) + iter->invalid = true; + } + + _ocf_cleaner_set_error(rq); + } + + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + OCF_DEBUG_MSG(rq->cache, "Core flush finished"); + + /* + * All core writes done, switch to post cleaning activities + */ + rq->io_if = &_io_if_update_metadata; + ocf_engine_push_rq_front(rq, true); +} + +static void _ocf_cleaner_flush_cores_io_cmpl(struct ocf_io *io, int error) +{ + _ocf_cleaner_flush_cores_io_end(io->priv1, io->priv2, error); + + ocf_io_put(io); +} + +static int _ocf_cleaner_fire_flush_cores(struct ocf_request *rq) +{ + uint32_t i; + ocf_core_id_t core_id = OCF_CORE_MAX; + struct ocf_cache *cache = rq->cache; + struct ocf_map_info *iter = rq->map; + struct ocf_io *io; + + OCF_DEBUG_TRACE(rq->cache); + + /* Protect IO completion race */ + env_atomic_set(&rq->req_remaining, 1); + + /* Submit flush requests */ + for (i = 0; i < rq->core_line_count; i++, iter++) { + if (iter->invalid) { + /* IO error, skip this item */ + continue; + } + + if (iter->status == LOOKUP_MISS) + continue; + + if (core_id == iter->core_id) + continue; + + core_id = iter->core_id; + + env_atomic_inc(&rq->req_remaining); + + io = ocf_new_core_io(cache, core_id); + if (!io) { + _ocf_cleaner_flush_cores_io_end(iter, rq, -ENOMEM); + continue; + } + + ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0); + ocf_io_set_cmpl(io, iter, rq, _ocf_cleaner_flush_cores_io_cmpl); + + ocf_dobj_submit_flush(io); + } + + /* Protect IO completion race */ + _ocf_cleaner_flush_cores_io_end(NULL, rq, 0); + + return 0; +} + +static const struct ocf_io_if _io_if_flush_cores = { + .read = _ocf_cleaner_fire_flush_cores, + .write = _ocf_cleaner_fire_flush_cores, +}; + +static void _ocf_cleaner_core_io_end(struct ocf_request *rq) +{ + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + OCF_DEBUG_MSG(rq->cache, "Core writes finished"); + + /* + * All cache read requests done, now we can submit writes to cores, + * Move processing to thread, where IO will be (and can be) submitted + */ + rq->io_if = &_io_if_flush_cores; + ocf_engine_push_rq_front(rq, true); +} + +static void _ocf_cleaner_core_io_cmpl(struct ocf_io *io, int error) +{ + struct ocf_map_info *map = io->priv1; + struct ocf_request *rq = io->priv2; + + if (error) { + map->invalid |= 1; + _ocf_cleaner_set_error(rq); + env_atomic_inc(&rq->cache->core_obj[map->core_id].counters-> + core_errors.write); + } + + _ocf_cleaner_core_io_end(rq); + + ocf_io_put(io); +} + +static void _ocf_cleaner_core_io_for_dirty_range(struct ocf_request *rq, + struct ocf_map_info *iter, uint64_t begin, uint64_t end) +{ + uint64_t addr, offset; + int err; + struct ocf_cache *cache = rq->cache; + struct ocf_io 
*io; + struct ocf_counters_block *core_stats = + &cache->core_obj[iter->core_id].counters->core_blocks; + ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, + iter->coll_idx); + + io = ocf_new_core_io(cache, iter->core_id); + if (!io) + goto error; + + addr = (ocf_line_size(cache) * iter->core_line) + + SECTORS_TO_BYTES(begin); + offset = (ocf_line_size(cache) * iter->hash_key) + + SECTORS_TO_BYTES(begin); + + ocf_io_configure(io, addr, SECTORS_TO_BYTES(end - begin), OCF_WRITE, + part_id, 0); + err = ocf_io_set_data(io, rq->data, offset); + if (err) { + ocf_io_put(io); + goto error; + } + + ocf_io_set_cmpl(io, iter, rq, _ocf_cleaner_core_io_cmpl); + + env_atomic64_add(SECTORS_TO_BYTES(end - begin), &core_stats->write_bytes); + + OCF_DEBUG_PARAM(rq->cache, "Core write, line = %llu, " + "sector = %llu, count = %llu", iter->core_line, begin, + end - begin); + + /* Increase IO counter to be processed */ + env_atomic_inc(&rq->req_remaining); + + /* Send IO */ + ocf_dobj_submit_io(io); + + return; +error: + iter->invalid = true; + _ocf_cleaner_set_error(rq); +} + +static void _ocf_cleaner_core_submit_io(struct ocf_request *rq, + struct ocf_map_info *iter) +{ + uint64_t i, dirty_start = 0; + struct ocf_cache *cache = rq->cache; + bool counting_dirty = false; + + /* Check integrity of entry to be cleaned */ + if (metadata_test_valid(cache, iter->coll_idx) + && metadata_test_dirty(cache, iter->coll_idx)) { + + _ocf_cleaner_core_io_for_dirty_range(rq, iter, 0, + ocf_line_sectors(cache)); + + return; + } + + /* Sector cleaning, a little effort is required to this */ + for (i = 0; i < ocf_line_sectors(cache); i++) { + if (!_ocf_cleaner_sector_is_dirty(cache, iter->coll_idx, i)) { + if (counting_dirty) { + counting_dirty = false; + _ocf_cleaner_core_io_for_dirty_range(rq, iter, + dirty_start, i); + } + + continue; + } + + if (!counting_dirty) { + counting_dirty = true; + dirty_start = i; + } + + } + + if (counting_dirty) + _ocf_cleaner_core_io_for_dirty_range(rq, iter, dirty_start, i); +} + +static int _ocf_cleaner_fire_core(struct ocf_request *rq) +{ + uint32_t i; + struct ocf_map_info *iter; + + OCF_DEBUG_TRACE(rq->cache); + + /* Protect IO completion race */ + env_atomic_set(&rq->req_remaining, 1); + + /* Submits writes to the core */ + for (i = 0; i < rq->core_line_count; i++) { + iter = &(rq->map[i]); + + if (iter->invalid) { + /* IO read error on cache, skip this item */ + continue; + } + + if (iter->status == LOOKUP_MISS) + continue; + + _ocf_cleaner_core_submit_io(rq, iter); + } + + /* Protect IO completion race */ + _ocf_cleaner_core_io_end(rq); + + return 0; +} + +static const struct ocf_io_if _io_if_fire_core = { + .read = _ocf_cleaner_fire_core, + .write = _ocf_cleaner_fire_core, +}; + +static void _ocf_cleaner_cache_io_end(struct ocf_request *rq) +{ + if (env_atomic_dec_return(&rq->req_remaining)) + return; + + /* + * All cache read requests done, now we can submit writes to cores, + * Move processing to thread, where IO will be (and can be) submitted + */ + rq->io_if = &_io_if_fire_core; + ocf_engine_push_rq_front(rq, true); + + OCF_DEBUG_MSG(rq->cache, "Cache reads finished"); +} + +static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error) +{ + struct ocf_map_info *map = io->priv1; + struct ocf_request *rq = io->priv2; + + if (error) { + map->invalid |= 1; + _ocf_cleaner_set_error(rq); + env_atomic_inc(&rq->cache->core_obj[map->core_id].counters-> + cache_errors.read); + } + + _ocf_cleaner_cache_io_end(rq); + + ocf_io_put(io); +} + +/* + * cleaner - Traverse cache 
lines to be cleaned, detect sequential IO, and + * perform cache reads and core writes + */ +static int _ocf_cleaner_fire_cache(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + uint32_t i; + struct ocf_map_info *iter = rq->map; + uint64_t addr, offset; + ocf_part_id_t part_id; + struct ocf_io *io; + int err; + struct ocf_counters_block *cache_stats; + + /* Protect IO completion race */ + env_atomic_inc(&rq->req_remaining); + + for (i = 0; i < rq->core_line_count; i++, iter++) { + if (iter->core_id == OCF_CORE_MAX) + continue; + if (iter->status == LOOKUP_MISS) + continue; + + cache_stats = &cache->core_obj[iter->core_id]. + counters->cache_blocks; + + io = ocf_new_cache_io(cache); + if (!io) { + /* Allocation error */ + iter->invalid = true; + _ocf_cleaner_set_error(rq); + continue; + } + + OCF_DEBUG_PARAM(rq->cache, "Cache read, line = %u", + iter->coll_idx); + + addr = ocf_metadata_map_lg2phy(cache, + iter->coll_idx); + addr *= ocf_line_size(cache); + addr += cache->device->metadata_offset; + + offset = ocf_line_size(cache) * iter->hash_key; + + part_id = ocf_metadata_get_partition_id(cache, iter->coll_idx); + + ocf_io_set_cmpl(io, iter, rq, _ocf_cleaner_cache_io_cmpl); + ocf_io_configure(io, addr, ocf_line_size(cache), OCF_READ, + part_id, 0); + err = ocf_io_set_data(io, rq->data, offset); + if (err) { + ocf_io_put(io); + iter->invalid = true; + _ocf_cleaner_set_error(rq); + continue; + } + + env_atomic64_add(ocf_line_size(cache), &cache_stats->read_bytes); + + ocf_dobj_submit_io(io); + } + + /* Protect IO completion race */ + _ocf_cleaner_cache_io_end(rq); + + return 0; +} + +static const struct ocf_io_if _io_if_fire_cache = { + .read = _ocf_cleaner_fire_cache, + .write = _ocf_cleaner_fire_cache, +}; + +static void _ocf_cleaner_on_resume(struct ocf_request *rq) +{ + OCF_DEBUG_TRACE(rq->cache); + ocf_engine_push_rq_front(rq, true); +} + +static int _ocf_cleaner_fire(struct ocf_request *rq) +{ + int result; + + /* Set resume call backs */ + rq->resume = _ocf_cleaner_on_resume; + rq->io_if = &_io_if_fire_cache; + + /* Handle cache lines locks */ + result = _ocf_cleaner_cache_line_lock(rq); + + if (result >= 0) { + if (result == OCF_LOCK_ACQUIRED) { + OCF_DEBUG_MSG(rq->cache, "Lock acquired"); + _ocf_cleaner_fire_cache(rq); + } else { + OCF_DEBUG_MSG(rq->cache, "NO Lock"); + } + return 0; + } else { + OCF_DEBUG_MSG(rq->cache, "Lock error"); + } + + return result; +} + +/* Helper function for 'sort' */ +static int _ocf_cleaner_cmp_private(const void *a, const void *b) +{ + struct ocf_map_info *_a = (struct ocf_map_info *)a; + struct ocf_map_info *_b = (struct ocf_map_info *)b; + + static uint32_t step = 0; + + OCF_COND_RESCHED_DEFAULT(step); + + if (_a->core_id == _b->core_id) + return (_a->core_line > _b->core_line) ? 1 : -1; + + return (_a->core_id > _b->core_id) ? 
1 : -1; +} + +/** + * Prepare cleaning request to be fired + * + * @param rq cleaning request + * @param i_out number of already filled map requests (remaining to be filled + * with missed + */ +static int _ocf_cleaner_do_fire(struct ocf_request *rq, uint32_t i_out, + bool do_sort) +{ + uint32_t i; + /* Set counts of cache IOs */ + env_atomic_set(&rq->req_remaining, i_out); + + /* fill tail of a request with fake MISSes so that it won't + * be cleaned + */ + for (; i_out < rq->core_line_count; ++i_out) { + rq->map[i_out].core_id = OCF_CORE_MAX; + rq->map[i_out].core_line = ULLONG_MAX; + rq->map[i_out].status = LOOKUP_MISS; + rq->map[i_out].hash_key = i_out; + } + + if (do_sort) { + /* Sort by core id and core line */ + env_sort(rq->map, rq->core_line_count, sizeof(rq->map[0]), + _ocf_cleaner_cmp_private, NULL); + for (i = 0; i < rq->core_line_count; i++) + rq->map[i].hash_key = i; + } + + /* issue actual request */ + return _ocf_cleaner_fire(rq); +} + +static inline uint32_t _ocf_cleaner_get_rq_max_count(uint32_t count, + bool low_mem) +{ + if (low_mem || count <= 4096) + return count < 128 ? count : 128; + + return 1024; +} + +static void _ocf_cleaner_fire_error(struct ocf_request *master, + struct ocf_request *rq, int err) +{ + master->error = err; + _ocf_cleaner_complete_rq(rq); + _ocf_cleaner_dealloc_rq(rq); +} + +/* + * cleaner - Main function + */ +void ocf_cleaner_fire(struct ocf_cache *cache, + const struct ocf_cleaner_attribs *attribs) +{ + uint32_t i, i_out = 0, count = attribs->count; + /* max cache lines to be cleaned with one request: 1024 if over 4k lines + * to be flushed, otherwise 128. for large cleaning operations, 1024 is + * optimal number, but for smaller 1024 is too large to benefit from + * cleaning request overlapping + */ + uint32_t max = _ocf_cleaner_get_rq_max_count(count, false); + ocf_cache_line_t cache_line; + /* it is possible that more than one cleaning request will be generated + * for each cleaning order, thus multiple allocations. At the end of + * loop, rq is set to zero and NOT deallocated, as deallocation is + * handled in completion. + * In addition first request we call master which contains completion + * contexts. 
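(The caller's completion function and context are stored only in the master request, see _ocf_cleaner_alloc_master_rq() above.) 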
Then succeeding request we call salve requests which + * contains reference to the master request + */ + struct ocf_request *rq = NULL, *master; + int err; + ocf_core_id_t core_id; + uint64_t core_sector; + + /* Allocate master request */ + master = _ocf_cleaner_alloc_master_rq(cache, max, attribs); + + if (!master) { + /* Some memory allocation error, try re-allocate request */ + max = _ocf_cleaner_get_rq_max_count(count, true); + master = _ocf_cleaner_alloc_master_rq(cache, max, attribs); + } + + if (!master) { + attribs->cmpl_fn(attribs->cmpl_context, -ENOMEM); + return; + } + + rq = master; + + /* prevent cleaning completion race */ + ocf_rq_get(master); + env_atomic_inc(&master->master_remaining); + + for (i = 0; i < count; i++) { + + /* when request hasn't yet been allocated or is just issued */ + if (!rq) { + if (max > count - i) { + /* less than max left */ + max = count - i; + } + + rq = _ocf_cleaner_alloc_slave_rq(master, max, attribs); + } + + if (!rq) { + /* Some memory allocation error, + * try re-allocate request + */ + max = _ocf_cleaner_get_rq_max_count(max, true); + rq = _ocf_cleaner_alloc_slave_rq(master, max, attribs); + } + + /* when request allocation failed stop processing */ + if (!rq) { + master->error = -ENOMEM; + break; + } + + if (attribs->getter(cache, attribs->getter_context, + i, &cache_line)) { + OCF_DEBUG_MSG(cache, "Skip"); + continue; + } + + /* when line already cleaned - rare condition under heavy + * I/O workload. + */ + if (!metadata_test_dirty(cache, cache_line)) { + OCF_DEBUG_MSG(cache, "Not dirty"); + continue; + } + + if (!metadata_test_valid_any(cache, cache_line)) { + OCF_DEBUG_MSG(cache, "No any valid"); + + /* + * Extremely disturbing cache line state + * Cache line (sector) cannot be dirty and not valid + */ + ENV_BUG(); + continue; + } + + /* Get mapping info */ + ocf_metadata_get_core_info(cache, cache_line, &core_id, + &core_sector); + + if (unlikely(!cache->core_obj[core_id].opened)) { + OCF_DEBUG_MSG(cache, "Core object inactive"); + continue; + } + + rq->map[i_out].core_id = core_id; + rq->map[i_out].core_line = core_sector; + rq->map[i_out].coll_idx = cache_line; + rq->map[i_out].status = LOOKUP_HIT; + rq->map[i_out].hash_key = i_out; + i_out++; + + if (max == i_out) { + err = _ocf_cleaner_do_fire(rq, i_out, attribs->do_sort); + if (err) { + _ocf_cleaner_fire_error(master, rq, err); + rq = NULL; + break; + } + i_out = 0; + rq = NULL; + } + } + + if (rq) { + err = _ocf_cleaner_do_fire(rq, i_out, attribs->do_sort); + if (err) + _ocf_cleaner_fire_error(master, rq, err); + rq = NULL; + i_out = 0; + } + + /* prevent cleaning completion race */ + _ocf_cleaner_complete_rq(master); + ocf_rq_put(master); +} + +static void ocf_cleaner_sync_end(void *private_data, int error) +{ + struct ocf_cleaner_sync *sync = private_data; + + OCF_DEBUG_TRACE(rq->cache); + if (error) + sync->error = error; + + env_completion_complete(&sync->cmpl); +} + +static int _ocf_cleaner_do_flush_data_getter(struct ocf_cache *cache, + void *context, uint32_t item, ocf_cache_line_t *line) +{ + struct flush_data *flush = context; + + if (flush[item].cache_line < cache->device->collision_table_entries) { + (*line) = flush[item].cache_line; + return 0; + } else { + return -1; + } +} + +int ocf_cleaner_do_flush_data(struct ocf_cache *cache, + struct flush_data *flush, uint32_t count, + struct ocf_cleaner_attribs *attribs) +{ + struct ocf_cleaner_sync sync; + + env_completion_init(&sync.cmpl); + sync.error = 0; + attribs->cmpl_context = &sync; + attribs->cmpl_fn = 
ocf_cleaner_sync_end; + attribs->getter = _ocf_cleaner_do_flush_data_getter; + attribs->getter_context = flush; + attribs->count = count; + + ocf_cleaner_fire(cache, attribs); + + if (attribs->metadata_locked) + OCF_METADATA_UNLOCK_WR(); + + env_completion_wait(&sync.cmpl); + + if (attribs->metadata_locked) + OCF_METADATA_LOCK_WR(); + + attribs->cmpl_context = NULL; + return sync.error; +} + +int ocf_cleaner_do_flush_data_async(struct ocf_cache *cache, + struct flush_data *flush, uint32_t count, + struct ocf_cleaner_attribs *attribs) +{ + attribs->getter = _ocf_cleaner_do_flush_data_getter; + attribs->getter_context = flush; + attribs->count = count; + + ocf_cleaner_fire(cache, attribs); + + return 0; +} + +/* Helper function for 'sort' */ +static int _ocf_cleaner_cmp(const void *a, const void *b) +{ + struct flush_data *_a = (struct flush_data *)a; + struct flush_data *_b = (struct flush_data *)b; + + /* TODO: FIXME get rid of static */ + static uint32_t step = 0; + + OCF_COND_RESCHED(step, 1000000) + + if (_a->core_id == _b->core_id) + return (_a->core_line > _b->core_line) ? 1 : -1; + + return (_a->core_id > _b->core_id) ? 1 : -1; +} + +static void _ocf_cleaner_swap(void *a, void *b, int size) +{ + struct flush_data *_a = (struct flush_data *)a; + struct flush_data *_b = (struct flush_data *)b; + struct flush_data t; + + t = *_a; + *_a = *_b; + *_b = t; +} + +void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num) +{ + env_sort(tbl, num, sizeof(*tbl), _ocf_cleaner_cmp, _ocf_cleaner_swap); +} + +void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl, + uint32_t num) +{ + int i; + + for (i = 0; i < num; i++) { + env_sort(fctbl[i].flush_data, fctbl[i].count, + sizeof(*fctbl[i].flush_data), _ocf_cleaner_cmp, + _ocf_cleaner_swap); + } +} diff --git a/src/utils/utils_cleaner.h b/src/utils/utils_cleaner.h new file mode 100644 index 0000000..54412dc --- /dev/null +++ b/src/utils/utils_cleaner.h @@ -0,0 +1,133 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef UTILS_CLEANER_H_ +#define UTILS_CLEANER_H_ + +/** + * @brief Getter for next cache line to be cleaned + * + * @param cache[in] Cache instance + * @param getter_context[in] Context for cleaner caller + * @param item[in] Current iteration item when collection cache lines + * @param line[out] line to be cleaned + * @retval 0 When caller return zero it means take this cache line to clean + * @retval Non-zero Means skip this cache line and do not clean it + */ +typedef int (*ocf_cleaner_get_item)(struct ocf_cache *cache, + void *getter_context, uint32_t item, ocf_cache_line_t *line); + +/** + * @brief Cleaning attributes for clean request + */ +struct ocf_cleaner_attribs { + uint8_t cache_line_lock : 1; /*!< Clean under cache line lock */ + + uint8_t metadata_locked : 1; /*< true if caller holds metadata lock */ + + uint8_t do_sort : 1; /*!< Sort cache lines which will be cleaned */ + + uint32_t count; /*!< max number of cache lines to be cleaned */ + + void *cmpl_context; /*!< Completion context of cleaning requester */ + ocf_end_t cmpl_fn; /*!< Completion function of requester */ + + ocf_cleaner_get_item getter; + /*!< Getter for collecting cache lines which will be cleaned */ + void *getter_context; + /*!< Context for getting cache lines */ + uint32_t getter_item; + /*!< Additional variable that can be used by cleaner caller + * to iterate over items + */ + + uint32_t io_queue; +}; + +/** + * @brief Flush table entry structure + */ +struct flush_data { 
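+	/* A single dirty cache line to be cleaned: its line number within the core (core_line), its cache line index (cache_line) and the id of the owning core (core_id) */ 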
+ uint64_t core_line; + uint32_t cache_line; + ocf_core_id_t core_id; +}; + +/** + * @brief Flush table container + */ +struct flush_container { + ocf_core_id_t core_id; + struct flush_data *flush_data; + uint32_t count; + uint32_t iter; + + struct ocf_cleaner_attribs attribs; + ocf_cache_t cache; + env_atomic *progress; + env_atomic *error; + env_waitqueue *wq; + env_atomic completed; + + uint64_t flush_portion; + uint64_t ticks1; + uint64_t ticks2; +}; + +/** + * @brief Run cleaning procedure + * + * @param cache - Cache instance + * @param attribs - Cleaning attributes + */ +void ocf_cleaner_fire(struct ocf_cache *cache, + const struct ocf_cleaner_attribs *attribs); + +/** + * @brief Perform cleaning procedure for specified flush data synchronously. + * Only dirty cache lines will be cleaned. + * + * @param cache - Cache instance + * @param flush - flush data to be cleaned + * @param count - Count of cache lines to be cleaned + * @param attribs - Cleaning attributes + * @return - Cleaning result. 0 - no errors, non zero errors occurred + */ +int ocf_cleaner_do_flush_data(struct ocf_cache *cache, + struct flush_data *flush, uint32_t count, + struct ocf_cleaner_attribs *attribs); + +/** + * @brief Perform cleaning procedure for specified flush data. Only dirty + * cache lines will be cleaned. + * + * @param cache - Cache instance + * @param flush - flush data to be cleaned + * @param count - Count of cache lines to be cleaned + * @param attribs - Cleaning attributes + * @return - Cleaning result. 0 - no errors, non zero errors occurred + */ +int ocf_cleaner_do_flush_data_async(struct ocf_cache *cache, + struct flush_data *flush, uint32_t count, + struct ocf_cleaner_attribs *attribs); + +/** + * @brief Sort flush data by core sector + * + * @param tbl Flush data to sort + * @param num Number of entries in tbl + */ +void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num); + +/** + * @brief Sort flush data in all flush containters + * + * @param tbl Flush containers to sort + * @param num Number of entries in fctbl + */ +void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl, + uint32_t num); + +#endif /* UTILS_CLEANER_H_ */ diff --git a/src/utils/utils_device.h b/src/utils/utils_device.h new file mode 100644 index 0000000..17585f6 --- /dev/null +++ b/src/utils/utils_device.h @@ -0,0 +1,94 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef UTILS_DEVICE_H_ +#define UTILS_DEVICE_H_ + +static inline int _ocf_uuid_set(const struct ocf_data_obj_uuid *uuid, + struct ocf_metadata_uuid *muuid) +{ + int result; + + if (!uuid || !muuid) { + return -EINVAL; + } + + if (!uuid->data || !muuid->data) { + return -EINVAL; + } + + if (uuid->size > sizeof(muuid->data)) { + return -ENOBUFS; + } + + result = env_memcpy(muuid->data, sizeof(muuid->data), uuid->data, uuid->size); + if (result) + return result; + result = env_memset(muuid->data + uuid->size, + sizeof(muuid->data) - uuid->size, 0); + if (result) + return result; + muuid->size = uuid->size; + + return 0; +} + +static inline int ocf_uuid_cache_set(ocf_cache_t cache, + const struct ocf_data_obj_uuid *uuid) +{ + int result; + void *u; + + if (!uuid) + return -EINVAL; + + u = env_vmalloc(uuid->size); + if (!u) + return -ENOMEM; + + cache->device->obj.uuid.size = 0; + result = env_memcpy(u, uuid->size, + uuid->data, uuid->size); + if (result) { + env_vfree(u); + return result; + } + + cache->device->obj.uuid.data = u; + cache->device->obj.uuid.size = uuid->size; + + 
return 0; +} + +static inline void ocf_uuid_cache_clear(ocf_cache_t cache) +{ + env_vfree(cache->device->obj.uuid.data); + cache->device->obj.uuid.size = 0; +} + +static inline int ocf_uuid_core_set(ocf_cache_t cache, ocf_core_t core, + const struct ocf_data_obj_uuid *uuid) +{ + + struct ocf_data_obj_uuid *cuuid = &ocf_core_get_data_object(core)->uuid; + struct ocf_metadata_uuid *muuid = ocf_metadata_get_core_uuid(cache, + ocf_core_get_id(core)); + if (_ocf_uuid_set(uuid, muuid)) { + return -ENOBUFS; + } + + cuuid->data = muuid->data; + cuuid->size = muuid->size; + + return 0; +} + +static inline void ocf_uuid_core_clear(ocf_cache_t cache, ocf_core_t core) +{ + struct ocf_data_obj_uuid uuid = { .size = 0, }; + ocf_uuid_core_set(cache, core, &uuid); +} + +#endif /* UTILS_MEM_H_ */ diff --git a/src/utils/utils_io.c b/src/utils/utils_io.c new file mode 100644 index 0000000..9731e7e --- /dev/null +++ b/src/utils/utils_io.c @@ -0,0 +1,383 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "../ocf_data_obj_priv.h" +#include "../ocf_request.h" +#include "utils_io.h" +#include "utils_cache_line.h" + +struct ocf_submit_io_wait_context { + env_completion complete; + int error; + env_atomic rq_remaining; +}; + +/* + * IO discard context + */ +struct discard_io_request { + void *context; + env_atomic req_remaining; + env_completion completion; + int error; +}; + +void ocf_submit_obj_flush(ocf_data_obj_t obj, ocf_end_t callback, void *ctx) +{ + struct ocf_io *io; + + io = ocf_dobj_new_io(obj); + if (!io) { + callback(ctx, -ENOMEM); + return; + } + + ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0); + ocf_io_set_default_cmpl(io, ctx, callback); + + ocf_dobj_submit_flush(io); +} + +static void _ocf_obj_flush_end(void *_cntx, int err) +{ + struct ocf_submit_io_wait_context *cntx = _cntx; + cntx->error = err; + env_completion_complete(&cntx->complete); +} + +int ocf_submit_obj_flush_wait(ocf_data_obj_t obj) +{ + struct ocf_submit_io_wait_context cntx = { }; + env_atomic_set(&cntx.rq_remaining, 1); + env_completion_init(&cntx.complete); + + ocf_submit_obj_flush(obj, _ocf_obj_flush_end, &cntx); + + env_completion_wait(&cntx.complete); + + return cntx.error; + +} + +static void ocf_submit_obj_discard_wait_io(struct ocf_io *io, int error) +{ + struct ocf_submit_io_wait_context *cntx = io->priv1; + + if (error) + cntx->error = error; + + ocf_io_put(io); /* Release IO */ + + if (env_atomic_dec_return(&cntx->rq_remaining)) + return; + + /* All discard IO handled, signal it by setting completion */ + env_completion_complete(&cntx->complete); +} + +int ocf_submit_obj_discard_wait(ocf_data_obj_t obj, uint64_t addr, + uint64_t length) +{ + struct ocf_submit_io_wait_context cntx = { }; + uint32_t bytes; + uint32_t max_length = ~0; + + ENV_BUG_ON(env_memset(&cntx, sizeof(cntx), 0)); + env_atomic_set(&cntx.rq_remaining, 1); + env_completion_init(&cntx.complete); + + while (length) { + struct ocf_io *io = ocf_dobj_new_io(obj); + + if (!io) { + cntx.error = -ENOMEM; + break; + } + + if (length > max_length) + bytes = max_length; + else + bytes = length; + + env_atomic_inc(&cntx.rq_remaining); + + ocf_io_configure(io, addr, bytes, OCF_WRITE, 0, 0); + ocf_io_set_cmpl(io, &cntx, NULL, + ocf_submit_obj_discard_wait_io); + ocf_dobj_submit_discard(io); + + addr += bytes; + length -= bytes; + } + + if (env_atomic_dec_return(&cntx.rq_remaining) == 0) + env_completion_complete(&cntx.complete); + + 
env_completion_wait(&cntx.complete); + + return cntx.error; +} + +static void ocf_submit_obj_zeroes_wait_io(struct ocf_io *io, int error) +{ + struct ocf_submit_io_wait_context *cntx = io->priv1; + + if (error) + cntx->error = error; + + env_completion_complete(&cntx->complete); +} + +int ocf_submit_write_zeroes_wait(ocf_data_obj_t obj, uint64_t addr, + uint64_t length) +{ + struct ocf_submit_io_wait_context cntx = { }; + uint32_t bytes; + uint32_t max_length = ~((uint32_t)PAGE_SIZE - 1); + uint32_t step = 0; + struct ocf_io *io; + + io = ocf_dobj_new_io(obj); + if (!io) + return -ENOMEM; + + while (length) { + env_completion_init(&cntx.complete); + + bytes = MIN(length, max_length); + + ocf_io_configure(io, addr, bytes, OCF_WRITE, 0, 0); + ocf_io_set_cmpl(io, &cntx, NULL, + ocf_submit_obj_zeroes_wait_io); + ocf_dobj_submit_write_zeroes(io); + + addr += bytes; + length -= bytes; + + env_completion_wait(&cntx.complete); + if (cntx.error) + break; + + OCF_COND_RESCHED_DEFAULT(step); + } + + ocf_io_put(io); + + return cntx.error; +} + +int ocf_submit_cache_page(struct ocf_cache *cache, uint64_t addr, + int dir, void *buffer) +{ + ctx_data_t *data; + struct ocf_io *io; + int result = 0; + + /* Allocate resources for IO */ + io = ocf_dobj_new_io(&cache->device->obj); + data = ctx_data_alloc(cache->owner, 1); + + if (!io || !data) { + result = -ENOMEM; + goto end; + } + + if (dir == OCF_WRITE) + ctx_data_wr_check(cache->owner, data, buffer, PAGE_SIZE); + + result = ocf_io_set_data(io, data, 0); + if (result) + goto end; + + ocf_io_configure(io, addr, PAGE_SIZE, dir, 0, 0); + + result = ocf_submit_io_wait(io); + if (result) + goto end; + + if (dir == OCF_READ) + ctx_data_rd_check(cache->owner, buffer, data, PAGE_SIZE); +end: + if (io) + ocf_io_put(io); + ctx_data_free(cache->owner, data); + return result; +} + +void ocf_submit_obj_discard(ocf_data_obj_t obj, struct ocf_request *req, + ocf_end_t callback, void *ctx) +{ + struct ocf_io *io = ocf_dobj_new_io(obj); + + if (!io) { + callback(ctx, -ENOMEM); + return; + } + + ocf_io_configure(io, SECTORS_TO_BYTES(req->discard.sector), + SECTORS_TO_BYTES(req->discard.nr_sects), + OCF_WRITE, 0, 0); + ocf_io_set_default_cmpl(io, ctx, callback); + ocf_io_set_data(io, req->data, 0); + + ocf_dobj_submit_discard(io); +} + +void ocf_submit_cache_reqs(struct ocf_cache *cache, + struct ocf_map_info *map_info, struct ocf_request *req, int dir, + unsigned int reqs, ocf_end_t callback, void *ctx) +{ + struct ocf_counters_block *cache_stats; + uint64_t flags = req->io ? req->io->flags : 0; + uint32_t class = req->io ? req->io->class : 0; + uint64_t addr, bytes, total_bytes = 0; + struct ocf_io *io; + uint32_t i; + int err; + + cache_stats = &cache->core_obj[req->core_id]. + counters->cache_blocks; + + if (reqs == 1) { + io = ocf_new_cache_io(cache); + if (!io) { + callback(ctx, -ENOMEM); + goto update_stats; + } + + addr = ocf_metadata_map_lg2phy(cache, + map_info[0].coll_idx); + addr *= ocf_line_size(cache); + addr += cache->device->metadata_offset; + addr += (req->byte_position % ocf_line_size(cache)); + bytes = req->byte_length; + + ocf_io_configure(io, addr, bytes, dir, class, flags); + ocf_io_set_default_cmpl(io, ctx, callback); + + err = ocf_io_set_data(io, req->data, 0); + if (err) { + ocf_io_put(io); + callback(ctx, err); + goto update_stats; + } + + ocf_dobj_submit_io(io); + total_bytes = req->byte_length; + + goto update_stats; + } + + /* Issue requests to cache. 
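Each iteration submits one IO per cache line; the first chunk is advanced by the request's offset within its cache line and the last chunk is shortened to the request's end, so only the requested byte range is transferred. 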
*/ + for (i = 0; i < reqs; i++) { + io = ocf_new_cache_io(cache); + + if (!io) { + /* Finish all IOs which left with ERROR */ + for (; i < reqs; i++) + callback(ctx, -ENOMEM); + goto update_stats; + } + + addr = ocf_metadata_map_lg2phy(cache, + map_info[i].coll_idx); + addr *= ocf_line_size(cache); + addr += cache->device->metadata_offset; + bytes = ocf_line_size(cache); + + if (i == 0) { + uint64_t seek = (req->byte_position % + ocf_line_size(cache)); + + addr += seek; + bytes -= seek; + } else if (i == (reqs - 1)) { + uint64_t skip = (ocf_line_size(cache) - + ((req->byte_position + req->byte_length) % + ocf_line_size(cache))) % ocf_line_size(cache); + + bytes -= skip; + } + + ocf_io_configure(io, addr, bytes, dir, class, flags); + ocf_io_set_default_cmpl(io, ctx, callback); + + err = ocf_io_set_data(io, req->data, total_bytes); + if (err) { + ocf_io_put(io); + /* Finish all IOs which left with ERROR */ + for (; i < reqs; i++) + callback(ctx, err); + goto update_stats; + } + ocf_dobj_submit_io(io); + total_bytes += bytes; + } + +update_stats: + if (dir == OCF_WRITE) + env_atomic64_add(total_bytes, &cache_stats->write_bytes); + else if (dir == OCF_READ) + env_atomic64_add(total_bytes, &cache_stats->read_bytes); +} + +void ocf_submit_obj_req(ocf_data_obj_t obj, struct ocf_request *rq, + int dir, ocf_end_t callback, void *ctx) +{ + struct ocf_cache *cache = rq->cache; + struct ocf_counters_block *core_stats; + uint64_t flags = rq->io ? rq->io->flags : 0; + uint32_t class = rq->io ? rq->io->class : 0; + struct ocf_io *io; + int err; + + core_stats = &cache->core_obj[rq->core_id]. + counters->core_blocks; + if (dir == OCF_WRITE) + env_atomic64_add(rq->byte_length, &core_stats->write_bytes); + else if (dir == OCF_READ) + env_atomic64_add(rq->byte_length, &core_stats->read_bytes); + + io = ocf_dobj_new_io(obj); + if (!io) { + callback(ctx, -ENOMEM); + return; + } + + ocf_io_configure(io, rq->byte_position, rq->byte_length, dir, + class, flags); + ocf_io_set_default_cmpl(io, ctx, callback); + err = ocf_io_set_data(io, rq->data, 0); + if (err) { + ocf_io_put(io); + callback(ctx, err); + return; + } + ocf_dobj_submit_io(io); +} + +static void ocf_submit_io_wait_end(struct ocf_io *io, int error) +{ + struct ocf_submit_io_wait_context *context = io->priv1; + + context->error |= error; + env_completion_complete(&context->complete); +} + +int ocf_submit_io_wait(struct ocf_io *io) +{ + struct ocf_submit_io_wait_context context; + + ENV_BUG_ON(env_memset(&context, sizeof(context), 0)); + env_completion_init(&context.complete); + context.error = 0; + + ocf_io_set_cmpl(io, &context, NULL, ocf_submit_io_wait_end); + ocf_dobj_submit_io(io); + env_completion_wait(&context.complete); + return context.error; +} diff --git a/src/utils/utils_io.h b/src/utils/utils_io.h new file mode 100644 index 0000000..68fa083 --- /dev/null +++ b/src/utils/utils_io.h @@ -0,0 +1,86 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef UTILS_IO_H_ +#define UTILS_IO_H_ + +#include "../ocf_request.h" + +/** + * Checks if 2 IOs are overlapping. 
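+ * (Note that ocf_io_overlaps() below passes start + count - 1 as the end arguments, so the end offsets are effectively compared as inclusive bounds.) 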
+ * @param start1 start of first range (inclusive) + * @param end1 end of first range (exclusive) + * @param start2 start of second range (inclusive) + * @param end2 end of second range (exclusive) + * @return 0 in case overlap is not detected, otherwise 1 + */ +static inline int ocf_io_range_overlaps(uint32_t start1, uint32_t end1, + uint32_t start2, uint32_t end2) +{ + if (start2 <= start1 && end2 >= start1) + return 1; + + if (start2 >= start1 && end1 >= start2) + return 1; + + return 0; +} + +/** + * Checks if 2 IOs are overlapping. + * @param start1 start of first range (inclusive) + * @param count1 no of bytes, cachelines (etc) for first range + * @param start2 start of second range (inclusive) + * @param count2 no of bytes, cachelines (etc) for second range + * @return 0 in case overlap is not detected, otherwise 1 + */ +static inline int ocf_io_overlaps(uint32_t start1, uint32_t count1, + uint32_t start2, uint32_t count2) +{ + return ocf_io_range_overlaps(start1, start1 + count1 - 1, start2, + start2 + count2 - 1); +} + +int ocf_submit_io_wait(struct ocf_io *io); + +void ocf_submit_obj_flush(ocf_data_obj_t obj, ocf_end_t callback, + void *context); + +int ocf_submit_obj_flush_wait(ocf_data_obj_t obj); + +int ocf_submit_obj_discard_wait(ocf_data_obj_t obj, uint64_t addr, + uint64_t length); + +void ocf_submit_obj_discard(ocf_data_obj_t obj, struct ocf_request *req, + ocf_end_t callback, void *ctx); + +int ocf_submit_write_zeroes_wait(ocf_data_obj_t obj, uint64_t addr, + uint64_t length); + +int ocf_submit_cache_page(struct ocf_cache *cache, uint64_t addr, + int dir, void *buffer); + +void ocf_submit_obj_req(ocf_data_obj_t obj, struct ocf_request *req, + int dir, ocf_end_t callback, void *ctx); + + +void ocf_submit_cache_reqs(struct ocf_cache *cache, + struct ocf_map_info *map_info, struct ocf_request *req, int dir, + unsigned int reqs, ocf_end_t callback, void *ctx); + +static inline struct ocf_io *ocf_new_cache_io(struct ocf_cache *cache) +{ + return ocf_dobj_new_io(&cache->device->obj); +} + +static inline struct ocf_io *ocf_new_core_io(struct ocf_cache *cache, + ocf_core_id_t core_id) +{ + ENV_BUG_ON(core_id >= OCF_CORE_MAX); + + return ocf_dobj_new_io(&cache->core_obj[core_id].obj); +} + +#endif /* UTILS_IO_H_ */ diff --git a/src/utils/utils_list.c b/src/utils/utils_list.c new file mode 100644 index 0000000..8bc2e63 --- /dev/null +++ b/src/utils/utils_list.c @@ -0,0 +1,64 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "utils_list.h" + +void ocf_lst_sort(struct ocf_lst *lst) +{ + ocf_cache_line_t iter_idx; + ocf_cache_line_t next_idx; + struct ocf_lst_entry *iter; + + if (!lst->cmp) { + /* No comparator, no needed to sort */ + return; + } + + if (ocf_lst_empty(lst)) { + /* List is empty nothing to do */ + return; + } + + /* Get iterator - first element on the list, and one after */ + iter_idx = lst->head->next; + iter = lst->getter(lst->cache, iter_idx); + next_idx = iter->next; + lst->getter(lst->cache, iter->next); + + /* Initialize list to initial empty state, it will be empty */ + lst->head->next = lst->invalid; + lst->head->prev = lst->invalid; + + while (iter_idx != lst->invalid) { + ocf_lst_init_entry(lst, iter); + + if (ocf_lst_empty(lst)) { + /* Put first at the the list */ + ocf_lst_add(lst, iter_idx); + } else { + /* search for place where put element at the list */ + struct ocf_lst_entry *pos; + ocf_cache_line_t pos_idx = lst->invalid; + + for_each_lst(lst, pos, pos_idx) + if 
(lst->cmp(lst->cache, pos, iter) > 0) + break; + + if (lst->invalid == pos_idx) { + /* Put at the end of list */ + ocf_lst_add_tail(lst, iter_idx); + } else { + /* Position is known, put it before */ + ocf_lst_add_before(lst, pos_idx, iter_idx); + } + } + + /* Switch to next */ + iter_idx = next_idx; + iter = lst->getter(lst->cache, iter_idx); + next_idx = iter->next; + } +} diff --git a/src/utils/utils_list.h b/src/utils/utils_list.h new file mode 100644 index 0000000..5011547 --- /dev/null +++ b/src/utils/utils_list.h @@ -0,0 +1,207 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __UTILS_LIST_H__ +#define __UTILS_LIST_H__ + +#include "ocf_env.h" +#include "../ocf_ctx_priv.h" +#include "ocf/ocf_cache.h" + +#define OCF_LST_DBG 1 + +#if 1 == OCF_LST_DBG +#define OCF_LST_DBG_ON(lst, cond) ({ \ + if (cond) { \ + ocf_log(ocf_cache_get_ctx(lst->cache), log_crit, \ + "OCF list critical problem (%s:%u)\n", \ + __func__, __LINE__); \ + ocf_log_stack_trace(ocf_cache_get_ctx(lst->cache)); \ + } \ +}) +#else +#define OCF_LST_DBG_ON(lst, cond) +#endif + +#define OCF_LST_ENTRY_OUT(lst) ((lst)->invalid + 1) + +struct ocf_lst_entry { + ocf_cache_line_t next; + ocf_cache_line_t prev; +}; + +typedef struct ocf_lst_entry *(*ocf_mlst_getter)( + struct ocf_cache *cache, ocf_cache_line_t idx); + +typedef int (*ocf_mlst_cmp)(struct ocf_cache *cache, + struct ocf_lst_entry *e1, struct ocf_lst_entry *e2); + +struct ocf_lst { + struct ocf_lst_entry *head; + ocf_cache_line_t invalid; + ocf_mlst_getter getter; + ocf_mlst_cmp cmp; + struct ocf_cache *cache; + + struct { + uint32_t active : 1; + } flags; +}; + +static inline void ocf_lst_init_entry(struct ocf_lst *lst, + struct ocf_lst_entry *entry) +{ + entry->next = entry->prev = OCF_LST_ENTRY_OUT(lst); +} + +static inline bool ocf_lst_is_entry(struct ocf_lst *lst, + struct ocf_lst_entry *entry) +{ + if (entry->next == OCF_LST_ENTRY_OUT(lst) && + entry->prev == OCF_LST_ENTRY_OUT(lst)) + return false; + + if (entry->next < OCF_LST_ENTRY_OUT(lst) && + entry->prev < OCF_LST_ENTRY_OUT(lst)) + return true; + + ENV_BUG(); + return false; +} + +static inline void ocf_lst_init(struct ocf_cache *cache, + struct ocf_lst *lst, ocf_cache_line_t invalid, + ocf_mlst_getter getter, ocf_mlst_cmp cmp) +{ + ocf_cache_line_t idx; + + ENV_BUG_ON(env_memset(lst, sizeof(*lst), 0)); + + lst->head = getter(cache, invalid); + lst->head->next = invalid; + lst->head->prev = invalid; + lst->invalid = invalid; + lst->getter = getter; + lst->cmp = cmp; + lst->cache = cache; + + for (idx = 0; idx < lst->invalid; idx++) { + struct ocf_lst_entry *entry = getter(cache, idx); + + ocf_lst_init_entry(lst, entry); + } +} + +static inline void ocf_lst_add_after(struct ocf_lst *lst, + ocf_cache_line_t at, ocf_cache_line_t idx) +{ + struct ocf_lst_entry *after = lst->getter(lst->cache, at); + struct ocf_lst_entry *next = lst->getter(lst->cache, after->next); + struct ocf_lst_entry *this = lst->getter(lst->cache, idx); + + OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this)); + OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, after)); + OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, next)); + + this->next = after->next; + this->prev = at; + after->next = idx; + next->prev = idx; +} + +static inline void ocf_lst_add_before(struct ocf_lst *lst, + ocf_cache_line_t at, ocf_cache_line_t idx) +{ + struct ocf_lst_entry *before = lst->getter(lst->cache, at); + struct ocf_lst_entry *prev = lst->getter(lst->cache, before->prev); + struct ocf_lst_entry *this = 
lst->getter(lst->cache, idx); + + OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this)); + OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, before)); + OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, prev)); + + this->next = at; + this->prev = before->prev; + before->prev = idx; + prev->next = idx; +} + +static inline void ocf_lst_add(struct ocf_lst *lst, ocf_cache_line_t idx) +{ + struct ocf_lst_entry *this = lst->getter(lst->cache, idx); + struct ocf_lst_entry *next = lst->getter(lst->cache, lst->head->next); + + OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this)); + OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, next)); + + this->next = lst->head->next; + next->prev = idx; + lst->head->next = idx; + this->prev = lst->invalid; +} + +static inline void ocf_lst_add_tail(struct ocf_lst *lst, ocf_cache_line_t idx) +{ + struct ocf_lst_entry *this = lst->getter(lst->cache, idx); + struct ocf_lst_entry *prev = lst->getter(lst->cache, lst->head->prev); + + OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this)); + OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, prev)); + + this->next = lst->invalid; + this->prev = lst->head->prev; + prev->next = idx; + lst->head->prev = idx; +} + +static inline void ocf_lst_del(struct ocf_lst *lst, ocf_cache_line_t idx) +{ + struct ocf_lst_entry *this = lst->getter(lst->cache, idx); + struct ocf_lst_entry *next = lst->getter(lst->cache, this->next); + struct ocf_lst_entry *prev = lst->getter(lst->cache, this->prev); + + OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, this)); + OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, next)); + OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, prev)); + + prev->next = this->next; + next->prev = this->prev; + + ocf_lst_init_entry(lst, this); +} + +static inline ocf_cache_line_t ocf_lst_head(struct ocf_lst *lst) +{ + return lst->head->next; +} + +static inline ocf_cache_line_t ocf_lst_tail(struct ocf_lst *lst) +{ + return lst->head->prev; +} + +static inline bool ocf_lst_empty(struct ocf_lst *lst) +{ + if (lst->head->next == lst->invalid) + return true; + else + return false; +} + +void ocf_lst_sort(struct ocf_lst *lst); + +#define for_each_lst(lst, entry, id) \ +for (id = (lst)->head->next, entry = (lst)->getter((lst)->cache, id); \ + entry != (lst)->head; id = entry->next, \ + entry = (lst)->getter((lst)->cache, id)) + +#define for_each_lst_entry(lst, entry, id, type, member) \ +for (id = (lst)->head->next, \ + entry = container_of((lst)->getter((lst)->cache, id), type, member); \ + entry != container_of((lst)->head, type, member); \ + id = entry->member.next, \ + entry = container_of((lst)->getter((lst)->cache, id), type, member)) + +#endif /* __UTILS_LIST_H__ */ diff --git a/src/utils/utils_part.c b/src/utils/utils_part.c new file mode 100644 index 0000000..df077ee --- /dev/null +++ b/src/utils/utils_part.c @@ -0,0 +1,194 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "../ocf_request.h" +#include "../metadata/metadata.h" +#include "../engine/cache_engine.h" +#include "../eviction/ops.h" +#include "utils_part.h" + +static struct ocf_lst_entry *ocf_part_lst_getter_valid( + struct ocf_cache *cache, ocf_cache_line_t idx) +{ + ENV_BUG_ON(idx > OCF_IO_CLASS_MAX); + return &cache->user_parts[idx].lst_valid; +} + + +static int ocf_part_lst_cmp_valid(struct ocf_cache *cache, + struct ocf_lst_entry *e1, struct ocf_lst_entry *e2) +{ + struct ocf_user_part *p1 = container_of(e1, struct ocf_user_part, + lst_valid); + struct ocf_user_part *p2 = container_of(e2, 
struct ocf_user_part, + lst_valid); + size_t p1_size = ocf_cache_is_device_attached(cache) ? + p1->runtime->curr_size : 0; + size_t p2_size = ocf_cache_is_device_attached(cache) ? + p2->runtime->curr_size : 0; + + int v1 = p1->config->priority; + int v2 = p2->config->priority; + + /* + * If partition is invalid the priority depends on current size: + * 1. Partition is empty - move to the end of list + * 2. Partition is not empty - move to the beginning of the list. This + * partition will be evicted first + */ + + if (p1->config->priority == OCF_IO_CLASS_PRIO_PINNED) + p1->config->flags.eviction = false; + else + p1->config->flags.eviction = true; + + if (p2->config->priority == OCF_IO_CLASS_PRIO_PINNED) + p2->config->flags.eviction = false; + else + p2->config->flags.eviction = true; + + if (!p1->config->flags.valid) { + if (p1_size) { + v1 = SHRT_MAX; + p1->config->flags.eviction = true; + } else { + v1 = SHRT_MIN; + p1->config->flags.eviction = false; + } + } + + if (!p2->config->flags.valid) { + if (p2_size) { + v2 = SHRT_MAX; + p1->config->flags.eviction = true; + } else { + v2 = SHRT_MIN; + p2->config->flags.eviction = false; + } + } + + if (v1 == v2) { + v1 = p1 - cache->user_parts; + v2 = p2 - cache->user_parts; + } + + return v2 - v1; +} + +int ocf_part_init(struct ocf_cache *cache) +{ + ocf_lst_init(cache, &cache->lst_part, OCF_IO_CLASS_MAX, + ocf_part_lst_getter_valid, ocf_part_lst_cmp_valid); + + return 0; +} + +void ocf_part_move(struct ocf_request *rq) +{ + struct ocf_cache *cache = rq->cache; + struct ocf_map_info *entry; + ocf_cache_line_t line; + ocf_part_id_t id_old, id_new; + uint32_t i; + ocf_cleaning_t type = cache->conf_meta->cleaning_policy_type; + + ENV_BUG_ON(type >= ocf_cleaning_max); + + entry = &rq->map[0]; + for (i = 0; i < rq->core_line_count; i++, entry++) { + if (!entry->re_part) { + /* Changing partition not required */ + continue; + } + + if (entry->status != LOOKUP_HIT) { + /* No HIT */ + continue; + } + + line = entry->coll_idx; + id_old = ocf_metadata_get_partition_id(cache, line); + id_new = rq->part_id; + + ENV_BUG_ON(id_old >= OCF_IO_CLASS_MAX || + id_new >= OCF_IO_CLASS_MAX); + + if (id_old == id_new) { + /* Partition of the request and cache line is the same, + * no need to change partition + */ + continue; + } + + /* Remove from old eviction */ + ocf_eviction_purge_cache_line(cache, line); + + if (metadata_test_dirty(cache, line)) { + /* + * Remove cline from cleaning - this if for ioclass + * oriented cleaning policy (e.g. ALRU). + * TODO: Consider adding update_cache_line() ops + * to cleaning policy to let policies handle this. + */ + if (cleaning_policy_ops[type].purge_cache_block) + cleaning_policy_ops[type]. + purge_cache_block(cache, line); + } + + /* Let's change partition */ + ocf_metadata_remove_from_partition(cache, id_old, line); + ocf_metadata_add_to_partition(cache, id_new, line); + + /* Add to new eviction */ + ocf_eviction_init_cache_line(cache, line, id_new); + ocf_eviction_set_hot_cache_line(cache, line); + + /* Check if cache line is dirty. If yes then need to change + * cleaning policy and update partition dirty clines + * statistics. + */ + if (metadata_test_dirty(cache, line)) { + /* Add cline back to cleaning policy */ + if (cleaning_policy_ops[type].set_hot_cache_line) + cleaning_policy_ops[type]. + set_hot_cache_line(cache, line); + + env_atomic_inc(&cache->core_runtime_meta[rq->core_id]. + part_counters[id_new].dirty_clines); + env_atomic_dec(&cache->core_runtime_meta[rq->core_id]. 
+ part_counters[id_old].dirty_clines); + } + + env_atomic_inc(&cache->core_runtime_meta[rq->core_id]. + part_counters[id_new].cached_clines); + env_atomic_dec(&cache->core_runtime_meta[rq->core_id]. + part_counters[id_old].cached_clines); + + /* DONE */ + } +} + +void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id, + bool valid) +{ + struct ocf_user_part *part = &cache->user_parts[id]; + + if (valid ^ part->config->flags.valid) { + if (valid) { + part->config->flags.valid = true; + cache->conf_meta->valid_parts_no++; + } else { + part->config->flags.valid = false; + cache->conf_meta->valid_parts_no--; + part->config->priority = OCF_IO_CLASS_PRIO_LOWEST; + part->config->min_size = 0; + part->config->max_size = PARTITION_SIZE_MAX; + ENV_BUG_ON(env_strncpy(part->config->name, sizeof(part->config->name), + "Inactive", 9)); + } + } +} diff --git a/src/utils/utils_part.h b/src/utils/utils_part.h new file mode 100644 index 0000000..6bbb326 --- /dev/null +++ b/src/utils/utils_part.h @@ -0,0 +1,117 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __UTILS_PARTITION_H__ +#define __UTILS_PARTITION_H__ + +#include "../ocf_request.h" +#include "../engine/cache_engine.h" +#include "../metadata/metadata_partition.h" + +int ocf_part_init(struct ocf_cache *cache); + +static inline bool ocf_part_is_valid(struct ocf_user_part *part) +{ + return !!part->config->flags.valid; +} + +static inline void ocf_part_set_prio(struct ocf_cache *cache, + struct ocf_user_part *part, int16_t prio) +{ + if (part->config->priority != prio) + part->config->priority = prio; +} + +static inline int16_t ocf_part_get_prio(struct ocf_cache *cache, + ocf_part_id_t part_id) +{ + if (part_id < OCF_IO_CLASS_MAX) + return cache->user_parts[part_id].config->priority; + + return OCF_IO_CLASS_PRIO_LOWEST; +} + +void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id, + bool valid); + +static inline bool ocf_part_is_added(struct ocf_user_part *part) +{ + return !!part->config->flags.added; +} + +static inline ocf_part_id_t ocf_part_class2id(ocf_cache_t cache, uint64_t class) +{ + if (class < OCF_IO_CLASS_MAX) + if (cache->user_parts[class].config->flags.valid) + return class; + + return PARTITION_DEFAULT; +} + +void ocf_part_move(struct ocf_request *rq); + +#define for_each_part(cache, part, id) \ + for_each_lst_entry(&cache->lst_part, part, id, \ + struct ocf_user_part, lst_valid) + +static inline void ocf_part_sort(struct ocf_cache *cache) +{ + ocf_lst_sort(&cache->lst_part); +} + +static inline ocf_cache_mode_t ocf_part_get_cache_mode(struct ocf_cache *cache, + ocf_part_id_t part_id) +{ + if (part_id < OCF_IO_CLASS_MAX) + return cache->user_parts[part_id].config->cache_mode; + return ocf_cache_mode_none; +} + +static inline bool ocf_part_is_prio_valid(int64_t prio) +{ + switch (prio) { + case OCF_IO_CLASS_PRIO_HIGHEST ... OCF_IO_CLASS_PRIO_LOWEST: + case OCF_IO_CLASS_PRIO_PINNED: + return true; + + default: + return false; + } +} + +/** + * routine checks for validity of a partition name. + * + * Following condition is checked: + * - string too long + * - string containing invalid characters (outside of low ascii) + * Following condition is NOT cheched: + * - empty string. (empty string is NOT a valid partition name, but + * this function returns true on empty string nevertheless). 
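+ * The characters ',' and '"' are rejected as well, and the name must be shorter than OCF_IO_CLASS_NAME_MAX. 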
+ * + * @return returns true if partition name is a valid name + */ +static inline bool ocf_part_is_name_valid(const char *name) +{ + uint32_t length = 0; + + while (*name) { + if (*name < ' ' || *name > '~') + return false; + + if (',' == *name || '"' == *name) + return false; + + name++; + length++; + + if (length >= OCF_IO_CLASS_NAME_MAX) + return false; + } + + return true; +} + +#endif /* __UTILS_PARTITION_H__ */ diff --git a/src/utils/utils_rq.c b/src/utils/utils_rq.c new file mode 100644 index 0000000..7e8dda1 --- /dev/null +++ b/src/utils/utils_rq.c @@ -0,0 +1,316 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#include "ocf/ocf.h" +#include "utils_rq.h" +#include "utils_cache_line.h" +#include "../ocf_request.h" +#include "../ocf_cache_priv.h" + +#define OCF_UTILS_RQ_DEBUG 0 + +#if 1 == OCF_UTILS_RQ_DEBUG +#define OCF_DEBUG_TRACE(cache) \ + ocf_cache_log(cache, log_info, "[Utils][RQ] %s\n", __func__) + +#define OCF_DEBUG_PARAM(cache, format, ...) \ + ocf_cache_log(cache, log_info, "[Utils][RQ] %s - "format"\n", \ + __func__, ##__VA_ARGS__) +#else +#define OCF_DEBUG_TRACE(cache) +#define OCF_DEBUG_PARAM(cache, format, ...) +#endif + +enum ocf_rq_size { + ocf_rq_size_1 = 0, + ocf_rq_size_2, + ocf_rq_size_4, + ocf_rq_size_8, + ocf_rq_size_16, + ocf_rq_size_32, + ocf_rq_size_64, + ocf_rq_size_128, + ocf_rq_size_max, +}; + +struct ocf_rq_allocator { + env_allocator *allocator[ocf_rq_size_max]; + size_t size[ocf_rq_size_max]; +}; + +static inline size_t ocf_rq_sizeof_map(struct ocf_request *rq) +{ + uint32_t lines = rq->alloc_core_line_count; + size_t size = (lines * sizeof(struct ocf_map_info)); + + ENV_BUG_ON(lines == 0); + return size; +} + +static inline size_t ocf_rq_sizeof(uint32_t lines) +{ + size_t size = sizeof(struct ocf_request) + + (lines * sizeof(struct ocf_map_info)); + + ENV_BUG_ON(lines == 0); + return size; +} + +#define ALLOCATOR_NAME_FMT "ocf_rq_%u" +/* Max number of digits in decimal representation of unsigned int is 10 */ +#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + 10) + +int ocf_rq_allocator_init(struct ocf_ctx *ocf_ctx) +{ + int i; + struct ocf_rq_allocator *rq; + char name[ALLOCATOR_NAME_MAX] = { '\0' }; + + OCF_DEBUG_TRACE(cache); + + ocf_ctx->resources.rq = env_zalloc(sizeof(*(ocf_ctx->resources.rq)), + ENV_MEM_NORMAL); + rq = ocf_ctx->resources.rq; + + if (!rq) + goto ocf_utils_rq_init_ERROR; + + for (i = 0; i < ARRAY_SIZE(rq->allocator); i++) { + rq->size[i] = ocf_rq_sizeof(1 << i); + + if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT, + (1 << i)) < 0) { + goto ocf_utils_rq_init_ERROR; + } + + rq->allocator[i] = env_allocator_create(rq->size[i], name); + + if (!rq->allocator[i]) + goto ocf_utils_rq_init_ERROR; + + OCF_DEBUG_PARAM(cache, "New request allocator, lines = %u, " + "size = %lu", 1 << i, rq->size[i]); + } + + return 0; + +ocf_utils_rq_init_ERROR: + + ocf_rq_allocator_deinit(ocf_ctx); + + return -1; +} + +void ocf_rq_allocator_deinit(struct ocf_ctx *ocf_ctx) +{ + int i; + struct ocf_rq_allocator *rq; + + OCF_DEBUG_TRACE(cache); + + + if (!ocf_ctx->resources.rq) + return; + + rq = ocf_ctx->resources.rq; + + for (i = 0; i < ARRAY_SIZE(rq->allocator); i++) { + if (rq->allocator[i]) { + env_allocator_destroy(rq->allocator[i]); + rq->allocator[i] = NULL; + } + } + + env_free(rq); + ocf_ctx->resources.rq = NULL; +} + +static inline env_allocator *_ocf_rq_get_allocator_1( + struct ocf_cache *cache) +{ + return cache->owner->resources.rq->allocator[0]; +} + +static env_allocator 
*_ocf_rq_get_allocator( + struct ocf_cache *cache, uint32_t count) +{ + struct ocf_ctx *ocf_ctx = cache->owner; + unsigned int idx = 31 - __builtin_clz(count); + + if (__builtin_ffs(count) <= idx) + idx++; + + ENV_BUG_ON(count == 0); + + if (idx >= ocf_rq_size_max) + return NULL; + + return ocf_ctx->resources.rq->allocator[idx]; +} + +static void start_cache_req(struct ocf_request *rq) +{ + ocf_cache_t cache = rq->cache; + + rq->d2c = 1; + if (env_atomic_read(&cache->attached)) { + rq->d2c = 0 ; + env_atomic_inc(&cache->pending_cache_requests); + if (!env_atomic_read(&cache->attached)) { + rq->d2c = 1; + env_atomic_dec(&cache->pending_cache_requests); + } + } +} + +struct ocf_request *ocf_rq_new(struct ocf_cache *cache, + ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw) +{ + uint64_t core_line_first, core_line_last, core_line_count; + struct ocf_request *rq; + env_allocator *allocator; + + if (likely(bytes)) { + core_line_first = ocf_bytes_2_lines(cache, addr); + core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1); + core_line_count = core_line_last - core_line_first + 1; + } else { + core_line_first = ocf_bytes_2_lines(cache, addr); + core_line_last = core_line_first; + core_line_count = 1; + } + + allocator = _ocf_rq_get_allocator(cache, core_line_count); + if (allocator) { + rq = env_allocator_new(allocator); + } else { + rq = env_allocator_new(_ocf_rq_get_allocator_1(cache)); + } + + if (unlikely(!rq)) + return NULL; + + if (allocator) + rq->map = rq->__map; + + OCF_DEBUG_TRACE(cache); + + rq->cache = cache; + + env_atomic_inc(&cache->pending_requests); + start_cache_req(rq); + + rq->io_queue = 0; + env_atomic_set(&rq->ref_count, 1); + rq->core_id = core_id; + + rq->byte_position = addr; + rq->byte_length = bytes; + rq->core_line_first = core_line_first; + rq->core_line_last = core_line_last; + rq->core_line_count = core_line_count; + rq->alloc_core_line_count = core_line_count; + rq->rw = rw; + rq->part_id = PARTITION_DEFAULT; + + return rq; +} + +int ocf_rq_alloc_map(struct ocf_request *rq) +{ + if (rq->map) + return 0; + + rq->map = env_zalloc(ocf_rq_sizeof_map(rq), ENV_MEM_NOIO); + if (!rq->map) { + rq->error = -ENOMEM; + return -ENOMEM; + } + + return 0; +} + +struct ocf_request *ocf_rq_new_extended(struct ocf_cache *cache, + ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw) +{ + struct ocf_request *rq; + + rq = ocf_rq_new(cache, core_id, addr, bytes, rw); + + if (likely(rq) && ocf_rq_alloc_map(rq)) { + ocf_rq_put(rq); + return NULL; + } + + return rq; +} + +struct ocf_request *ocf_rq_new_discard(struct ocf_cache *cache, + ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw) +{ + struct ocf_request *rq; + + rq = ocf_rq_new_extended(cache, core_id, addr, + MIN(bytes, MAX_TRIM_RQ_SIZE),rw); + + if (!rq) + return NULL; + + rq->discard.sector = BYTES_TO_SECTORS(addr); + rq->discard.nr_sects = BYTES_TO_SECTORS(bytes); + rq->discard.handled = 0; + + return rq; +} + +void ocf_rq_get(struct ocf_request *rq) +{ + OCF_DEBUG_TRACE(rq->cache); + + env_atomic_inc(&rq->ref_count); +} + +void ocf_rq_put(struct ocf_request *rq) +{ + env_allocator *allocator; + + if (env_atomic_dec_return(&rq->ref_count)) + return; + + OCF_DEBUG_TRACE(rq->cache); + + if (!rq->d2c && !env_atomic_dec_return( + &rq->cache->pending_cache_requests)) { + env_waitqueue_wake_up(&rq->cache->pending_cache_wq); + } + + env_atomic_dec(&rq->cache->pending_requests); + + allocator = _ocf_rq_get_allocator(rq->cache, + rq->alloc_core_line_count); + if (allocator) { + 
env_allocator_del(allocator, rq); + } else { + env_free(rq->map); + env_allocator_del(_ocf_rq_get_allocator_1(rq->cache), rq); + } +} + +void ocf_rq_clear_info(struct ocf_request *rq) +{ + ENV_BUG_ON(env_memset(&rq->info, sizeof(rq->info), 0)); +} + +void ocf_rq_clear_map(struct ocf_request *rq) +{ + if (likely(rq->map)) + ENV_BUG_ON(env_memset(rq->map, + sizeof(rq->map[0]) * rq->core_line_count, 0)); +} + +uint32_t ocf_rq_get_allocated(struct ocf_cache *cache) +{ + return env_atomic_read(&cache->pending_requests); +} diff --git a/src/utils/utils_rq.h b/src/utils/utils_rq.h new file mode 100644 index 0000000..42171c2 --- /dev/null +++ b/src/utils/utils_rq.h @@ -0,0 +1,154 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef UTILS_RQ_H_ +#define UTILS_RQ_H_ + +#include "../ocf_request.h" + +/** + * @file utils_rq.h + * @brief OCF request allocation utilities + */ + +struct ocf_rq_allocator; + +/** + * @brief Initialize OCF request allocation utility + * + * @param cache - OCF cache instance + * @return Operation status 0 - successful, non-zero failure + */ +int ocf_rq_allocator_init(struct ocf_ctx *ocf_ctx); + +/** + * @brief De-initialize OCF request allocation utility + * + * @param cache - OCF cache instance + */ +void ocf_rq_allocator_deinit(struct ocf_ctx *ocf_ctx); + +/** + * @brief Allocate new OCF request + * + * @param cache - OCF cache instance + * @param core_id - Core id + * @param addr - LBA of request + * @param bytes - number of bytes of request + * @param rw - Read or Write + * + * @return new OCF request + */ +struct ocf_request *ocf_rq_new(struct ocf_cache *cache, + ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw); + +/** + * @brief Allocate OCF request map + * + * @param rq OCF request + * + * @retval 0 Allocation succeed + * @retval non-zero Allocation failed + */ +int ocf_rq_alloc_map(struct ocf_request *rq); + +/** + * @brief Allocate new OCF request with NOIO map allocation for huge request + * + * @param cache - OCF cache instance + * @param core_id - Core id + * @param addr - LBA of request + * @param bytes - number of bytes of request + * @param rw - Read or Write + * + * @return new OCF request + */ + +struct ocf_request *ocf_rq_new_extended(struct ocf_cache *cache, + ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw); + +/** + * @brief Allocate new OCF request for DISCARD operation + * + * @param cache - OCF cache instance + * @param core_id - Core id + * @param addr - LBA of request + * @param bytes - number of bytes of request + * @param rw - Read or Write + * + * @return new OCF request + */ +struct ocf_request *ocf_rq_new_discard(struct ocf_cache *cache, + ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw); + +/** + * @brief Get number of allocated requests + * + * @param cache OCF cache instance + * + * @return Number of allocated requests + */ +uint32_t ocf_rq_get_allocated(struct ocf_cache *cache); + +/** + * @brief Increment OCF request reference count + * + * @param rq - OCF request + */ +void ocf_rq_get(struct ocf_request *rq); + +/** + * @brief Decrement OCF request reference. 
If reference is 0 then request will + * be deallocated + * + * @param rq - OCF request + */ +void ocf_rq_put(struct ocf_request *rq); + +/** + * @brief Clear OCF request info + * + * @param rq - OCF request + */ +void ocf_rq_clear_info(struct ocf_request *rq); + +/** + * @brief Clear OCF request map + * + * @param rq - OCF request + */ +void ocf_rq_clear_map(struct ocf_request *rq); + +/** + * @brief Clear OCF request + * + * @param rq - OCF request + */ +static inline void ocf_rq_clear(struct ocf_request *rq) +{ + ocf_rq_clear_info(rq); + ocf_rq_clear_map(rq); + + env_atomic_set(&rq->lock_remaining, 0); + env_atomic_set(&rq->req_remaining, 0); +} + +/** + * @brief Return OCF request reference count + * + * @param rq - OCF request + * @return OCF request reference count + */ +static inline int ocf_rq_ref_count(struct ocf_request *rq) +{ + return env_atomic_read(&rq->ref_count); +} + +static inline bool ocf_rq_is_4k(uint64_t addr, uint32_t bytes) +{ + return !((addr % PAGE_SIZE) || (bytes % PAGE_SIZE)); +} + +#endif /* UTILS_RQ_H_ */ diff --git a/tests/ut-framework/README b/tests/ut-framework/README new file mode 100644 index 0000000..e3e98e8 --- /dev/null +++ b/tests/ut-framework/README @@ -0,0 +1,11 @@ +GENERATING NEW TEST + To add new test, run "add_new_test_file.py" with two parameters: + - tested file path (path must be relative to your current working dir) + - tested function name + Generated file name may be changed without any consequences. + + Good practise is to use "add_new_test_file.py" script from test directory (not framework), + because it prepend appropriate license header. + +RUNNING SINGLE TEST + Executable tests files are stored by default in 'UT_dir/build/sources_to_test_repository/' diff --git a/tests/ut-framework/add_new_test_file.py b/tests/ut-framework/add_new_test_file.py new file mode 100755 index 0000000..35ea0a9 --- /dev/null +++ b/tests/ut-framework/add_new_test_file.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python2 + +# +# Copyright(c) 2012-2018 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +import tests_config +import re +import os +import sys +import textwrap + +class TestGenerator(object): + main_UT_dir = "" + main_tested_dir = "" + tested_file_path = "" + tested_function_name = "" + + def __init__(self, main_UT_dir, main_tested_dir, file_path, func_name): + self.set_main_UT_dir(main_UT_dir) + self.set_main_tested_dir(main_tested_dir) + self.set_tested_file_path(file_path) + self.tested_function_name = func_name + + def create_empty_test_file(self): + dst_dir = os.path.dirname(self.get_tested_file_path()[::-1])[::-1] + + self.create_dir_if_not_exist(self.get_main_UT_dir() + dst_dir) + test_file_name = os.path.basename(self.get_tested_file_path()) + + dst_path = self.get_main_UT_dir() + dst_dir + "/" + test_file_name + + no_str = "" + no = 0 + while True: + if not os.path.isfile(dst_path.rsplit(".", 1)[0] + no_str + "." + dst_path.rsplit(".", 1)[1]): + break + no += 1 + no_str = str(no) + + dst_path = dst_path.rsplit(".", 1)[0] + no_str + "." + dst_path.rsplit(".", 1)[1] + buf = self.get_markups() + buf += "#undef static\n\n" + buf += "#undef inline\n\n" + buf += self.get_UT_includes() + buf += self.get_includes(self.get_main_tested_dir() + self.get_tested_file_path()) + buf += self.get_empty_test_function() + buf += self.get_test_main(self.get_tested_file_path()) + + with open(dst_path, "w") as f: + code = f.writelines(buf) + + print dst_path + " generated succesfully!" 
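For reference, below is a rough sketch of the skeleton that create_empty_test_file() assembles, shown for a hypothetical invocation against src/utils/utils_part.c and the function ocf_part_is_name_valid. The bare #include lines emitted by get_UT_includes() are assumed here to be the usual cmocka preamble, and the include list copied from the tested source is elided; this is an illustration of the generator's output shape, not part of the commit.

```c
/*
 * src/utils/utils_part.c
 * ocf_part_is_name_valid
 *
 *	INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE
 *	ONE FUNCTION PER LINE
 *
 */

#undef static

#undef inline

#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
#include "print_desc.h"

/* #include lines copied from the tested source file land here */

static void ocf_part_is_name_valid_test01(void **state)
{
	print_test_description("Put test description here");
	assert_int_equal(1, 1);
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(ocf_part_is_name_valid_test01)
	};

	print_message("Unit test of src/utils/utils_part.c");

	return cmocka_run_group_tests(tests, NULL, NULL);
}
```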
+ + def get_markups(self): + ret = "/*\n" + ret += " * " + self.get_tested_file_path() + "\n" + ret += " * " + self.get_tested_function_name() + "\n" + ret += " * \n" + ret += " *\tINSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE\n" + ret += " *\tONE FUNCTION PER LINE\n" + ret += " * \n" + ret += " */\n\n" + + return ret + + def create_dir_if_not_exist(self, path): + if not os.path.isdir(path): + try: + os.makedirs(path) + except Exception: + pass + return True + return None + + + def get_UT_includes(self): + ret = ''' + #include + #include + #include + #include + #include "print_desc.h"\n\n''' + + return textwrap.dedent(ret) + + def get_includes(self, abs_path_to_tested_file): + ret = [] + code = [] + + with open(abs_path_to_tested_file, "r") as f: + code = f.readlines() + + ret = [line for line in code if re.search(r'#include', line)] + + return "".join(ret) + "\n\n" + + def get_empty_test_function(self): + ret = "static void " + self.get_tested_function_name() + "_test01(void **state)\n" + ret += "{\n" + ret += "\tprint_test_description(\"Put test description here\");\n" + ret += "\tassert_int_equal(1,1);\n" + ret += "}\n\n" + + return ret + + def get_test_main(self, rel_path): + ret = "int main(void)\n" + ret += "{\n" + ret += "\tconst struct CMUnitTest tests[] = {\n" + ret += "\t\tcmocka_unit_test(" + self.get_tested_function_name() + "_test01)\n" + ret += "\t};\n\n" + ret += "\tprint_message(\"Unit test of " + self.get_tested_file_path() + "\");\n\n" + ret += "\treturn cmocka_run_group_tests(tests, NULL, NULL);\n" + ret += "}" + + return ret + + def set_tested_file_path(self, path): + call_dir = os.getcwd() + os.sep + p = os.path.normpath(call_dir + path) + + if os.path.isfile(p): + self.tested_file_path = p.split(self.get_main_tested_dir(), 1)[1] + return + elif os.path.isfile(self.get_main_tested_dir() + path): + self.tested_file_path = path + return + + print self.get_main_tested_dir() + path + print "Given path not exists!" + exit(1) + + + def set_main_UT_dir(self, path): + p = os.path.dirname(os.path.realpath(__file__)) + os.sep + path + p = os.path.normpath(os.path.dirname(p)) + os.sep + self.main_UT_dir = p + + def get_main_UT_dir(self): + return self.main_UT_dir + + def set_main_tested_dir(self, path): + p = os.path.dirname(os.path.realpath(__file__)) + os.sep + path + p = os.path.normpath(os.path.dirname(p)) + os.sep + self.main_tested_dir = p + + def get_main_tested_dir(self): + return self.main_tested_dir + + def get_tested_file_path(self): + return self.tested_file_path + + def get_tested_function_name(self): + return self.tested_function_name + +def __main__(): + if len(sys.argv) < 3: + print "No path to tested file or tested function name given !" 
+ sys.exit(1) + + tested_file_path = sys.argv[1] + tested_function_name = sys.argv[2] + + generator = TestGenerator(tests_config.MAIN_DIRECTORY_OF_UNIT_TESTS,\ + tests_config.MAIN_DIRECTORY_OF_TESTED_PROJECT,\ + tested_file_path, tested_function_name) + + generator.create_empty_test_file() + +if __name__ == "__main__": + __main__() diff --git a/tests/ut-framework/prepare_sources_for_testing.py b/tests/ut-framework/prepare_sources_for_testing.py new file mode 100755 index 0000000..1f68f69 --- /dev/null +++ b/tests/ut-framework/prepare_sources_for_testing.py @@ -0,0 +1,594 @@ +#!/usr/bin/env python2 + +# +# Copyright(c) 2012-2018 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +import shutil +import sys +import re +import commands +import os.path +from collections import defaultdict + +import tests_config +# +# This script purpose is to remove unused functions definitions +# It is giving the oportunity to unit test all functions from OCF. +# As a parameter should be given path to file containing function, +# which is target of testing. However that file has to be after +# preprocessing. +# +# Output file of this script is not ready to make it. Before that, +# has to be given definitions of functions, which are used by +# tested function. +# +# In brief: this script allow wraping all function calls in UT +# + +class UnitTestsSourcesGenerator(object): + script_file_abs_path = "" + script_dir_abs_path = "" + + main_UT_dir = "" + main_tested_dir = "" + + ctags_path = "" + + test_catalouges_list = [] + dirs_to_include_list = [] + + tests_internal_includes_list = [] + framework_includes = [] + + dirs_with_tests_list = [] + test_files_paths_list = [] + + tested_files_paths_list = [] + + includes_to_copy_dict = {} + + preprocessing_repo = "" + sources_to_test_repo = "" + + def __init__(self): + self.script_file_abs_path = os.path.realpath(__file__) + self.script_dir_abs_path = os.path.normpath(os.path.dirname(self.script_file_abs_path) + os.sep) + + self.set_ctags_path() + + self.set_main_UT_dir(tests_config.MAIN_DIRECTORY_OF_UNIT_TESTS) + self.set_main_tested_dir(tests_config.MAIN_DIRECTORY_OF_TESTED_PROJECT) + + self.test_catalouges_list = tests_config.DIRECTORIES_WITH_TESTS_LIST + self.set_includes_to_copy_dict(tests_config.INCLUDES_TO_COPY_DICT) + self.set_dirs_to_include() + + self.set_tests_internal_includes_list() + self.set_framework_includes() + self.set_files_with_tests_list() + self.set_tested_files_paths_list() + + self.set_preprocessing_repo() + self.set_sources_to_test_repo() + + def preprocessing(self): + tested_files_list = self.get_tested_files_paths_list() + project_includes = self.get_dirs_to_include_list() + framework_includes = self.get_tests_internal_includes_list() + + gcc_flags = " -fno-inline -Dstatic= -Dinline= -E " + gcc_command_template = "gcc " + for path in project_includes: + gcc_command_template += " -I " + path + " " + + for path in framework_includes: + gcc_command_template += " -I " + path + + gcc_command_template += gcc_flags + + for path in tested_files_list: + preprocessing_dst = self.get_preprocessing_repo() +\ + self.get_relative_path(path, self.get_main_tested_dir()) + preprocessing_dst_dir = os.path.dirname(preprocessing_dst) + self.create_dir_if_not_exist(preprocessing_dst_dir) + + gcc_command = gcc_command_template +\ + path + " > " + preprocessing_dst + + status, output = commands.getstatusoutput(gcc_command) + + if status != 0: + print "Generating preprocessing for " + self.get_main_tested_dir() + path \ + + " failed!" 
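+ # gcc failed for this file: show its output, drop the partial preprocessing
+ # result so later stages do not pick it up, and move on to the next file.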
+ print output + commands.getoutput("rm -f " + preprocessing_dst) + continue + + self.remove_hashes(preprocessing_dst) + + print "Preprocessed file " + path + " saved to " + preprocessing_dst + + def copy_includes(self): + includes_dict = self.get_includes_to_copy_dict() + + for dst, src in includes_dict.iteritems(): + src_path = os.path.normpath(self.get_main_tested_dir() + src) + if not os.path.isdir(src_path): + print "Directory " + src_path + " given to include does not exists!" + continue + dst_path = os.path.normpath(self.get_main_UT_dir() + dst) + + shutil.rmtree(dst_path) + shutil.copytree(src_path, dst_path) + + def prepare_sources_for_testing(self): + test_files_paths = self.get_files_with_tests_list() + + for test_path in test_files_paths: + path = self.get_tested_file_path(self.get_main_UT_dir() + test_path) + + preprocessed_tested_path = self.get_preprocessing_repo() + path + if not os.path.isfile(preprocessed_tested_path): + print "No preprocessed path for " + test_path + " test file." + continue + + tested_src = self.get_src_to_test(test_path, preprocessed_tested_path) + + self.create_dir_if_not_exist(self.get_sources_to_test_repo() + os.path.dirname(test_path)) + + with open(self.get_sources_to_test_repo() + test_path, "w") as f: + f.writelines(tested_src) + print "Sources for " + test_path + " saved in " +\ + self.get_sources_to_test_repo() + test_path + + def create_main_cmake_lists(self): + buf = "cmake_minimum_required(VERSION 2.6.0)\n\n" + buf += "project(OCF_unit_tests C)\n\n" + + buf += "enable_testing()\n\n" + + buf += "include_directories(\n" + dirs_to_inc = self.get_dirs_to_include_list() + self.get_framework_includes()\ + + self.get_tests_internal_includes_list() + for path in dirs_to_inc: + buf += "\t" + path + "\n" + buf += ")\n\n" + + includes = self.get_tests_internal_includes_list() + for path in includes: + buf += "\nadd_subdirectory(" + path + ")" + buf += "\n\n" + + test_files = self.get_files_with_tests_list() + test_dirs_to_include = [os.path.dirname(path) for path in test_files] + + test_dirs_to_include = self.remove_duplicates_from_list(test_dirs_to_include) + + for path in test_dirs_to_include: + buf += "\nadd_subdirectory(" + self.get_sources_to_test_repo() + path + ")" + + + with open(self.get_main_UT_dir() + "CMakeLists.txt", "w") as f: + f.writelines(buf) + + print "Main CMakeLists.txt generated written to " + self.get_main_UT_dir() + "CMakeLists.txt" + + def generate_cmakes_for_tests(self): + test_files_paths = self.get_files_with_tests_list() + + for test_path in test_files_paths: + tested_file_relative_path = self.get_tested_file_path(self.get_main_UT_dir() + test_path) + + tested_file_path = self.get_sources_to_test_repo() + test_path + if not os.path.isfile(tested_file_path): + print "No source to test for " + test_path + " test" + continue + + test_file_dir = os.path.dirname(test_path) + test_file_path = self.get_main_UT_dir() + test_path + + cmake_buf = self.generate_test_cmake_buf(test_file_path, tested_file_path) + + cmake_path = self.get_sources_to_test_repo() + test_path + cmake_path = os.path.splitext(cmake_path)[0] + ".cmake" + with open(cmake_path, "w") as f: + f.writelines(cmake_buf) + print "cmake file for " + test_path + " written to " + cmake_path + + cmake_lists_path = os.path.dirname(cmake_path) + os.sep + self.update_cmakelists(cmake_lists_path, cmake_path) + + def generate_test_cmake_buf(self, test_file_path, tested_file_path): + test_file_name = os.path.basename(test_file_path) + target_name = 
os.path.splitext(test_file_name)[0] + + add_executable = "add_executable(" + target_name + " " + test_file_path + " " + tested_file_path + ")\n" + + libraries = "target_link_libraries(" + target_name + " libcmocka.so ocf_env)\n" + + add_test = "add_test(" + target_name + " ${CMAKE_CURRENT_BINARY_DIR}/" + target_name + ")\n" + + tgt_properties = "set_target_properties(" + target_name + "\n" + \ + "PROPERTIES\n" + \ + "COMPILE_FLAGS \"-fno-inline -Dstatic= -Dinline= -w \"\n" + + link_flags = self.generate_cmake_link_flags(test_file_path) + tgt_properties += link_flags + ")" + + buf = add_executable + libraries + add_test + tgt_properties + + return buf + + def generate_cmake_link_flags(self, path): + ret = "" + + functions_to_wrap = self.get_functions_to_wrap(path) + + for function_name in functions_to_wrap: + ret += ",--wrap=" + function_name + if len(ret) > 0: + ret = "LINK_FLAGS \"-Wl" + ret + "\"\n" + + return ret + + def update_cmakelists(self, cmake_lists_path, cmake_name): + with open(cmake_lists_path + "CMakeLists.txt", "a+") as f: + f.seek(0, os.SEEK_SET) + new_line = "include(" + os.path.basename(cmake_name) + ")\n" + + if not new_line in f.read(): + f.write(new_line) + + def get_functions_to_wrap(self, path): + functions_list = self.get_functions_list(path) + functions_list = [re.sub(r'__wrap_([\S]+)\s*[\d]+', r'\1', line) for line in functions_list if re.search("__wrap_", line)] + + return functions_list + + def get_functions_to_leave(self, path): + buf = "" + + with open(path) as f: + l = f.readlines() + buf = ''.join(l) + + tags_pattern = re.compile("[\s\S]*") + + buf = re.findall(tags_pattern, buf) + if not len(buf) > 0: + return [] + + buf = buf[0] + + buf = re.sub(r'<.*>', '', buf) + buf = re.sub(r'[^a-zA-Z0-9_\n]+', '', buf) + + ret = buf.split("\n") + ret = [name for name in ret if name] + return ret + + def get_functions_list(self, file_path): + ctags_path = self.get_ctags_path() + + # find all functions' definitions | put tabs instead of spaces | + # take only columns with function name and line number | sort in descending order + status, output = commands.getstatusoutput(ctags_path + "-x --c-types=f " + file_path + " --language-force=c | \ + sed \"s/ \\+/\t/g\" | cut -f 1,3 | sort -nsr -k 2") + + # 'output' is string, but it has to be changed to list + output = output.split("\n") + return output + + def remove_functions_from_list(self, functions_list, to_remove_list): + ret = functions_list[:] + for function_name in to_remove_list: + ret = [line for line in ret if not re.search(r'\b%s\b' % function_name, line)] + return ret + + def get_src_to_test(self, test_path, preprocessed_tested_path): + functions_to_leave = self.get_functions_to_leave(self.get_main_UT_dir() + test_path) + + functions_to_leave.append(self.get_tested_function_name(self.get_main_UT_dir() + test_path)) + functions_list = self.get_functions_list(preprocessed_tested_path) + + functions_list = self.remove_functions_from_list(functions_list, functions_to_leave) + + with open(preprocessed_tested_path) as f: + ret = f.readlines() + for function in functions_list: + line = function.split("\t")[1] + line = int(line) + + self.remove_function_body(ret, line) + + return ret + + def set_tested_files_paths_list(self): + test_files_list = self.get_files_with_tests_list() + + for f in test_files_list: + self.tested_files_paths_list.append(self.get_main_tested_dir() +\ + self.get_tested_file_path(self.get_main_UT_dir() + f)) + + self.tested_files_paths_list = 
self.remove_duplicates_from_list(self.tested_files_paths_list) + + def get_tested_files_paths_list(self): + return self.tested_files_paths_list + + def get_files_with_tests_list(self): + return self.test_files_paths_list + + def set_files_with_tests_list(self): + test_catalouges_list = self.get_tests_catalouges_list() + for catalouge in test_catalouges_list: + dir_with_tests_path = self.get_main_UT_dir() + catalouge + + for path, dirs, files in os.walk(dir_with_tests_path): + test_files = self.get_test_files_from_dir(path + os.sep) + + for test_file_name in test_files: + test_rel_path = os.path.relpath(path + os.sep + test_file_name, self.get_main_UT_dir()) + self.test_files_paths_list.append(test_rel_path) + + def are_markups_valid(self, path): + file_path = self.get_tested_file_path(path) + function_name = self.get_tested_function_name(path) + + if file_path == None: + print path + " file has no tested_file tag!" + return None + elif not os.path.isfile(self.get_main_tested_dir() + file_path): + print "Tested file given in " + path + " not exist!" + return None + + if function_name == None: + print path + " file has no tested_function_name tag!" + return None + + return True + + def create_dir_if_not_exist(self, path): + if not os.path.isdir(path): + try: + os.makedirs(path) + except Exception: + pass + return True + return None + + def get_tested_file_path(self, test_file_path): + buf = "" + with open(test_file_path) as f: + buf = f.readlines() + buf = ''.join(buf) + + tags_pattern = re.compile("[\s\S]*") + buf = re.findall(tags_pattern, buf) + + if not len(buf) > 0: + return None + + buf = buf[0] + + buf = re.sub(r'<[^>]*>', '', buf) + buf = re.sub(r'\s+', '', buf) + + if len(buf) > 0: + return buf + + return None + + def get_tested_function_name(self, test_file_path): + buf = "" + with open(test_file_path) as f: + buf = f.readlines() + buf = ''.join(buf) + + tags_pattern = re.compile("[\s\S]*") + buf = re.findall(tags_pattern, buf) + + if not len(buf) > 0: + return None + + buf = buf[0] + + buf = re.sub(r'<[^>]*>', '', buf) + buf = re.sub('//', '', buf) + buf = re.sub(r'\s+', '', buf) + + if len(buf) > 0: + return buf + + return None + + def get_test_files_from_dir(self, path): + ret = os.listdir(path) + ret = [name for name in ret if os.path.isfile(path + os.sep + name) and (name.endswith(".c") or name.endswith(".h"))] + ret = [name for name in ret if self.are_markups_valid(path + name)] + + return ret + + def get_list_of_directories(self, path): + if not os.path.isdir(path): + return [] + + ret = os.listdir(path) + ret = [name for name in ret if not os.path.isfile(path + os.sep + name)] + ret = [os.path.normpath(name) + os.sep for name in ret] + + return ret + + def remove_hashes(self, path): + buf = [] + with open(path) as f: + buf = f.readlines() + + buf = [l for l in buf if not re.search(r'.*#.*', l)] + + with open(path, "w") as f: + f.writelines(buf) + + return + for i in range(len(padding)): + try: + padding[i] = padding[i].split("#")[0] + except ValueError: + continue + + f = open(path, "w") + f.writelines(padding) + f.close() + + def find_function_end(self,code_lines_list, first_line_of_function_index): + brackets_counter = 0 + current_line_index = first_line_of_function_index + + while(True): + if "{" in code_lines_list[current_line_index]: + brackets_counter += code_lines_list[current_line_index].count("{") + brackets_counter -= code_lines_list[current_line_index].count("}") + break + else: + current_line_index += 1 + + while(brackets_counter > 0): + current_line_index += 1 
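+ # Track brace depth line by line; the function body ends on the line where
+ # the depth drops back to zero.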
+ if "{" in code_lines_list[current_line_index]: + brackets_counter += code_lines_list[current_line_index].count("{") + brackets_counter -= code_lines_list[current_line_index].count("}") + elif "}" in code_lines_list[current_line_index]: + brackets_counter -= code_lines_list[current_line_index].count("}") + + return current_line_index + + def remove_function_body(self, code_lines_list, line_id): + try: + while "{" not in code_lines_list[line_id]: + if ";" in code_lines_list[line_id]: + return + line_id += 1 + except IndexError: + return + + last_line_id = self.find_function_end(code_lines_list, line_id) + + code_lines_list[line_id] = code_lines_list[line_id].split("{")[0] + code_lines_list[line_id] += ";" + + del code_lines_list[line_id + 1: last_line_id + 1] + + def set_ctags_path(self): + path = "" + status, output = commands.getstatusoutput("/usr/bin/ctags --version &> /dev/null") + if status == 0: + path = "/usr/bin/ctags " + status, output = commands.getstatusoutput(path + "--c-types=f") + if not re.search("unrecognized option", output, re.IGNORECASE): + self.ctags_path = path + return + + status, output = commands.getstatusoutput("/usr/local/bin/ctags --version &> /dev/null") + if status == 0: + path = "/usr/local/bin/ctags " + status, output = commands.getstatusoutput(path + "--c-types=f") + if not re.search("unrecognized option", output, re.IGNORECASE): + self.ctags_path = path + return + + print "ERROR: Current ctags version don't support \"--c-types=f\" parameter!" + exit(1) + + def get_ctags_path(self): + return self.ctags_path + + def get_tests_catalouges_list(self): + return self.test_catalouges_list + + def get_relative_path(self, original_path, part_to_remove): + return original_path.split(part_to_remove, 1)[1] + + def get_dirs_to_include_list(self): + return self.dirs_to_include_list + + def set_dirs_to_include(self): + self.dirs_to_include_list = [self.get_main_tested_dir() + name\ + for name in tests_config.DIRECTORIES_TO_INCLUDE_FROM_PROJECT_LIST] + + def set_tests_internal_includes_list(self): + self.tests_internal_includes_list = [self.get_main_UT_dir() + name\ + for name in tests_config.DIRECTORIES_TO_INCLUDE_FROM_UT_LIST] + + def set_preprocessing_repo(self): + self.preprocessing_repo = self.get_main_UT_dir() +\ + tests_config.PREPROCESSED_SOURCES_REPOSITORY + + def set_sources_to_test_repo(self): + self.sources_to_test_repo = self.get_main_UT_dir() +\ + tests_config.SOURCES_TO_TEST_REPOSITORY + + def get_sources_to_test_repo(self): + return self.sources_to_test_repo + + def get_preprocessing_repo(self): + return self.preprocessing_repo + + def get_tests_internal_includes_list(self): + return self.tests_internal_includes_list + + def get_script_dir_path(self): + return os.path.normpath(self.script_dir_abs_path) + os.sep + + def get_main_UT_dir(self): + return os.path.normpath(self.main_UT_dir) + os.sep + + def get_main_tested_dir(self): + return os.path.normpath(self.main_tested_dir) + os.sep + + def remove_duplicates_from_list(self, l): + return list(set(l)) + + def set_framework_includes(self): + self.framework_includes = tests_config.FRAMEWORK_DIRECTORIES_TO_INCLUDE_LIST + + def get_framework_includes(self): + return self.framework_includes + + def set_includes_to_copy_dict(self, files_to_copy_dict): + self.includes_to_copy_dict = files_to_copy_dict + + def get_includes_to_copy_dict(self): + return self.includes_to_copy_dict + + def set_main_UT_dir(self, path): + main_UT_dir = os.path.normpath(os.path.normpath(self.get_script_dir_path()\ + + os.sep + 
tests_config.MAIN_DIRECTORY_OF_UNIT_TESTS)) + if not os.path.isdir(main_UT_dir): + print "Given path to main UT directory is wrong!" + sys.exit(1) + + self.main_UT_dir = main_UT_dir + + def set_main_tested_dir(self, path): + main_tested_dir = os.path.normpath(os.path.normpath(self.get_script_dir_path()\ + + os.sep + tests_config.MAIN_DIRECTORY_OF_TESTED_PROJECT)) + if not os.path.isdir(main_tested_dir): + print "Given path to main tested directory is wrong!" + sys.exit(1) + + self.main_tested_dir = main_tested_dir + +def __main__(): + + generator = UnitTestsSourcesGenerator() + generator.copy_includes() + generator.preprocessing() + generator.prepare_sources_for_testing() + generator.create_main_cmake_lists() + generator.generate_cmakes_for_tests() + + print "Files for testing generated!" + +if __name__ == "__main__": + __main__() diff --git a/tests/ut-framework/run_unit_tests.py b/tests/ut-framework/run_unit_tests.py new file mode 100755 index 0000000..17905ea --- /dev/null +++ b/tests/ut-framework/run_unit_tests.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python2 + +# +# Copyright(c) 2012-2018 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +import tests_config +import os +import commands + +script_path = os.path.dirname(os.path.realpath(__file__)) + +main_UT_dir = os.path.normpath(script_path + os.sep\ + + tests_config.MAIN_DIRECTORY_OF_UNIT_TESTS) + os.sep + +main_tested_dir = os.path.normpath(script_path + os.sep\ + + tests_config.MAIN_DIRECTORY_OF_TESTED_PROJECT) + os.sep + + +if not os.path.isdir(main_UT_dir + "ocf_env" + os.sep + "ocf"): + try: + os.makedirs(main_UT_dir + "ocf_env" + os.sep + "ocf") + except Exception: + print "Cannot crate ocf_env/ocf directory!" + +status, output = commands.getstatusoutput("cp " + main_tested_dir +\ + "inc" + os.sep + "*" + " " + main_UT_dir + "ocf_env" + os.sep + "ocf") + + +if os.system(script_path + os.sep + "prepare_sources_for_testing.py") != 0: + print "Preparing sources for testing failed!" + exit() + + +build_dir = main_UT_dir + "build" + os.sep + +if not os.path.isdir(build_dir): + try: + os.makedirs(build_dir) + except Exception: + print "Cannot crate build directory!" + +status, output = commands.getstatusoutput("cd " + build_dir + " && cmake .. && make && make test") + +print output diff --git a/tests/ut-framework/tests_config.py b/tests/ut-framework/tests_config.py new file mode 100644 index 0000000..2fce569 --- /dev/null +++ b/tests/ut-framework/tests_config.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python2 + +# +# Copyright(c) 2012-2018 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +# ALL PATHS SHOULD BE ENDEND WITH "/" CHARACTER + +# Path should be absolute +MAIN_DIRECTORY_OF_TESTED_PROJECT = "../../" +#MAIN_DIRECTORY_OF_TESTED_PROJECT = "/root/OCFF_Linux/source/casadm/kcas/ocf/" + +# Path should be absolute +MAIN_DIRECTORY_OF_UNIT_TESTS = "../ut/" +#MAIN_DIRECTORY_OF_UNIT_TESTS = "/root/OCFF_Linux/source/casadm/kcas/ocf/UT/" + +# Paths to all direcotries, in which are stored tests. All pathts should be relative to MAIN_DIRECTORY_OF_UNIT_TESTS +DIRECTORIES_WITH_TESTS_LIST = ["cleaning/", "metadata/", "mngt/", "concurrency/", "engine/", "eviction/", "utils/"] + +# Paths to all directoris containing files with sources. 
All paths should be relative to MAIN_DIRECTORY_OF_TESTED_PROJECT +DIRECTORIES_TO_INCLUDE_FROM_PROJECT_LIST = ["src/", "src/cleaning/", "src/engine/", "src/metadata/", "src/eviction/", "src/mngt/", "src/concurrency/", "src/utils/", "inc/"] + +# Paths to all directories from UT dir, which should also be included +DIRECTORIES_TO_INCLUDE_FROM_UT_LIST = ["ocf_env/"] + +# Paths to include, required by cmake, cmocka, cunit +FRAMEWORK_DIRECTORIES_TO_INCLUDE_LIST = ["${CMOCKA_PUBLIC_INCLUDE_DIRS}" ,"${CMAKE_BINARY_DIR}", "${CMAKE_CURRENT_SOURCE_DIR}"] + +# Path to direcory containing all sources after preprocessing. Should be relative to MAIN_DIRECTORY_OF_UNIT_TESTS +PREPROCESSED_SOURCES_REPOSITORY = "preprocessed_sources_repository/" + +# Path to directory containing all sources after removing unneeded functions and cmake files for tests +SOURCES_TO_TEST_REPOSITORY = "sources_to_test_repository/" + +# List of includes. Directories will be recursivley copied to given destinations in directory with tests. +# key - destination in dir with tests +# value - path in tested project to dir which should be copied +INCLUDES_TO_COPY_DICT = { 'ocf_env/ocf/' : "inc/" } diff --git a/tests/ut/add_new_test_file.py b/tests/ut/add_new_test_file.py new file mode 100755 index 0000000..a001055 --- /dev/null +++ b/tests/ut/add_new_test_file.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python2 + +# +# Copyright(c) 2012-2018 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause-Clear +# + +import commands +import sys +import os + +args = ' '.join(sys.argv[1:]) +script_path = os.path.dirname(os.path.realpath(__file__)) +framework_script_path = script_path + os.sep + "../ut-framework/add_new_test_file.py" +framework_script_path = os.path.normpath(framework_script_path) +status, output = commands.getstatusoutput(framework_script_path + " " + args) + +print output + +if status == 0: + path = output.split(" ", 1)[0] + with open(script_path + os.sep + "header.c", "r") as header_file: + with open(path, "r+") as source_file: + source = source_file.readlines() + + source_file.seek(0, os.SEEK_SET) + source_file.truncate() + + source_file.writelines(header_file.readlines()) + source_file.writelines(source) diff --git a/tests/ut/cleaning/alru.c/cleaning_policy_alru_initialize_part_test.c b/tests/ut/cleaning/alru.c/cleaning_policy_alru_initialize_part_test.c new file mode 100644 index 0000000..fdfe9d1 --- /dev/null +++ b/tests/ut/cleaning/alru.c/cleaning_policy_alru_initialize_part_test.c @@ -0,0 +1,118 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ +/* +src/cleaning/alru.c +cleaning_policy_alru_initialize_part + + +*/ + +#undef static +#undef inline +/* + * This headers must be in test source file. It's important that cmocka.h is + * last. + */ +#include +#include +#include +#include +#include "print_desc.h" + +/* + * Headers from tested target. 
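+ * (OCF-internal headers are included directly because the test is linked
+ * against the preprocessed copy of the tested source, not the public API)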
+ */ +#include "ocf/ocf.h" +#include "../ocf_cache_priv.h" +#include "cleaning.h" +#include "alru.h" +#include "../metadata/metadata.h" +#include "../utils/utils_cleaner.h" +#include "../utils/utils_part.h" +#include "../utils/utils_allocator.h" +#include "../concurrency/ocf_cache_concurrency.h" +#include "../ocf_def_priv.h" + +void _alru_rebuild(struct ocf_cache *cache) +{ +} + +static void cleaning_policy_alru_initialize_test01(void **state) +{ + int result; + struct ocf_cache cache; + ocf_part_id_t part_id = 0; + + int collision_table_entries = 900729; + cache.collision_table_entries = collision_table_entries; + + print_test_description("Check if all variables are set correctly"); + + cache.user_parts[part_id].runtime = test_malloc(sizeof(struct ocf_user_part_runtime)); + cache.runtime_meta = test_malloc(sizeof(struct ocf_superblock_runtime)); + + result = cleaning_policy_alru_initialize_part(&cache, &cache.user_parts[part_id], 1, 1); + + assert_int_equal(result, 0); + + assert_int_equal(cache.user_parts[part_id].runtime->cleaning.policy.alru.size, 0); + assert_int_equal(cache.user_parts[part_id].runtime->cleaning.policy.alru.lru_head, collision_table_entries); + assert_int_equal(cache.user_parts[part_id].runtime->cleaning.policy.alru.lru_tail, collision_table_entries); + + assert_int_equal(cache.runtime_meta->cleaning_thread_access, 0); + + test_free(cache.runtime_meta); + test_free(cache.user_parts[part_id].runtime); +} + +static void cleaning_policy_alru_initialize_test02(void **state) +{ + int result; + struct ocf_cache cache; + ocf_part_id_t part_id = 0; + + uint32_t collision_table_entries = 900729; + cache.collision_table_entries = collision_table_entries; + + print_test_description("Check if only appropirate variables are changed"); + + cache.user_parts[part_id].runtime = test_malloc(sizeof(struct ocf_user_part_runtime)); + cache.runtime_meta = test_malloc(sizeof(struct ocf_superblock_runtime)); + + cache.user_parts[part_id].runtime->cleaning.policy.alru.size = 1; + cache.user_parts[part_id].runtime->cleaning.policy.alru.lru_head = -collision_table_entries; + cache.user_parts[part_id].runtime->cleaning.policy.alru.lru_tail = -collision_table_entries; + + result = cleaning_policy_alru_initialize_part(&cache, &cache.user_parts[part_id], 0, 0); + + assert_int_equal(result, 0); + + assert_int_equal(cache.user_parts[part_id].runtime->cleaning.policy.alru.size, 1); + assert_int_equal(cache.user_parts[part_id].runtime->cleaning.policy.alru.lru_head, -collision_table_entries); + assert_int_equal(cache.user_parts[part_id].runtime->cleaning.policy.alru.lru_tail, -collision_table_entries); + + assert_int_equal(cache.runtime_meta->cleaning_thread_access, 0); + + test_free(cache.runtime_meta); + test_free(cache.user_parts[part_id].runtime); +} + +/* + * Main function. It runs tests. + */ +int main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(cleaning_policy_alru_initialize_test01), + cmocka_unit_test(cleaning_policy_alru_initialize_test02) + }; + + print_message("Unit test of alru.c\n"); + + return cmocka_run_group_tests(tests, NULL, NULL); +} + + + diff --git a/tests/ut/cleaning/cleaning.c/ocf_cleaner_run_test.c b/tests/ut/cleaning/cleaning.c/ocf_cleaner_run_test.c new file mode 100644 index 0000000..6fa9090 --- /dev/null +++ b/tests/ut/cleaning/cleaning.c/ocf_cleaner_run_test.c @@ -0,0 +1,258 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +/* + * This headers must be in test source file. 
It's important that cmocka.h is + * last. + */ + +#undef static +#undef inline + +//src/cleaning/cleaning.c +//ocf_cleaner_run + + +#include +#include +#include +#include +#include "print_desc.h" + +/* + * Headers from tested target. + */ +#include "cleaning.h" +#include "alru.h" +#include "acp.h" +#include "../ocf_cache_priv.h" +#include "../ocf_ctx_priv.h" +#include "../mngt/ocf_mngt_common.h" +#include "../metadata/metadata.h" + +/* + * Mocked functions. Here we must deliver functions definitions which are not + * in tested source file. + */ + +void __wrap_cleaning_policy_alru_setup(struct ocf_cache *cache) +{} + +int __wrap_cleaning_policy_alru_set_cleaning_param(struct ocf_cache *cache, + uint32_t param_id, uint32_t param_value) +{ +} + +int __wrap_cleaning_policy_alru_get_cleaning_param(struct ocf_cache *cache, + uint32_t param_id, uint32_t *param_value) +{ +} + +int __wrap_cleaning_policy_acp_initialize(struct ocf_cache *cache, + int init_metadata, int init_params){} + +void __wrap_cleaning_policy_acp_deinitialize(struct ocf_cache *cache){} + +int __wrap_cleaning_policy_acp_perform_cleaning(struct ocf_cache *cache, + uint32_t io_queue){} + +void __wrap_cleaning_policy_acp_init_cache_block(struct ocf_cache *cache, + uint32_t cache_line){} + +void __wrap_cleaning_policy_acp_set_hot_cache_line(struct ocf_cache *cache, + uint32_t cache_line){} + +void __wrap_cleaning_policy_acp_purge_block(struct ocf_cache *cache, + uint32_t cache_line){} + +int __wrap_cleaning_policy_acp_purge_range(struct ocf_cache *cache, + int core_id, uint64_t start_byte, uint64_t end_byte){} + +int __wrap_cleaning_policy_acp_add_core(ocf_cache_t cache, ocf_core_id_t core_id){} + +int __wrap_cleaning_policy_acp_remove_core(ocf_cache_t cache, + ocf_core_id_t core_id){} + +void __wrap_cleaning_policy_acp_request_pending(struct ocf_request *rq){ +} + +void cleaning_policy_acp_setup(struct ocf_cache *cache) +{ +} + +int cleaning_policy_acp_set_cleaning_param(struct ocf_cache *cache, + uint32_t param_id, uint32_t param_value) +{ +} + +int cleaning_policy_acp_get_cleaning_param(struct ocf_cache *cache, + uint32_t param_id, uint32_t *param_value) +{ +} + +int __wrap_cleaning_policy_acp_set_cleaning_parameters( + struct ocf_cache *cache, struct ocf_cleaning_params *params) +{ +} + +void __wrap_cleaning_policy_acp_get_cleaning_parameters( + struct ocf_cache *cache, struct ocf_cleaning_params *params) +{ +} + +void __wrap_cleaning_policy_alru_init_cache_block(struct ocf_cache *cache, + uint32_t cache_line) +{ + +} + +void __wrap_cleaning_policy_alru_purge_cache_block(struct ocf_cache *cache, + uint32_t cache_line) +{ + +} + +int __wrap_cleaning_policy_alru_purge_range(struct ocf_cache *cache, + int partition_id, int core_id, uint64_t start_byte, + uint64_t end_byte) +{ + +} + +void __wrap_cleaning_policy_alru_set_hot_cache_line(struct ocf_cache *cache, + uint32_t cache_line) +{ + +} + +int __wrap_cleaning_policy_alru_initialize(struct ocf_cache *cache, int partition_id, + int init_metadata) +{ + +} + +int __wrap_cleaning_policy_alru_flush_block(struct ocf_cache *cache, + uint32_t io_queue, uint32_t count, uint32_t *cache_lines, + int partition_id, int core_id, uint8_t do_lock) +{ + +} + +int __wrap_cleaning_policy_alru_set_cleaning_parameters(ocf_cache_t cache, + ocf_part_id_t part_id, struct ocf_cleaning_params *params) +{ + +} + +void __wrap_cleaning_policy_alru_get_cleaning_parameters(ocf_cache_t cache, + ocf_part_id_t part_id, struct ocf_cleaning_params *params) +{ + +} + +int __wrap_cleaning_alru_perform_cleaning(struct 
ocf_cache *cache, uint32_t io_queue) +{ + function_called(); + return mock(); +} + + +ocf_cache_t __wrap_ocf_cleaner_get_cache(ocf_cleaner_t c) +{ + function_called(); + return mock_ptr_type(struct ocf_cache*); +} + +bool __wrap_ocf_mngt_is_cache_locked(ocf_cache_t cache) +{ + function_called(); + return mock(); +} + + +int __wrap__ocf_cleaner_run_check_dirty_inactive(struct ocf_cache *cache) +{ + function_called(); + return mock(); +} + +int __wrap_env_bit_test(int nr, const void *addr) +{ + function_called(); + return mock(); +} + +int __wrap_env_rmutex_trylock(env_rmutex *rmutex) +{ + function_called(); + return mock(); +} + +void __wrap_env_rmutex_unlock(env_rmutex *rmutex) +{ + function_called(); +} + + +/* + * Tests of functions. Every test name must be written to tests array in main(). + * Declarations always look the same: static void test_name(void **state); + */ + +static void ocf_cleaner_run_test01(void **state) +{ + struct ocf_cache cache; + ocf_part_id_t part_id; + uint32_t io_queue; + int result; + + //Initialize needed structures. + cache.conf_meta = test_malloc(sizeof(struct ocf_superblock_config)); + cache.conf_meta->cleaning_policy_type = ocf_cleaning_alru; + + print_test_description("Parts are ready for cleaning - should perform cleaning" + " for each part"); + + expect_function_call(__wrap_ocf_cleaner_get_cache); + will_return(__wrap_ocf_cleaner_get_cache, &cache); + + expect_function_call(__wrap_env_bit_test); + will_return(__wrap_env_bit_test, 1); + + expect_function_call(__wrap_ocf_mngt_is_cache_locked); + will_return(__wrap_ocf_mngt_is_cache_locked, 0); + + expect_function_call(__wrap_env_rmutex_trylock); + will_return(__wrap_env_rmutex_trylock, 1); + + expect_function_call(__wrap__ocf_cleaner_run_check_dirty_inactive); + will_return(__wrap__ocf_cleaner_run_check_dirty_inactive, 0); + + expect_function_call(__wrap_cleaning_alru_perform_cleaning); + will_return(__wrap_cleaning_alru_perform_cleaning, 0); + + expect_function_call(__wrap_env_rmutex_unlock); + + result = ocf_cleaner_run(&cache.cleaner, io_queue); + assert_int_equal(result, 0); + + /* Release allocated memory if allocated with test_* functions */ + + test_free(cache.conf_meta); +} + +/* + * Main function. It runs tests. + */ +int main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(ocf_cleaner_run_test01) + }; + + print_message("Unit test of cleaning.c\n"); + + return cmocka_run_group_tests(tests, NULL, NULL); +} diff --git a/tests/ut/header.c b/tests/ut/header.c new file mode 100644 index 0000000..fb2a6e5 --- /dev/null +++ b/tests/ut/header.c @@ -0,0 +1,5 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + diff --git a/tests/ut/metadata/metadata_io.c/metadata_io.c b/tests/ut/metadata/metadata_io.c/metadata_io.c new file mode 100644 index 0000000..68538f9 --- /dev/null +++ b/tests/ut/metadata/metadata_io.c/metadata_io.c @@ -0,0 +1,105 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +//src/metadata/metadata_io.c +//metadata_io + +#undef static +#undef inline + +/* + * This headers must be in test source file. It's important that cmocka.h is + * last. + */ +#include +#include +#include +#include +#include "print_desc.h" + +/* + * Headers from tested target. 
+ */ +#include "metadata.h" +#include "metadata_io.h" +#include "../engine/cache_engine.h" +#include "../engine/engine_common.h" +#include "../engine/engine_bf.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_io.h" +#include "../utils/utils_allocator.h" +#include "../ocf_def_priv.h" + +uint32_t __wrap_metadata_io_max_page(struct ocf_cache *cache) +{ + function_called(); + return mock(); +} + +void __wrap_env_cond_resched(void) +{ +} + +void __wrap_ocf_engine_push_rq_front(struct ocf_request *rq) +{ +} + +int __wrap_ocf_realloc(void **mem, size_t size, size_t count, size_t *limit) +{ +} + +int __wrap_ocf_realloc_cp(void **mem, size_t size, size_t count, size_t *limit) +{ +} + +ocf_ctx_t __wrap_ocf_cache_get_ctx(ocf_cache_t cache) +{ +} + +int __wrap_ocf_log_raw(const struct ocf_logger *logger, ocf_logger_lvl_t lvl, + const char *fmt, ...) +{ +} + +int __wrap_metadata_submit_io( + struct ocf_cache *cache, + struct metadata_io *mio, + uint32_t count, + uint32_t written) +{ +} + +int __wrap_ocf_restart_meta_io(struct ocf_request *req) +{ +} + +static void metadata_io_test01(void **state) +{ + int result; + struct metadata_io mio; + struct ocf_cache cache; + + print_test_description("Check error no. when invalid operation is given"); + + mio.dir = -1; + mio.cache = &cache; + + expect_function_call(__wrap_metadata_io_max_page); + will_return(__wrap_metadata_io_max_page, 256); + + result = metadata_io(&mio); + + assert_int_equal(result, -EINVAL); +} + + +int main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(metadata_io_test01) + }; + + return cmocka_run_group_tests(tests, NULL, NULL); +} diff --git a/tests/ut/metadata/metadata_io.c/metadata_submit_io.c b/tests/ut/metadata/metadata_io.c/metadata_submit_io.c new file mode 100644 index 0000000..eda816f --- /dev/null +++ b/tests/ut/metadata/metadata_io.c/metadata_submit_io.c @@ -0,0 +1,245 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +//src/metadata/metadata_io.c +//metadata_submit_io + +#undef static +#undef inline + +/* + * This headers must be in test source file. It's important that cmocka.h is + * last. + */ +#include +#include +#include +#include +#include "print_desc.h" + +/* + * Headers from tested target. 
+ */ +#include "metadata.h" +#include "metadata_io.h" +#include "../engine/cache_engine.h" +#include "../engine/engine_common.h" +#include "../engine/engine_bf.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_allocator.h" +#include "../ocf_def_priv.h" + +struct ocf_io *__wrap_ocf_new_cache_io(struct ocf_cache *cache) +{ + function_called(); + return mock_ptr_type(struct ocf_io *); +} + +int __wrap_metadata_io_write_fill(struct ocf_cache *cache, + ctx_data_t *data, uint32_t page, void *context) +{ + function_called(); + return mock(); +} + +void *__wrap_ctx_data_alloc(ocf_ctx_t ctx, uint32_t pages) +{ + function_called(); + return mock_ptr_type(void*); +} + +void __wrap_ocf_io_configure(struct ocf_io *io, uint64_t addr, + uint32_t bytes, uint32_t dir, uint32_t class, uint64_t flags) +{ + function_called(); +} + +void __wrap_metadata_io_end(struct ocf_io *io, int error) +{ +} + +void __wrap_ocf_io_set_cmpl(struct ocf_io *io, void *context, + void *context2, ocf_end_io_t fn) +{ + function_called(); +} + +int __wrap_ocf_io_set_data(struct ocf_io *io, ctx_data_t *data, + uint32_t offset) +{ + function_called(); + return mock(); +} + +void __wrap_ocf_dobj_submit_io(struct ocf_io *io) +{ + function_called(); +} + +void __wrap_ctx_data_free(ocf_ctx_t ctx, ctx_data_t *data) +{ + function_called(); +} + +void __wrap_ocf_io_put(struct ocf_io *io) +{ + function_called(); +} + +int __wrap_ocf_restart_meta_io(struct ocf_request *req) +{ +} + +void __wrap_env_atomic_inc(env_atomic *a) +{ + function_called(); +} + +static void metadata_submit_io_test01(void **state) +{ + int result; + struct metadata_io mio; + struct ocf_cache cache; + uint32_t count; + uint32_t written; + + print_test_description("Couldn't allocate new IO"); + + expect_function_call(__wrap_ocf_new_cache_io); + will_return(__wrap_ocf_new_cache_io, 0); + + result = metadata_submit_io(&cache, &mio, count, written); + + assert_int_equal(result, -ENOMEM); + assert_int_equal(mio.error, -ENOMEM); +} + +static void metadata_submit_io_test02(void **state) +{ + int result; + struct metadata_io mio; + struct ocf_cache cache; + uint32_t count; + uint32_t written; + + print_test_description("Couldn't allocate data buffer for IO"); + + expect_function_call(__wrap_ocf_new_cache_io); + will_return(__wrap_ocf_new_cache_io, 1); + + expect_function_call(__wrap_ctx_data_alloc); + will_return(__wrap_ctx_data_alloc, 0); + + expect_function_call(__wrap_ocf_io_put); + + result = metadata_submit_io(&cache, &mio, count, written); + + assert_int_equal(result, -ENOMEM); + assert_int_equal(mio.error, -ENOMEM); +} + +static void metadata_submit_io_test03(void **state) +{ + int result; + struct metadata_io mio; + struct ocf_cache cache; + uint32_t count; + uint32_t written; + int mio_err = 0; + + print_test_description("Write operation is performed successfully"); + + mio.hndl_fn = __wrap_metadata_io_write_fill; + + mio.dir = OCF_WRITE; + mio.error = mio_err; + count = 1; + + expect_function_call(__wrap_ocf_new_cache_io); + will_return(__wrap_ocf_new_cache_io, 1); + + expect_function_call(__wrap_ctx_data_alloc); + will_return(__wrap_ctx_data_alloc, 1); + + expect_function_call(__wrap_metadata_io_write_fill); + will_return(__wrap_metadata_io_write_fill, 0); + + expect_function_call(__wrap_ocf_io_configure); + + expect_function_call(__wrap_ocf_io_set_cmpl); + + expect_function_call(__wrap_ocf_io_set_data); + will_return(__wrap_ocf_io_set_data, 0); + + expect_function_call(__wrap_env_atomic_inc); + + expect_function_call(__wrap_ocf_dobj_submit_io); + 
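+ /* Every mock above is primed exactly once, so the full write path is
+  * expected to run to completion and report success.
+  */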
+ result = metadata_submit_io(&cache, &mio, count, written); + + assert_int_equal(result, 0); + assert_int_equal(mio.error, mio_err); +} + +static void metadata_submit_io_test04(void **state) +{ + int result; + int i; + int interations_before_fail; + struct metadata_io mio; + struct ocf_cache cache; + uint32_t count; + uint32_t written; + + print_test_description("Write operation is performed, but if fails at 3rd iteration"); + + mio.hndl_fn = __wrap_metadata_io_write_fill; + + mio.dir = OCF_WRITE; + count = 3; + interations_before_fail = 2; + + expect_function_call(__wrap_ocf_new_cache_io); + will_return(__wrap_ocf_new_cache_io, 1); + + expect_function_call(__wrap_ctx_data_alloc); + will_return(__wrap_ctx_data_alloc, 1); + + for (i = 0; i < interations_before_fail; i++) { + expect_function_call(__wrap_metadata_io_write_fill); + will_return(__wrap_metadata_io_write_fill, 0); + } + + expect_function_call(__wrap_metadata_io_write_fill); + will_return(__wrap_metadata_io_write_fill, 1); + + expect_function_call(__wrap_ctx_data_free); + + expect_function_call(__wrap_ocf_io_put); + + result = metadata_submit_io(&cache, &mio, count, written); + + assert_int_equal(result, 1); + assert_int_equal(mio.error, 1); +} + + +/* + * Main function. It runs tests. + */ +int main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(metadata_submit_io_test01), + cmocka_unit_test(metadata_submit_io_test02), + cmocka_unit_test(metadata_submit_io_test03), + cmocka_unit_test(metadata_submit_io_test04) + }; + + print_message("Example template for tests\n"); + + return cmocka_run_group_tests(tests, NULL, NULL); +} + diff --git a/tests/ut/mngt/ocf_mngt_cache.c/_cache_mng_set_cache_mode_test.c b/tests/ut/mngt/ocf_mngt_cache.c/_cache_mng_set_cache_mode_test.c new file mode 100644 index 0000000..9067d00 --- /dev/null +++ b/tests/ut/mngt/ocf_mngt_cache.c/_cache_mng_set_cache_mode_test.c @@ -0,0 +1,351 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +//src/mngt/ocf_mngt_cache.c +//_cache_mng_set_cache_mode + +/* + + +*/ + +#undef static +#undef inline + +#include +#include +#include +#include +#include "print_desc.h" + +/* + * Headers from tested target. + */ +#include "ocf/ocf.h" +#include "ocf_mngt_common.h" +#include "../ocf_core_priv.h" +#include "../ocf_queue_priv.h" +#include "../metadata/metadata.h" +#include "../engine/cache_engine.h" +#include "../utils/utils_part.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_device.h" +#include "../utils/utils_io.h" +#include "../utils/utils_cache_line.h" +#include "../ocf_utils.h" +#include "../concurrency/ocf_concurrency.h" +#include "../eviction/ops.h" +#include "../ocf_ctx_priv.h" +#include "../cleaning/cleaning.h" + +/* + * Mocked functions + */ +bool __wrap_ocf_cache_mode_is_valid(ocf_cache_mode_t mode) +{ + function_called(); + return mock(); +} + +const char *__wrap_ocf_get_io_iface_name(ocf_cache_mode_t cache_mode) +{ +} + +ocf_ctx_t __wrap_ocf_cache_get_ctx(ocf_cache_t cache) +{ +} + +int __wrap_ocf_log_raw(const struct ocf_logger *logger, ocf_logger_lvl_t lvl, + const char *fmt, ...) 
+{
+	function_called();
+	return mock();
+}
+
+int __wrap_ocf_mngt_cache_flush_nolock(ocf_cache_t cache, bool interruption)
+{
+	function_called();
+	return mock();
+}
+
+int __wrap_ocf_metadata_flush_superblock(struct ocf_cache *cache)
+{
+	function_called();
+	return mock();
+}
+
+bool __wrap_env_bit_test(int nr, const volatile unsigned long *addr)
+{
+	function_called();
+	return mock();
+}
+
+void __wrap_env_atomic_set(env_atomic *a, int i)
+{
+	function_called();
+}
+
+int __wrap_env_atomic_read(const env_atomic *a)
+{
+	function_called();
+	return mock();
+}
+
+int __wrap_ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache)
+{
+	function_called();
+	return mock();
+}
+
+static void _cache_mng_set_cache_mode_test01(void **state)
+{
+	int result;
+	struct ocf_cache cache;
+	ocf_cache_mode_t mode_old, mode_new;
+	uint8_t flush;
+
+	print_test_description("Invalid new mode produces appropriate error code");
+
+	cache.conf_meta = test_malloc(sizeof(struct ocf_superblock_config));
+	mode_old = -20;
+	cache.conf_meta->cache_mode = mode_old;
+	mode_new = ocf_cache_mode_none;
+	flush = 0;
+
+	expect_function_call(__wrap_ocf_cache_mode_is_valid);
+	will_return(__wrap_ocf_cache_mode_is_valid, 0);
+
+	result = _cache_mng_set_cache_mode(&cache, mode_new, flush);
+
+	assert_int_equal(result, -OCF_ERR_INVAL);
+	assert_int_equal(cache.conf_meta->cache_mode, mode_old);
+
+	test_free(cache.conf_meta);
+}
+
+static void _cache_mng_set_cache_mode_test02(void **state)
+{
+	int result;
+	struct ocf_cache cache;
+	ocf_cache_mode_t mode_old, mode_new;
+	uint8_t flush;
+
+	print_test_description("Attempt to set mode the same as previous");
+
+	mode_old = mode_new = ocf_cache_mode_wt;
+	flush = 0;
+
+	cache.conf_meta = test_malloc(sizeof(struct ocf_superblock_config));
+	cache.conf_meta->cache_mode = mode_old;
+
+	expect_function_call(__wrap_ocf_cache_mode_is_valid);
+	will_return(__wrap_ocf_cache_mode_is_valid, 1);
+
+	expect_function_call(__wrap_ocf_log_raw);
+	will_return(__wrap_ocf_log_raw, 0);
+
+	result = _cache_mng_set_cache_mode(&cache, mode_new, flush);
+
+	assert_int_equal(result, 0);
+	assert_int_equal(cache.conf_meta->cache_mode, mode_old);
+
+	test_free(cache.conf_meta);
+}
+
+static void _cache_mng_set_cache_mode_test03(void **state)
+{
+	int result;
+	struct ocf_cache cache;
+	ocf_cache_mode_t mode_old, mode_new;
+	uint8_t flush;
+
+	print_test_description("Flush flag is set, but operation failed -"
+		" check if error code is correct");
+
+	mode_old = ocf_cache_mode_wt;
+	mode_new = ocf_cache_mode_pt;
+	cache.conf_meta = test_malloc(sizeof(struct ocf_superblock_config));
+	cache.conf_meta->cache_mode = mode_old;
+	flush = 1;
+
+	expect_function_call(__wrap_ocf_cache_mode_is_valid);
+	will_return(__wrap_ocf_cache_mode_is_valid, 1);
+
+	expect_function_call(__wrap_ocf_mngt_cache_flush_nolock);
+	will_return(__wrap_ocf_mngt_cache_flush_nolock, -OCF_ERR_NO_MEM);
+
+	result = _cache_mng_set_cache_mode(&cache, mode_new, flush);
+
+	assert_int_equal(result, -OCF_ERR_NO_MEM);
+	assert_int_equal(cache.conf_meta->cache_mode, mode_old);
+
+	test_free(cache.conf_meta);
+}
+
+static void _cache_mng_set_cache_mode_test04(void **state)
+{
+	int result;
+	struct ocf_cache cache;
+	ocf_cache_mode_t mode_old, mode_new;
+	uint8_t flush;
+	int i;
+
+	print_test_description("Flush flag is not set, "
+		"old cache mode is write back. "
+		"Setting the new cache mode is successful");
+
+	mode_old = ocf_cache_mode_wb;
+	mode_new = ocf_cache_mode_wa;
+	flush = 0;
+
+	cache.conf_meta = test_malloc(sizeof(struct ocf_superblock_config));
+	cache.conf_meta->cache_mode = mode_old;
+
+	expect_function_call(__wrap_ocf_cache_mode_is_valid);
+	will_return(__wrap_ocf_cache_mode_is_valid, 1);
+
+	for (i = 0; i != OCF_CORE_MAX; ++i) {
+		expect_function_call(__wrap_env_bit_test);
+		will_return(__wrap_env_bit_test, 1);
+
+		expect_function_call(__wrap_env_atomic_read);
+		will_return(__wrap_env_atomic_read, 1);
+		expect_function_call(__wrap_env_atomic_set);
+	}
+
+	expect_function_call(__wrap_ocf_metadata_flush_superblock);
+	will_return(__wrap_ocf_metadata_flush_superblock, 0);
+
+	expect_function_call(__wrap_ocf_log_raw);
+	will_return(__wrap_ocf_log_raw, 0);
+
+	result = _cache_mng_set_cache_mode(&cache, mode_new, flush);
+
+	assert_int_equal(result, 0);
+	assert_int_equal(cache.conf_meta->cache_mode, mode_new);
+
+	test_free(cache.conf_meta);
+}
+
+static void _cache_mng_set_cache_mode_test05(void **state)
+{
+	int result;
+	struct ocf_cache cache;
+	ocf_cache_mode_t mode_old, mode_new;
+	uint8_t flush;
+
+	print_test_description("Flush flag is not set, "
+		"flushing metadata superblock fails");
+
+	mode_old = ocf_cache_mode_wt;
+	mode_new = ocf_cache_mode_wa;
+	flush = 0;
+
+	cache.conf_meta = test_malloc(sizeof(struct ocf_superblock_config));
+	cache.conf_meta->cache_mode = mode_old;
+
+	expect_function_call(__wrap_ocf_cache_mode_is_valid);
+	will_return(__wrap_ocf_cache_mode_is_valid, 1);
+
+	expect_function_call(__wrap_ocf_metadata_flush_superblock);
+	will_return(__wrap_ocf_metadata_flush_superblock, 1);
+
+	expect_function_call(__wrap_ocf_log_raw);
+	will_return(__wrap_ocf_log_raw, 0);
+
+	result = _cache_mng_set_cache_mode(&cache, mode_new, flush);
+
+	assert_int_equal(result, -OCF_ERR_WRITE_CACHE);
+	assert_int_equal(cache.conf_meta->cache_mode, mode_old);
+
+	test_free(cache.conf_meta);
+}
+
+static void _cache_mng_set_cache_mode_test06(void **state)
+{
+	int result;
+	struct ocf_cache cache;
+	ocf_cache_mode_t mode_old, mode_new;
+	uint8_t flush;
+
+	print_test_description("No flush, mode changed successfully");
+	mode_old = ocf_cache_mode_wt;
+	mode_new = ocf_cache_mode_wa;
+	flush = 0;
+
+	cache.conf_meta = test_malloc(sizeof(struct ocf_superblock_config));
+	cache.conf_meta->cache_mode = mode_old;
+
+	expect_function_call(__wrap_ocf_cache_mode_is_valid);
+	will_return(__wrap_ocf_cache_mode_is_valid, 1);
+
+	expect_function_call(__wrap_ocf_metadata_flush_superblock);
+	will_return(__wrap_ocf_metadata_flush_superblock, 0);
+
+	expect_function_call(__wrap_ocf_log_raw);
+	will_return(__wrap_ocf_log_raw, 0);
+
+	result = _cache_mng_set_cache_mode(&cache, mode_new, flush);
+
+	assert_int_equal(result, 0);
+	assert_int_equal(cache.conf_meta->cache_mode, mode_new);
+
+	test_free(cache.conf_meta);
+}
+
+static void _cache_mng_set_cache_mode_test07(void **state)
+{
+	int result;
+	struct ocf_cache cache;
+	ocf_cache_mode_t mode_old, mode_new;
+	uint8_t flush;
+
+	print_test_description("Flush performed, mode changed successfully");
+	mode_old = ocf_cache_mode_wt;
+	mode_new = ocf_cache_mode_wa;
+	flush = 1;
+
+	cache.conf_meta = test_malloc(sizeof(struct ocf_superblock_config));
+	cache.conf_meta->cache_mode = mode_old;
+
+	expect_function_call(__wrap_ocf_cache_mode_is_valid);
+	will_return(__wrap_ocf_cache_mode_is_valid, 1);
+
+	expect_function_call(__wrap_ocf_mngt_cache_flush_nolock);
+
will_return(__wrap_ocf_mngt_cache_flush_nolock, 0); + + expect_function_call(__wrap_ocf_metadata_flush_superblock); + will_return(__wrap_ocf_metadata_flush_superblock, 0); + + expect_function_call(__wrap_ocf_log_raw); + will_return(__wrap_ocf_log_raw, 0); + + result = _cache_mng_set_cache_mode(&cache, mode_new, flush); + + assert_int_equal(result, 0); + assert_int_equal(cache.conf_meta->cache_mode, mode_new); + + test_free(cache.conf_meta); +} + +/* + * Main function. It runs tests. + */ +int main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(_cache_mng_set_cache_mode_test01), + cmocka_unit_test(_cache_mng_set_cache_mode_test02), + cmocka_unit_test(_cache_mng_set_cache_mode_test03), + cmocka_unit_test(_cache_mng_set_cache_mode_test04), + cmocka_unit_test(_cache_mng_set_cache_mode_test05), + cmocka_unit_test(_cache_mng_set_cache_mode_test06), + cmocka_unit_test(_cache_mng_set_cache_mode_test07) + }; + + print_message("Unit test of _cache_mng_set_cache_mode\n"); + + return cmocka_run_group_tests(tests, NULL, NULL); +} diff --git a/tests/ut/mngt/ocf_mngt_cache.c/ocf_mngt_cache_set_fallback_pt_error_threshold.c b/tests/ut/mngt/ocf_mngt_cache.c/ocf_mngt_cache_set_fallback_pt_error_threshold.c new file mode 100644 index 0000000..df7b942 --- /dev/null +++ b/tests/ut/mngt/ocf_mngt_cache.c/ocf_mngt_cache_set_fallback_pt_error_threshold.c @@ -0,0 +1,176 @@ +/* + *src/mngt/ocf_mngt_cache.c + * ocf_mngt_cache_set_fallback_pt_error_threshold + * + * INSERT HERE LIST OF FUNCTIONS YOU WANT TO LEAVE + * ONE FUNCTION PER LINE + * + */ + +#undef static + +#undef inline + + +#include +#include +#include +#include +#include "print_desc.h" + +#include "ocf/ocf.h" +#include "ocf_mngt_common.h" +#include "../ocf_core_priv.h" +#include "../ocf_queue_priv.h" +#include "../metadata/metadata.h" +#include "../engine/cache_engine.h" +#include "../utils/utils_part.h" +#include "../utils/utils_cache_line.h" +#include "../utils/utils_device.h" +#include "../utils/utils_io.h" +#include "../utils/utils_cache_line.h" +#include "../ocf_utils.h" +#include "../concurrency/ocf_concurrency.h" +#include "../eviction/ops.h" +#include "../ocf_ctx_priv.h" +#include "../cleaning/cleaning.h" + +int __wrap_ocf_log_raw(const struct ocf_logger *logger, ocf_logger_lvl_t lvl, + const char *fmt, ...) 
+{ + function_called(); +} + +ocf_ctx_t __wrap_ocf_cache_get_ctx(ocf_cache_t cache) +{ + function_called(); +} + +int __wrap_ocf_mng_cache_set_fallback_pt(ocf_cache_t cache) +{ + function_called(); +} + +bool __wrap_ocf_fallback_pt_is_on(ocf_cache_t cache) +{ +} + +static void ocf_mngt_cache_set_fallback_pt_error_threshold_test01(void **state) +{ + struct ocf_cache cache; + int new_threshold; + int result; + + print_test_description("Appropriate error code on invalid threshold value"); + + new_threshold = -1; + + result = ocf_mngt_cache_set_fallback_pt_error_threshold(&cache, new_threshold); + + assert_int_equal(result, -OCF_ERR_INVAL); + + + new_threshold = 10000001; + + result = ocf_mngt_cache_set_fallback_pt_error_threshold(&cache, new_threshold); + + assert_int_equal(result, -OCF_ERR_INVAL); +} + +static void ocf_mngt_cache_set_fallback_pt_error_threshold_test02(void **state) +{ + struct ocf_cache cache; + int new_threshold; + int old_threshold; + + print_test_description("Invalid new threshold value doesn't change current threshold"); + + new_threshold = -1; + old_threshold = cache.fallback_pt_error_threshold = 1000; + + ocf_mngt_cache_set_fallback_pt_error_threshold(&cache, new_threshold); + + assert_int_equal(cache.fallback_pt_error_threshold, old_threshold); + + + new_threshold = 10000001; + old_threshold = cache.fallback_pt_error_threshold = 1000; + + ocf_mngt_cache_set_fallback_pt_error_threshold(&cache, new_threshold); + + assert_int_equal(cache.fallback_pt_error_threshold, old_threshold); +} + +static void ocf_mngt_cache_set_fallback_pt_error_threshold_test03(void **state) +{ + struct ocf_cache cache; + int new_threshold, old_threshold; + + print_test_description("Setting new threshold value"); + + new_threshold = 5000; + old_threshold = cache.fallback_pt_error_threshold = 1000; + + ocf_mngt_cache_set_fallback_pt_error_threshold(&cache, new_threshold); + + assert_int_equal(cache.fallback_pt_error_threshold, new_threshold); + + + new_threshold = 1000000; + old_threshold = cache.fallback_pt_error_threshold = 1000; + + ocf_mngt_cache_set_fallback_pt_error_threshold(&cache, new_threshold); + + assert_int_equal(cache.fallback_pt_error_threshold, new_threshold); + + + new_threshold = 0; + old_threshold = cache.fallback_pt_error_threshold = 1000; + + ocf_mngt_cache_set_fallback_pt_error_threshold(&cache, new_threshold); + + assert_int_equal(cache.fallback_pt_error_threshold, new_threshold); +} + +static void ocf_mngt_cache_set_fallback_pt_error_threshold_test04(void **state) +{ + struct ocf_cache cache; + int new_threshold; + int result; + + print_test_description("Return appropriate value on success"); + + new_threshold = 5000; + + result = ocf_mngt_cache_set_fallback_pt_error_threshold(&cache, new_threshold); + + assert_int_equal(result, 0); + + + new_threshold = 1000000; + + result = ocf_mngt_cache_set_fallback_pt_error_threshold(&cache, new_threshold); + + assert_int_equal(cache.fallback_pt_error_threshold, new_threshold); + + + new_threshold = 0; + + result = ocf_mngt_cache_set_fallback_pt_error_threshold(&cache, new_threshold); + + assert_int_equal(result, 0); +} + +int main(void) +{ + const struct CMUnitTest tests[] = { + cmocka_unit_test(ocf_mngt_cache_set_fallback_pt_error_threshold_test01), + cmocka_unit_test(ocf_mngt_cache_set_fallback_pt_error_threshold_test02), + cmocka_unit_test(ocf_mngt_cache_set_fallback_pt_error_threshold_test03), + cmocka_unit_test(ocf_mngt_cache_set_fallback_pt_error_threshold_test04) + }; + + print_message("Unit test of src/mngt/ocf_mngt_cache.c"); 
+ + return cmocka_run_group_tests(tests, NULL, NULL); +} diff --git a/tests/ut/ocf_env/CMakeLists.txt b/tests/ut/ocf_env/CMakeLists.txt new file mode 100644 index 0000000..61b4475 --- /dev/null +++ b/tests/ut/ocf_env/CMakeLists.txt @@ -0,0 +1,2 @@ +add_library(ocf_env ocf_env.c) +target_link_libraries(ocf_env pthread) diff --git a/tests/ut/ocf_env/ocf/.gitkeep b/tests/ut/ocf_env/ocf/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/tests/ut/ocf_env/ocf_env.c b/tests/ut/ocf_env/ocf_env.c new file mode 100644 index 0000000..6d0b915 --- /dev/null +++ b/tests/ut/ocf_env/ocf_env.c @@ -0,0 +1,611 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + + +#include "ocf_env.h" +#include + +#include +#include +#include +#include + +void bug_on(int cond) +{ + /* Wrap this to use your implementation */ + assert_false(cond); +} + +void *env_malloc(size_t size, int flags) +{ + return malloc(size); +} + +void *env_zalloc(size_t size, int flags) +{ + return calloc(1, size); +} + +void env_free(const void *ptr) +{ + return free((void *) ptr); +} + +void *env_vmalloc(size_t size) +{ + return malloc(size); +} + +void *env_vzalloc(size_t size) +{ + return calloc(1, size); +} + +void env_vfree(const void *ptr) +{ + return free((void *) ptr); +} + +uint64_t env_get_free_memory(void) +{ + return sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES); +} + +/* *** ALLOCATOR *** */ + +struct _env_allocator { + /*!< Memory pool ID unique name */ + char *name; + + /*!< Size of specific item of memory pool */ + uint32_t item_size; + + /*!< Number of currently allocated items in pool */ + env_atomic count; +}; + +size_t env_allocator_align(size_t size) +{ + if (size <= 2) + return size; + return (1ULL << 32) >> __builtin_clz(size - 1); +} + +struct _env_allocator_item { + uint32_t flags; + uint32_t cpu; + char data[]; +}; + +void *env_allocator_new(env_allocator *allocator) +{ + struct _env_allocator_item *item = NULL; + + item = calloc(1, allocator->item_size); + if (item) { + item->cpu = 0; + env_atomic_inc(&allocator->count); + } + + return &item->data; +} + +env_allocator *env_allocator_create(uint32_t size, const char *name) +{ + env_allocator *allocator = calloc(1, sizeof(*allocator)); + + allocator->item_size = size + sizeof(struct _env_allocator_item); + + allocator->name = strdup(name); + + return allocator; +} + +void env_allocator_del(env_allocator *allocator, void *obj) +{ + struct _env_allocator_item *item; + + item = container_of(obj, struct _env_allocator_item, data); + + env_atomic_dec(&allocator->count); + + free(item); +} + +void env_allocator_destroy(env_allocator *allocator) +{ + if (allocator) { + if (env_atomic_read(&allocator->count)) { + fprintf(stderr, "Not all object deallocated\n"); + ENV_WARN(true, OCF_PREFIX_SHORT" Cleanup problem\n"); + } + + free(allocator->name); + free(allocator); + } +} + +uint32_t env_allocator_item_count(env_allocator *allocator) +{ + return env_atomic_read(&allocator->count); +} + +/* *** COMPLETION *** */ + +void env_completion_init(env_completion *completion) +{ + function_called(); + check_expected_ptr(completion); +} + +void env_completion_wait(env_completion *completion) +{ + function_called(); + check_expected_ptr(completion); +} + +void env_completion_complete(env_completion *completion) +{ + function_called(); + check_expected_ptr(completion); +} + + +int env_mutex_init(env_mutex *mutex) +{ + function_called(); + check_expected_ptr(mutex); + return mock(); +} + +void env_mutex_lock(env_mutex 
*mutex) +{ + function_called(); + check_expected_ptr(mutex); +} + +int env_mutex_lock_interruptible(env_mutex *mutex) +{ + function_called(); + check_expected_ptr(mutex); + return mock(); +} + +int env_mutex_trylock(env_mutex *mutex) +{ + function_called(); + check_expected_ptr(mutex); + return mock(); +} + +void env_mutex_unlock(env_mutex *mutex) +{ + function_called(); + check_expected_ptr(mutex); +} + +int env_mutex_is_locked(env_mutex *mutex) +{ + function_called(); + check_expected_ptr(mutex); + return mock(); +} + +int env_rmutex_init(env_rmutex *rmutex) +{ + function_called(); + check_expected_ptr(rmutex); + return mock(); +} + +void env_rmutex_lock(env_rmutex *rmutex) +{ + function_called(); + check_expected_ptr(rmutex); +} + +int env_rmutex_lock_interruptible(env_rmutex *rmutex) +{ + function_called(); + check_expected_ptr(rmutex); + return mock(); +} + +int env_rmutex_trylock(env_rmutex *rmutex) +{ + function_called(); + check_expected_ptr(rmutex); + return mock(); +} + +void env_rmutex_unlock(env_rmutex *rmutex) +{ + function_called(); + check_expected_ptr(rmutex); +} + +int env_rmutex_is_locked(env_rmutex *rmutex) +{ + function_called(); + check_expected_ptr(rmutex); + return mock(); +} + +int env_rwsem_init(env_rwsem *s) +{ + function_called(); + check_expected_ptr(s); + return mock(); +} + +void env_rwsem_up_read(env_rwsem *s) +{ + function_called(); + check_expected_ptr(s); +} + +void env_rwsem_down_read(env_rwsem *s) +{ + function_called(); + check_expected_ptr(s); +} + +int env_rwsem_down_read_trylock(env_rwsem *s) +{ + function_called(); + check_expected_ptr(s); + return mock(); +} + +void env_rwsem_up_write(env_rwsem *s) +{ + function_called(); + check_expected_ptr(s); +} + +void env_rwsem_down_write(env_rwsem *s) +{ + function_called(); + check_expected_ptr(s); +} + +int env_rwsem_down_write_trylock(env_rwsem *s) +{ + function_called(); + check_expected_ptr(s); + return mock(); +} + +int env_atomic_read(const env_atomic *a) +{ + return *a; +} + +void env_atomic_set(env_atomic *a, int i) +{ + *a = i; +} + +void env_atomic_add(int i, env_atomic *a) +{ + *a += i; +} + +void env_atomic_sub(int i, env_atomic *a) +{ + *a -= i; +} + +bool env_atomic_sub_and_test(int i, env_atomic *a) +{ + return *a-=i == 0; +} + +void env_atomic_inc(env_atomic *a) +{ + ++*a; +} + +void env_atomic_dec(env_atomic *a) +{ + --*a; +} + +bool env_atomic_dec_and_test(env_atomic *a) +{ + return --*a == 0; +} + +bool env_atomic_inc_and_test(env_atomic *a) +{ + return ++*a == 0; +} + +int env_atomic_add_return(int i, env_atomic *a) +{ + return *a+=i; +} + +int env_atomic_sub_return(int i, env_atomic *a) +{ + return *a-=i; +} + +int env_atomic_inc_return(env_atomic *a) +{ + return ++*a; +} + +int env_atomic_dec_return(env_atomic *a) +{ + return --*a; +} + +int env_atomic_cmpxchg(env_atomic *a, int old, int new_value) +{ + int oldval = *a; + if (oldval == old) + *a = new_value; + return oldval; +} + +int env_atomic_add_unless(env_atomic *a, int i, int u) +{ + int c, old; + c = *a; + for (;;) { + if (c == (u)) + break; + old = env_atomic_cmpxchg((a), c, c + (i)); + if (old == c) + break; + c = old; + } + return c != (u); +} + +long env_atomic64_read(const env_atomic64 *a) +{ + return *a; +} + +void env_atomic64_set(env_atomic64 *a, long i) +{ + *a=i; +} + +void env_atomic64_add(long i, env_atomic64 *a) +{ + *a += i; +} + +void env_atomic64_sub(long i, env_atomic64 *a) +{ + *a -= i; +} + +void env_atomic64_inc(env_atomic64 *a) +{ + ++*a; +} + +void env_atomic64_dec(env_atomic64 *a) +{ + --*a; +} + +long 
env_atomic64_cmpxchg(env_atomic64 *a, long old, long new) +{ + long oldval = *a; + if (oldval == old) + *a = new; + return oldval; +} + +void env_spinlock_init(env_spinlock *l) +{ + function_called(); + check_expected_ptr(l); +} + +void env_spinlock_lock(env_spinlock *l) +{ + function_called(); + check_expected_ptr(l); +} + +void env_spinlock_unlock(env_spinlock *l) +{ + function_called(); + check_expected_ptr(l); +} + +void env_spinlock_lock_irq(env_spinlock *l) +{ + function_called(); + check_expected_ptr(l); +} + +void env_spinlock_unlock_irq(env_spinlock *l) +{ + function_called(); + check_expected_ptr(l); +} + +void env_rwlock_init(env_rwlock *l) +{ + function_called(); + check_expected_ptr(l); +} + +void env_rwlock_read_lock(env_rwlock *l) +{ + function_called(); + check_expected_ptr(l); +} + +void env_rwlock_read_unlock(env_rwlock *l) +{ + function_called(); + check_expected_ptr(l); +} + +void env_rwlock_write_lock(env_rwlock *l) +{ + function_called(); + check_expected_ptr(l); +} + +void env_rwlock_write_unlock(env_rwlock *l) +{ + function_called(); + check_expected_ptr(l); +} + +void env_waitqueue_init(env_waitqueue *w) +{ + w->completed = false; + w->waiting = false; + w->co = NULL; +} + +void env_waitqueue_wake_up(env_waitqueue *w) +{ + w->completed = true; + if (!w->waiting || !w->co) + return; +} + +void env_bit_set(int nr, volatile void *addr) +{ + char *byte = (char *) addr + (nr >> 3); + char mask = 1 << (nr & 7); + + __sync_or_and_fetch(byte, mask); +} + +void env_bit_clear(int nr, volatile void *addr) +{ + char *byte = (char *) addr + (nr >> 3); + char mask = 1 << (nr & 7); + + mask = ~mask; + __sync_and_and_fetch(byte, mask); +} + +bool env_bit_test(int nr, const volatile unsigned long *addr) +{ + const char *byte = (char *) addr + (nr >> 3); + char mask = 1 << (nr & 7); + + return !!(*byte & mask); +} + +/* *** SCHEDULING *** */ + +void env_touch_softlockup_wd(void) +{ + function_called(); +} + +void env_schedule(void) +{ + function_called(); +} + +int env_in_interrupt(void) +{ + function_called(); + return mock(); +} + +uint64_t env_get_tick_count(void) +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_sec * 1000 + tv.tv_usec / 1000; +} + +uint64_t env_ticks_to_msecs(uint64_t j) +{ + return j; +} + +uint64_t env_ticks_to_secs(uint64_t j) +{ + return j / 1000; +} + +uint64_t env_secs_to_ticks(uint64_t j) +{ + return j * 1000; +} + +int env_memset(void *dest, size_t count, int ch) +{ + memset(dest, ch, count); + return 0; +} + +int env_memcpy(void *dest, size_t destsz, const void * src, size_t count) +{ + if (destsz < count) + memcpy(dest, src, destsz); + else + memcpy(dest, src, count); + return 0; + +} + +int env_memcmp(const void *str1, size_t n1, const void *str2, size_t n2, + int *diff) +{ + size_t n = n1 > n2 ? 
n2 : n1; + + *diff = memcmp(str1, str2, n); + return 0; +} + +int env_strncpy(char * dest, size_t destsz, const char *src, size_t count) +{ + if (destsz < count) + strncpy(dest, src, destsz); + else + strncpy(dest, src, count); + return 0; +} + +size_t env_strnlen(const char *str, size_t strsz) +{ + return strlen(str); +} + +void env_sort(void *base, size_t num, size_t size, + int (*cmp_fn)(const void *, const void *), + void (*swap_fn)(void *, void *, int size)) +{ + qsort(base, num, size, cmp_fn); +} + +int env_strncmp(const char * str1, const char * str2, size_t num) +{ + return strncmp(str1, str2, num); +} + +void env_msleep(uint64_t n) +{ + +} + +/* *** CRC *** */ + +uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len) +{ + function_called(); + check_expected(crc); + check_expected(len); + check_expected_ptr(data); + return mock(); +} diff --git a/tests/ut/ocf_env/ocf_env.h b/tests/ut/ocf_env/ocf_env.h new file mode 100644 index 0000000..6c4092a --- /dev/null +++ b/tests/ut/ocf_env/ocf_env.h @@ -0,0 +1,358 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __LIBOCF_ENV_H__ +#define __LIBOCF_ENV_H__ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#ifndef __USE_GNU +#define __USE_GNU +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ocf_env_list.h" + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef uint64_t sector_t; + +#define ENV_PRIu64 "lu" + +#define __packed __attribute__((packed)) +#define __aligned(x) __attribute__((aligned(x))) + +/* linux sector 512-bytes */ +#define ENV_SECTOR_SHIFT 9 + +#define PAGE_SIZE 4096 + +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + +/* *** MEMORY MANAGEMENT *** */ + +#define ENV_MEM_NORMAL 0 +#define ENV_MEM_NOIO 1 +#define ENV_MEM_ATOMIC 2 + +#define min(x, y) MIN(x, y) + +#define ENV_WARN(cond, fmt, args...) 
({}) + +#define ENV_WARN_ON(cond) ({ \ + if (unlikely(cond)) \ + fprintf(stderr, "WARNING (%s:%d)\n", \ + __FILE__, __LINE__); \ + }) + +#define ENV_BUG() ({ \ + fprintf(stderr, "BUG (%s:%d)\n", \ + __FILE__, __LINE__); \ + assert(0); \ + abort(); \ + }) + +#define ENV_BUG_ON(cond) bug_on((int)cond); + +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type, member) );}) + +/* ATOMICS */ +#ifndef atomic_read +#define atomic_read(ptr) (*(__typeof__(*ptr) *volatile) (ptr)) +#endif + +#ifndef atomic_set +#define atomic_set(ptr, i) ((*(__typeof__(*ptr) *volatile) (ptr)) = (i)) +#endif + +#define likely(x) (x) +#define unlikely(x) (x) + +/* + * Bug on for testing + */ +void bug_on(int cond); + +void *env_malloc(size_t size, int flags); + +void *env_zalloc(size_t size, int flags); + +void env_free(const void *ptr); + +void *env_vmalloc(size_t size); + +void *env_vzalloc(size_t size); + +void env_vfree(const void *ptr); + +uint64_t env_get_free_memory(void); + +/* *** ALLOCATOR *** */ + +typedef struct _env_allocator env_allocator; + +env_allocator *env_allocator_create(uint32_t size, const char *name); + +void env_allocator_destroy(env_allocator *allocator); + +void *env_allocator_new(env_allocator *allocator); + +void env_allocator_del(env_allocator *allocator, void *item); + +uint32_t env_allocator_item_count(env_allocator *allocator); + +/* *** MUTEX *** */ + +typedef struct { + pthread_mutex_t m; +} env_mutex; + +int env_mutex_init(env_mutex *mutex); + +void env_mutex_lock(env_mutex *mutex); + +int env_mutex_lock_interruptible(env_mutex *mutex); + +int env_mutex_trylock(env_mutex *mutex); + +void env_mutex_unlock(env_mutex *mutex); + +int env_mutex_is_locked(env_mutex *mutex); + +/* *** RECURSIVE MUTEX *** */ + +typedef env_mutex env_rmutex; + +int env_rmutex_init(env_rmutex *rmutex); + +void env_rmutex_lock(env_rmutex *rmutex); + +int env_rmutex_lock_interruptible(env_rmutex *rmutex); + +int env_rmutex_trylock(env_rmutex *rmutex); + +void env_rmutex_unlock(env_rmutex *rmutex); + +int env_rmutex_is_locked(env_rmutex *rmutex); + +/* *** RW SEMAPHORE *** */ +typedef struct { + pthread_rwlock_t lock; +} env_rwsem; + +int env_rwsem_init(env_rwsem *s); + +void env_rwsem_up_read(env_rwsem *s); + +void env_rwsem_down_read(env_rwsem *s); + +int env_rwsem_down_read_trylock(env_rwsem *s); + +void env_rwsem_up_write(env_rwsem *s); + +void env_rwsem_down_write(env_rwsem *s); + +int env_rwsem_down_write_trylock(env_rwsem *s); + +int env_rwsem_is_locked(env_rwsem *s); + +/* *** ATOMIC VARIABLES *** */ + +typedef int env_atomic; + +typedef long env_atomic64; + +int env_atomic_read(const env_atomic *a); + +void env_atomic_set(env_atomic *a, int i); + +void env_atomic_add(int i, env_atomic *a); + +void env_atomic_sub(int i, env_atomic *a); + +bool env_atomic_sub_and_test(int i, env_atomic *a); + +void env_atomic_inc(env_atomic *a); + +void env_atomic_dec(env_atomic *a); + +bool env_atomic_dec_and_test(env_atomic *a); + +bool env_atomic_inc_and_test(env_atomic *a); + +int env_atomic_add_return(int i, env_atomic *a); + +int env_atomic_sub_return(int i, env_atomic *a); + +int env_atomic_inc_return(env_atomic *a); + +int env_atomic_dec_return(env_atomic *a); + +int env_atomic_cmpxchg(env_atomic *a, int old, int new_value); + +int env_atomic_add_unless(env_atomic *a, int i, int u); + +long env_atomic64_read(const env_atomic64 *a); + +void env_atomic64_set(env_atomic64 *a, long i); + +void env_atomic64_add(long i, 
env_atomic64 *a); + +void env_atomic64_sub(long i, env_atomic64 *a); + +void env_atomic64_inc(env_atomic64 *a); + +void env_atomic64_dec(env_atomic64 *a); + +long env_atomic64_cmpxchg(env_atomic64 *a, long old, long new); + +typedef int Coroutine; + +/* *** COMPLETION *** */ +struct completion { + bool completed; + bool waiting; + Coroutine *co; +}; + +typedef struct completion env_completion; + +void env_completion_init(env_completion *completion); +void env_completion_wait(env_completion *completion); +void env_completion_complete(env_completion *completion); + +/* *** SPIN LOCKS *** */ + +typedef struct { +} env_spinlock; + +void env_spinlock_init(env_spinlock *l); + +void env_spinlock_lock(env_spinlock *l); + +void env_spinlock_unlock(env_spinlock *l); + +void env_spinlock_lock_irq(env_spinlock *l); + +void env_spinlock_unlock_irq(env_spinlock *l); + +#define env_spinlock_lock_irqsave(l, flags) \ + env_spinlock_lock(l); (void)flags; + +#define env_spinlock_unlock_irqrestore(l, flags) \ + env_spinlock_unlock(l); (void)flags; + +/* *** RW LOCKS *** */ + +typedef struct { +} env_rwlock; + +void env_rwlock_init(env_rwlock *l); + +void env_rwlock_read_lock(env_rwlock *l); + +void env_rwlock_read_unlock(env_rwlock *l); + +void env_rwlock_write_lock(env_rwlock *l); + +void env_rwlock_write_unlock(env_rwlock *l); + +/* *** WAITQUEUE *** */ + +typedef struct { + bool waiting; + bool completed; + Coroutine *co; +} env_waitqueue; + +void env_waitqueue_init(env_waitqueue *w); + +void env_waitqueue_wake_up(env_waitqueue *w); + +#define env_waitqueue_wait(w, condition) \ +({ \ + int __ret = 0; \ + if (!(condition) && !w.completed) { \ + w.waiting = true; \ + } \ + w.co = NULL; \ + w.waiting = false; \ + w.completed = false; \ + __ret = __ret; \ +}) + +/* *** BIT OPERATIONS *** */ + +void env_bit_set(int nr, volatile void *addr); + +void env_bit_clear(int nr, volatile void *addr); + +bool env_bit_test(int nr, const volatile unsigned long *addr); + +/* *** SCHEDULING *** */ + +void env_touch_softlockup_wd(void); + +void env_schedule(void); + +int env_in_interrupt(void); + +uint64_t env_get_tick_count(void); + +uint64_t env_ticks_to_msecs(uint64_t j); + +uint64_t env_ticks_to_secs(uint64_t j); + +uint64_t env_secs_to_ticks(uint64_t j); + +/* *** STRING OPERATIONS *** */ + +int env_memset(void *dest, size_t count, int ch); + +int env_memcpy(void *dest, size_t destsz, const void * src, size_t count); + +int env_memcmp(const void *str1, size_t n1, const void *str2, size_t n2, + int *diff); + +int env_strncpy(char * dest, size_t destsz, const char *src, size_t srcsz); + +size_t env_strnlen(const char *str, size_t strsz); + +int env_strncmp(const char * str1, const char * str2, size_t num); + +/* *** SORTING *** */ + +void env_sort(void *base, size_t num, size_t size, + int (*cmp_fn)(const void *, const void *), + void (*swap_fn)(void *, void *, int size)); + +void env_msleep(uint64_t n); + +/* *** CRC *** */ + +uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len); + +#endif /* __OCF_ENV_H__ */ diff --git a/tests/ut/ocf_env/ocf_env_headers.h b/tests/ut/ocf_env/ocf_env_headers.h new file mode 100644 index 0000000..6dd6a5b --- /dev/null +++ b/tests/ut/ocf_env/ocf_env_headers.h @@ -0,0 +1,13 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_ENV_HEADERS_H__ +#define __OCF_ENV_HEADERS_H__ + +#include +#include +#include + +#endif /* __OCF_ENV_HEADERS_H__ */ diff --git a/tests/ut/ocf_env/ocf_env_list.h 
b/tests/ut/ocf_env/ocf_env_list.h new file mode 100644 index 0000000..f6b3233 --- /dev/null +++ b/tests/ut/ocf_env/ocf_env_list.h @@ -0,0 +1,146 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#ifndef __OCF_LIST_H__ +#define __OCF_LIST_H__ + +#define LIST_POISON1 ((void *)0x101) +#define LIST_POISON2 ((void *)0x202) + +/** + * List entry structure mimicking linux kernel based one. + */ +struct list_head { + struct list_head *next; + struct list_head *prev; +}; + +/** + * start an empty list + */ +#define INIT_LIST_HEAD(l) { (l)->prev = l; (l)->next = l; } + +/** + * Add item to list head. + * @param it list entry to be added + * @param l1 list main node (head) + */ +static inline void list_add(struct list_head *it, struct list_head *l1) +{ + it->prev = l1; + it->next = l1->next; + + l1->next->prev = it; + l1->next = it; +} + +/** + * Add item it to tail. + * @param it list entry to be added + * @param l1 list main node (head) + */ +static inline void list_add_tail(struct list_head *it, struct list_head *l1) +{ + it->prev = l1->prev; + it->next = l1; + + l1->prev->next = it; + l1->prev = it; +} + +/** + * check if a list is empty (return true) + * @param l1 list main node (head) + */ +static inline int list_empty(struct list_head *l1) +{ + return l1->next == l1; +} + +/** + * delete an entry from a list + * @param it list entry to be deleted + */ +static inline void list_del(struct list_head *it) +{ + it->next->prev = it->prev; + it->prev->next = it->next; +} + +/** + * Extract an entry. + * @param list_head_i list head item, from which entry is extracted + * @param item_type type (struct) of list entry + * @param field_name name of list_head field within item_type + */ +#define list_entry(list_head_i, item_type, field_name) \ + (item_type *)(((void*)(list_head_i)) - offsetof(item_type, field_name)) + +#define list_first_entry(list_head_i, item_type, field_name) \ + list_entry((list_head_i)->next, item_type, field_name) + +/** + * @param iterator uninitialized list_head pointer, to be used as iterator + * @param plist list head (main node) + */ +#define list_for_each(iterator, plist) \ + for (iterator = (plist)->next; \ + (iterator)->next != (plist)->next; \ + iterator = (iterator)->next) + +/** + * Safe version of list_for_each which works even if entries are deleted during + * loop. + * @param iterator uninitialized list_head pointer, to be used as iterator + * @param q another uninitialized list_head, used as helper + * @param plist list head (main node) + */ +/* + * Algorithm handles situation, where q is deleted. + * consider in example 3 element list with header h: + * + * h -> 1 -> 2 -> 3 -> + *1. i q + * + *2. i q + * + *3. q i + */ +#define list_for_each_safe(iterator, q, plist) \ + for (iterator = (q = (plist)->next->next)->prev; \ + (q) != (plist)->next; \ + iterator = (q = (q)->next)->prev) + +#define _list_entry_helper(item, head, field_name) \ + list_entry(head, typeof(*item), field_name) + +/** + * Iterate over list entries. 
+ * @param list pointer to list item (iterator) + * @param plist pointer to list_head item + * @param field_name name of list_head field in list entry + */ +#define list_for_each_entry(item, plist, field_name) \ + for (item = _list_entry_helper(item, (plist)->next, field_name); \ + _list_entry_helper(item, (item)->field_name.next, field_name) !=\ + _list_entry_helper(item, (plist)->next, field_name); \ + item = _list_entry_helper(item, (item)->field_name.next, field_name)) + +/** + * Safe version of list_for_each_entry which works even if entries are deleted + * during loop. + * @param list pointer to list item (iterator) + * @param q another pointer to list item, used as helper + * @param plist pointer to list_head item + * @param field_name name of list_head field in list entry + */ +#define list_for_each_entry_safe(item, q, plist, field_name) \ + for (item = _list_entry_helper(item, (plist)->next, field_name), \ + q = _list_entry_helper(item, (item)->field_name.next, field_name); \ + _list_entry_helper(item, (item)->field_name.next, field_name) != \ + _list_entry_helper(item, (plist)->next, field_name); \ + item = q, q = _list_entry_helper(q, (q)->field_name.next, field_name)) + +#endif diff --git a/tests/ut/print_desc.h b/tests/ut/print_desc.h new file mode 100644 index 0000000..90de578 --- /dev/null +++ b/tests/ut/print_desc.h @@ -0,0 +1,6 @@ +/* + * Copyright(c) 2012-2018 Intel Corporation + * SPDX-License-Identifier: BSD-3-Clause-Clear + */ + +#define print_test_description(description) print_message("[ DESC ] %s\n", description);
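The list helpers in tests/ut/ocf_env/ocf_env_list.h mimic the kernel-style intrusive list API used throughout OCF. The following is a minimal, self-contained usage sketch (illustrative only, not part of this commit; the struct item type is hypothetical) showing how an entry embeds a struct list_head and is iterated with list_for_each_entry. It relies on the GNU typeof and void-pointer arithmetic extensions already assumed by the header, so it is expected to build with gcc.

/* Hypothetical usage sketch of ocf_env_list.h; not part of this commit. */
#include <stdio.h>
#include <stddef.h>
#include "ocf_env_list.h"

struct item {
	int value;
	struct list_head node;	/* embedded list hook */
};

int main(void)
{
	struct list_head head;
	struct item a = { .value = 1 }, b = { .value = 2 };
	struct item *it;

	INIT_LIST_HEAD(&head);
	list_add_tail(&a.node, &head);	/* head -> a */
	list_add_tail(&b.node, &head);	/* head -> a -> b */

	/* Iterates over entries (struct item), not raw list_head nodes */
	list_for_each_entry(it, &head, node)
		printf("%d\n", it->value);	/* prints 1 then 2 */

	return 0;
}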