Initial commit

Signed-off-by: Robert Baldyga <robert.baldyga@intel.com>
Robert Baldyga
2018-11-29 15:14:21 +01:00
commit a8e1ce8cc5
178 changed files with 35378 additions and 0 deletions

src/utils/utils_allocator.c (new file, 267 lines)

@@ -0,0 +1,267 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "utils_allocator.h"
#include "../ocf_priv.h"
#include "../ocf_cache_priv.h"
#include "ocf_env.h"
#define OCF_ALLOCATOR_K_MAX (128 * KiB)
static int _ocf_realloc_with_cp(void **mem, size_t size, size_t count,
size_t *limit, bool cp)
{
size_t alloc_size = size * count;
ENV_BUG_ON(!mem);
ENV_BUG_ON(!limit);
if (size && count) {
/* Memory reallocation request */
if (alloc_size > *limit) {
/* Not enough space, allocate a new buffer */
void *new_mem;
if (alloc_size > OCF_ALLOCATOR_K_MAX)
new_mem = env_vzalloc(alloc_size);
else
new_mem = env_zalloc(alloc_size, ENV_MEM_NOIO);
if (!new_mem) {
/* Allocation error */
return -1;
}
/* Copy old content (if requested) and free previous memory */
if (*mem) {
if (cp) {
/* copy previous content into new allocated
* memory
*/
ENV_BUG_ON(env_memcpy(new_mem, alloc_size, *mem, *limit));
}
if (*limit > OCF_ALLOCATOR_K_MAX)
env_vfree(*mem);
else
env_free(*mem);
}
/* Update limit */
*limit = alloc_size;
/* Update memory pointer */
*mem = new_mem;
return 0;
}
/*
* The memory space is large enough, no reallocation required.
* For the copying variant, zero the space beyond the requested size.
*/
if (cp)
ENV_BUG_ON(env_memset(*mem + alloc_size, *limit - alloc_size, 0));
return 0;
}
if ((size == 0) && (count == 0)) {
if ((*mem) && (*limit)) {
/* Need to free memory */
if (*limit > OCF_ALLOCATOR_K_MAX)
env_vfree(*mem);
else
env_free(*mem);
/* Update limit */
*limit = 0;
*mem = NULL;
return 0;
}
if ((!*mem) && (*limit == 0)) {
/* No previous allocation, nothing to do */
return 0;
}
}
ENV_BUG();
return -1;
}
int ocf_realloc(void **mem, size_t size, size_t count, size_t *limit)
{
return _ocf_realloc_with_cp(mem, size, count, limit, false);
}
int ocf_realloc_cp(void **mem, size_t size, size_t count, size_t *limit)
{
return _ocf_realloc_with_cp(mem, size, count, limit, true);
}
void ocf_realloc_init(void **mem, size_t *limit)
{
ENV_BUG_ON(!mem);
ENV_BUG_ON(!limit);
*mem = NULL;
*limit = 0;
}
enum {
ocf_mpool_1,
ocf_mpool_2,
ocf_mpool_4,
ocf_mpool_8,
ocf_mpool_16,
ocf_mpool_32,
ocf_mpool_64,
ocf_mpool_128,
ocf_mpool_max
};
struct ocf_mpool {
struct ocf_cache *cache;
/*!< Cache instance */
uint32_t item_size;
/*!< Size of specific item of memory pool */
uint32_t hdr_size;
/*!< Header size before items */
env_allocator *allocator[ocf_mpool_max];
/*!< OS handle to memory pool */
int flags;
/*!< Allocation flags */
};
#define ALLOCATOR_NAME_MAX 128
struct ocf_mpool *ocf_mpool_create(struct ocf_cache *cache,
uint32_t hdr_size, uint32_t size, int flags, int mpool_max,
const char *name_prefix)
{
uint32_t i;
char name[ALLOCATOR_NAME_MAX] = { '\0' };
int result;
struct ocf_mpool *mpool;
OCF_CHECK_NULL(name_prefix);
mpool = env_zalloc(sizeof(*mpool), ENV_MEM_NORMAL);
if (!mpool)
goto ocf_multi_allocator_create_ERROR;
mpool->item_size = size;
mpool->hdr_size = hdr_size;
mpool->cache = cache;
mpool->flags = flags;
for (i = 0; i < min(ocf_mpool_max, mpool_max + 1); i++) {
result = snprintf(name, sizeof(name), "%s_%u", name_prefix,
(1 << i));
if (result < 0 || result >= sizeof(name))
goto ocf_multi_allocator_create_ERROR;
mpool->allocator[i] = env_allocator_create(
hdr_size + (size * (1 << i)), name);
if (!mpool->allocator[i])
goto ocf_multi_allocator_create_ERROR;
}
return mpool;
ocf_multi_allocator_create_ERROR:
ocf_mpool_destroy(mpool);
return NULL;
}
void ocf_mpool_destroy(struct ocf_mpool *mallocator)
{
if (mallocator) {
uint32_t i;
for (i = 0; i < ocf_mpool_max; i++)
if (mallocator->allocator[i])
env_allocator_destroy(mallocator->allocator[i]);
env_free(mallocator);
}
}
static env_allocator *ocf_mpool_get_allocator(
struct ocf_mpool *mallocator, uint32_t count)
{
unsigned int idx;
if (unlikely(count == 0))
return mallocator->allocator[ocf_mpool_1]; /* smallest pool */
idx = 31 - __builtin_clz(count);
if (__builtin_ffs(count) <= idx)
idx++;
if (idx >= ocf_mpool_max)
return NULL;
return mallocator->allocator[idx];
}
void *ocf_mpool_new_f(struct ocf_mpool *mpool, uint32_t count, int flags)
{
void *items = NULL;
env_allocator *allocator;
OCF_CHECK_NULL(mpool);
allocator = ocf_mpool_get_allocator(mpool, count);
if (allocator)
items = env_allocator_new(allocator);
else
items = env_zalloc(mpool->hdr_size + (mpool->item_size * count), flags);
#ifdef ZERO_OR_NULL_PTR
if (ZERO_OR_NULL_PTR(items))
return NULL;
#endif
return items;
}
void *ocf_mpool_new(struct ocf_mpool *mpool, uint32_t count)
{
return ocf_mpool_new_f(mpool, count, mpool->flags);
}
void ocf_mpool_del(struct ocf_mpool *mpool,
void *items, uint32_t count)
{
env_allocator *allocator;
OCF_CHECK_NULL(mpool);
allocator = ocf_mpool_get_allocator(mpool, count);
if (allocator)
env_allocator_del(allocator, items);
else
env_free(items);
}
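The bucket selection in ocf_mpool_get_allocator() above rounds the requested
item count up to the next power of two: 31 - __builtin_clz(count) is
floor(log2(count)), and the __builtin_ffs() test bumps the index whenever
count is not an exact power of two. A minimal standalone sketch of that math
(bucket_idx is a hypothetical helper name):

#include <assert.h>

/* idx = ceil(log2(count)) - the smallest power-of-two bucket that fits */
static unsigned bucket_idx(unsigned count)
{
	unsigned idx = 31 - __builtin_clz(count); /* floor(log2(count)) */

	/* For non-powers-of-two the lowest set bit lies below the highest
	 * one, so __builtin_ffs(count) <= idx and we round up. */
	if (__builtin_ffs(count) <= (int)idx)
		idx++;

	return idx;
}

int main(void)
{
	assert(bucket_idx(1) == 0);	/* ocf_mpool_1 */
	assert(bucket_idx(3) == 2);	/* rounded up to ocf_mpool_4 */
	assert(bucket_idx(64) == 6);	/* exact power of two: ocf_mpool_64 */
	assert(bucket_idx(65) == 7);	/* rounded up to ocf_mpool_128 */
	return 0;
}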

src/utils/utils_allocator.h (new file, 69 lines)

@@ -0,0 +1,69 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef UTILS_ALLOCATOR_H_
#define UTILS_ALLOCATOR_H_
/**
* @file utils_allocator.h
* @brief OCF memory reallocator
*/
void ocf_realloc_init(void **mem, size_t *limit);
int ocf_realloc(void **mem, size_t size, size_t count, size_t *limit);
int ocf_realloc_cp(void **mem, size_t size, size_t count, size_t *limit);
/**
* @brief Initialize memory pointer and limit before reallocator usage
*
* @param[inout] mem - Pointer to the memory
* @param[inout] limit - Variable used internally by the reallocator,
* indicating the last allocation size
*/
#define OCF_REALLOC_INIT(mem, limit) \
ocf_realloc_init((void **)mem, limit)
/**
* @brief De-Initialize memory pointer and limit, free memory
*
* @param[inout] mem - Pointer to the memory
* @param[inout] limit - Variable used internally by the reallocator,
* indicating the last allocation size
*/
#define OCF_REALLOC_DEINIT(mem, limit) \
ocf_realloc((void **)mem, 0, 0, limit)
/**
* @brief Reallocate referenced memory if it is required.
*
* @param[inout] mem - Pointer to the memory
* @param[in] size - Size of a single element
* @param[in] count - Count of elements
* @param[inout] limit - Variable used internally by the reallocator,
* indicating the last allocation size
*
* @return 0 - Reallocation successful, non-zero - reallocation error
*/
#define OCF_REALLOC(mem, size, count, limit) \
ocf_realloc((void **)mem, size, count, limit)
/**
* @brief Reallocate referenced memory if required, copying the old content
* into the new memory space; space beyond the old content is set to '0'
*
* @param[inout] mem - Pointer to the memory
* @param[in] size - Size of a single element
* @param[in] count - Count of elements
* @param[inout] limit - Variable used internally by the reallocator,
* indicating the last allocation size
*
* @return 0 - Reallocation successful, non-zero - reallocation error
*/
#define OCF_REALLOC_CP(mem, size, count, limit) \
ocf_realloc_cp((void **)mem, size, count, limit)
#endif /* UTILS_ALLOCATOR_H_ */
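A minimal usage sketch of the reallocator macros above, assuming an OCF
translation unit (struct entry, table and table_limit are hypothetical
names). The caller owns both the pointer and the size_t limit variable that
the reallocator uses to track the last allocation size:

struct entry {
	int key;
	int value;
};

static struct entry *table;
static size_t table_limit;

static int table_example(void)
{
	int result;

	OCF_REALLOC_INIT(&table, &table_limit); /* table = NULL, limit = 0 */

	/* Grow to 10 entries - a new zeroed buffer is allocated */
	result = OCF_REALLOC_CP(&table, sizeof(*table), 10, &table_limit);
	if (result)
		return result;

	/* 4 entries fit within the previous allocation - no reallocation,
	 * only the space beyond the requested size is zeroed */
	result = OCF_REALLOC_CP(&table, sizeof(*table), 4, &table_limit);
	if (result)
		return result;

	OCF_REALLOC_DEINIT(&table, &table_limit); /* free and reset */
	return 0;
}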

src/utils/utils_cache_line.c (new file, 177 lines)

@@ -0,0 +1,177 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "utils_cache_line.h"
static inline void ocf_cleaning_set_hot_cache_line(struct ocf_cache *cache,
ocf_cache_line_t line)
{
ocf_cleaning_t cleaning_type = cache->conf_meta->cleaning_policy_type;
ENV_BUG_ON(cleaning_type >= ocf_cleaning_max);
if (cleaning_policy_ops[cleaning_type].set_hot_cache_line) {
cleaning_policy_ops[cleaning_type].
set_hot_cache_line(cache, line);
}
}
static void __set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, ocf_cache_line_t line,
ocf_core_id_t core_id, ocf_part_id_t part_id)
{
bool is_valid;
ENV_BUG_ON(core_id >= OCF_CORE_MAX);
if (metadata_clear_valid_sec_changed(cache, line, start_bit, end_bit,
&is_valid)) {
/*
* Update the number of cached data for that core object
*/
env_atomic_dec(&cache->core_runtime_meta[core_id].
cached_clines);
env_atomic_dec(&cache->core_runtime_meta[core_id].
part_counters[part_id].cached_clines);
}
/* If there are waiters for this cache line, do not remove it
* from the collision list - the waiters will reuse it; only
* the valid bits are cleared
*/
if (!is_valid && !ocf_cache_line_are_waiters(cache, line)) {
ocf_purge_eviction_policy(cache, line);
ocf_metadata_sparse_cache_line(cache, line);
}
}
void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx)
{
ocf_cache_line_t line;
ocf_part_id_t part_id;
ocf_core_id_t core_id;
/* Check rq before dereferencing it */
ENV_BUG_ON(!rq);
line = rq->map[map_idx].coll_idx;
part_id = ocf_metadata_get_partition_id(cache, line);
core_id = rq->core_id;
__set_cache_line_invalid(cache, start_bit, end_bit, line, core_id,
part_id);
ocf_metadata_flush_mark(cache, rq, map_idx, INVALID, start_bit,
end_bit);
}
void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, ocf_cache_line_t line)
{
ocf_part_id_t part_id;
ocf_core_id_t core_id;
ocf_metadata_get_core_and_part_id(cache, line, &core_id, &part_id);
__set_cache_line_invalid(cache, start_bit, end_bit, line, core_id,
part_id);
}
void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx)
{
ocf_core_id_t core_id = rq->core_id;
ocf_cache_line_t line = rq->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
ENV_BUG_ON(!(core_id < OCF_CORE_MAX));
if (metadata_set_valid_sec_changed(cache, line, start_bit, end_bit)) {
/*
* Update the number of cached data for that core object
*/
env_atomic_inc(&cache->core_runtime_meta[core_id].
cached_clines);
env_atomic_inc(&cache->core_runtime_meta[core_id].
part_counters[part_id].cached_clines);
}
}
void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx)
{
ocf_core_id_t core_id = rq->core_id;
ocf_cache_line_t line = rq->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(!(core_id < OCF_CORE_MAX));
if (metadata_clear_dirty_sec_changed(cache, line, start_bit, end_bit)) {
/*
* Update the number of dirty cached data for that
* core object
*/
if (env_atomic_dec_and_test(&cache->core_runtime_meta[core_id].
dirty_clines)) {
/*
* If this is last dirty cline reset dirty
* timestamp
*/
env_atomic64_set(&cache->core_runtime_meta[core_id].
dirty_since, 0);
}
/*
* decrement dirty clines statistic for given cline
*/
env_atomic_dec(&cache->core_runtime_meta[core_id].
part_counters[part_id].dirty_clines);
if (likely(evict_policy_ops[evp_type].clean_cline))
evict_policy_ops[evp_type].clean_cline(cache, part_id, line);
ocf_purge_cleaning_policy(cache, line);
}
ocf_metadata_flush_mark(cache, rq, map_idx, CLEAN, start_bit, end_bit);
}
void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx)
{
ocf_core_id_t core_id = rq->core_id;
ocf_cache_line_t line = rq->map[map_idx].coll_idx;
ocf_part_id_t part_id = ocf_metadata_get_partition_id(cache, line);
uint8_t evp_type = cache->conf_meta->eviction_policy_type;
ENV_BUG_ON(!(core_id < OCF_CORE_MAX));
if (metadata_set_dirty_sec_changed(cache, line, start_bit, end_bit)) {
/*
* If this is first dirty cline set dirty timestamp
*/
env_atomic64_cmpxchg(&cache->core_runtime_meta[core_id].
dirty_since, 0, env_get_tick_count());
/*
* Update the number of dirty cached data for that
* core object
*/
env_atomic_inc(&cache->core_runtime_meta[core_id].dirty_clines);
/*
* increment dirty clines statistic for given cline
*/
env_atomic_inc(&cache->core_runtime_meta[core_id].
part_counters[part_id].dirty_clines);
if (likely(evict_policy_ops[evp_type].dirty_cline))
evict_policy_ops[evp_type].dirty_cline(cache, part_id, line);
}
ocf_cleaning_set_hot_cache_line(cache, line);
ocf_metadata_flush_mark(cache, rq, map_idx, DIRTY, start_bit, end_bit);
}

src/utils/utils_cache_line.h (new file, 372 lines)

@@ -0,0 +1,372 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef UTILS_CACHE_LINE_H_
#define UTILS_CACHE_LINE_H_
#include "../metadata/metadata.h"
#include "../eviction/eviction.h"
#include "../eviction/ops.h"
#include "../concurrency/ocf_cache_concurrency.h"
#include "../engine/cache_engine.h"
#include "../ocf_request.h"
#include "../ocf_def_priv.h"
/**
* @file utils_cache_line.h
* @brief OCF utilities for cache line operations
*/
static inline ocf_cache_line_size_t ocf_line_size(
struct ocf_cache *cache)
{
return cache->metadata.settings.size;
}
static inline uint64_t ocf_line_pages(struct ocf_cache *cache)
{
return cache->metadata.settings.size / PAGE_SIZE;
}
static inline uint64_t ocf_line_sectors(struct ocf_cache *cache)
{
return cache->metadata.settings.sector_count;
}
static inline uint64_t ocf_line_end_sector(struct ocf_cache *cache)
{
return cache->metadata.settings.sector_end;
}
static inline uint64_t ocf_line_start_sector(struct ocf_cache *cache)
{
return cache->metadata.settings.sector_start;
}
static inline uint64_t ocf_bytes_round_lines(struct ocf_cache *cache,
uint64_t bytes)
{
return (bytes + ocf_line_size(cache) - 1) / ocf_line_size(cache);
}
static inline uint64_t ocf_bytes_2_lines(struct ocf_cache *cache,
uint64_t bytes)
{
return bytes / ocf_line_size(cache);
}
static inline uint64_t ocf_bytes_2_lines_round_up(
struct ocf_cache *cache, uint64_t bytes)
{
return DIV_ROUND_UP(bytes, ocf_line_size(cache));
}
static inline uint64_t ocf_lines_2_bytes(struct ocf_cache *cache,
uint64_t lines)
{
return lines * ocf_line_size(cache);
}
/**
* @brief Set cache line invalid
*
* @param cache Cache instance
* @param start_bit Start bit of cache line for which state will be set
* @param end_bit End bit of cache line for which state will be set
* @param rq OCF request
* @param map_idx Index in the request map of the cache line to invalidate
*/
void set_cache_line_invalid(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx);
/**
* @brief Set cache line invalid without flush
*
* @param cache Cache instance
* @param start_bit Start bit of cache line for which state will be set
* @param end_bit End bit of cache line for which state will be set
* @param line Cache line to invalidate
*/
void set_cache_line_invalid_no_flush(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, ocf_cache_line_t line);
/**
* @brief Set cache line valid
*
* @param cache Cache instance
* @param start_bit Start bit of cache line for which state will be set
* @param end_bit End bit of cache line for which state will be set
* @param rq OCF request
* @param map_idx Index in the request map of the cache line to set valid
*/
void set_cache_line_valid(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx);
/**
* @brief Set cache line clean
*
* @param cache Cache instance
* @param start_bit Start bit of cache line for which state will be set
* @param end_bit End bit of cache line for which state will be set
* @param rq OCF request
* @param map_idx Index in the request map of the cache line to set clean
*/
void set_cache_line_clean(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx);
/**
* @brief Set cache line dirty
*
* @param cache Cache instance
* @param start_bit Start bit of cache line for which state will be set
* @param end_bit End bit of cache line for which state will be set
* @param rq OCF request
* @param map_idx Index in the request map of the cache line to set dirty
*/
void set_cache_line_dirty(struct ocf_cache *cache, uint8_t start_bit,
uint8_t end_bit, struct ocf_request *rq, uint32_t map_idx);
/**
* @brief Remove cache line from cleaning policy
*
* @param cache - cache instance
* @param line - cache line to be removed
*
*/
static inline void ocf_purge_cleaning_policy(struct ocf_cache *cache,
ocf_cache_line_t line)
{
ocf_cleaning_t clean_type = cache->conf_meta->cleaning_policy_type;
ENV_BUG_ON(clean_type >= ocf_cleaning_max);
/* Remove from cleaning policy */
if (cleaning_policy_ops[clean_type].purge_cache_block != NULL)
cleaning_policy_ops[clean_type].purge_cache_block(cache, line);
}
/**
* @brief Remove cache line from eviction policy
*
* @param cache - cache instance
* @param line - cache line to be removed
*/
static inline void ocf_purge_eviction_policy(struct ocf_cache *cache,
ocf_cache_line_t line)
{
ocf_eviction_purge_cache_line(cache, line);
}
/**
* @brief Set cache line clean and invalid and remove it from lists
*
* @param cache Cache instance
* @param start Start bit of range in cache line to purge
* @param stop End bit of range in cache line to purge
* @param rq OCF request
* @param map_idx Array index to map containing cache line to purge
*/
static inline void _ocf_purge_cache_line_sec(struct ocf_cache *cache,
uint8_t start, uint8_t stop, struct ocf_request *rq,
uint32_t map_idx)
{
set_cache_line_clean(cache, start, stop, rq, map_idx);
set_cache_line_invalid(cache, start, stop, rq, map_idx);
}
/**
* @brief Purge cache line (remove it completely: from collision, move to
* the free partition, and remove from the cleaning and eviction policies)
*
* @param rq - OCF request to purge
*/
static inline void ocf_purge_map_info(struct ocf_request *rq)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
struct ocf_map_info *map = rq->map;
struct ocf_cache *cache = rq->cache;
uint32_t count = rq->core_line_count;
/* Purge range on the basis of map info
*
* | 01234567 | 01234567 | ... | 01234567 | 01234567 |
* | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
* | first | Middle | last |
*/
for (map_idx = 0; map_idx < count; map_idx++) {
if (map[map_idx].status == LOOKUP_MISS)
continue;
start_bit = 0;
end_bit = ocf_line_end_sector(cache);
if (map_idx == 0) {
/* First */
start_bit = BYTES_TO_SECTORS(rq->byte_position)
% ocf_line_sectors(cache);
}
if (map_idx == (count - 1)) {
/* Last */
end_bit = BYTES_TO_SECTORS(rq->byte_position +
rq->byte_length - 1) %
ocf_line_sectors(cache);
}
_ocf_purge_cache_line_sec(cache, start_bit, end_bit, rq,
map_idx);
}
}
static inline void ocf_set_valid_map_info(struct ocf_request *rq)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
struct ocf_cache *cache = rq->cache;
uint32_t count = rq->core_line_count;
struct ocf_map_info *map = rq->map;
/* Set valid bits for sectors on the basis of map info
*
* | 01234567 | 01234567 | ... | 01234567 | 01234567 |
* | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
* | first | Middle | last |
*/
for (map_idx = 0; map_idx < count; map_idx++) {
ENV_BUG_ON(map[map_idx].status == LOOKUP_MISS);
start_bit = 0;
end_bit = ocf_line_end_sector(cache);
if (map_idx == 0) {
/* First */
start_bit = BYTES_TO_SECTORS(rq->byte_position)
% ocf_line_sectors(cache);
}
if (map_idx == (count - 1)) {
/* Last */
end_bit = BYTES_TO_SECTORS(rq->byte_position +
rq->byte_length - 1)
% ocf_line_sectors(cache);
}
set_cache_line_valid(cache, start_bit, end_bit, rq, map_idx);
}
}
static inline void ocf_set_dirty_map_info(struct ocf_request *rq)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
struct ocf_cache *cache = rq->cache;
uint32_t count = rq->core_line_count;
/* Set dirty bits for sectors on the basis of map info
*
* | 01234567 | 01234567 | ... | 01234567 | 01234567 |
* | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
* | first | Middle | last |
*/
for (map_idx = 0; map_idx < count; map_idx++) {
start_bit = 0;
end_bit = ocf_line_end_sector(cache);
if (map_idx == 0) {
/* First */
start_bit = BYTES_TO_SECTORS(rq->byte_position)
% ocf_line_sectors(cache);
}
if (map_idx == (count - 1)) {
/* Last */
end_bit = BYTES_TO_SECTORS(rq->byte_position +
rq->byte_length - 1) %
ocf_line_sectors(cache);
}
set_cache_line_dirty(cache, start_bit, end_bit, rq, map_idx);
}
}
static inline void ocf_set_clean_map_info(struct ocf_request *rq)
{
uint32_t map_idx = 0;
uint8_t start_bit;
uint8_t end_bit;
struct ocf_cache *cache = rq->cache;
uint32_t count = rq->core_line_count;
/* Set clean bits for sectors on the basis of map info
*
* | 01234567 | 01234567 | ... | 01234567 | 01234567 |
* | -----+++ | ++++++++ | +++ | ++++++++ | +++++--- |
* | first | Middle | last |
*/
for (map_idx = 0; map_idx < count; map_idx++) {
start_bit = 0;
end_bit = ocf_line_end_sector(cache);
if (map_idx == 0) {
/* First */
start_bit = BYTES_TO_SECTORS(rq->byte_position)
% ocf_line_sectors(cache);
}
if (map_idx == (count - 1)) {
/* Last */
end_bit = BYTES_TO_SECTORS(rq->byte_position +
rq->byte_length - 1) %
ocf_line_sectors(cache);
}
set_cache_line_clean(cache, start_bit, end_bit, rq, map_idx);
}
}
/**
* @brief Validate cache line size
*
* @param[in] size Cache line size
*
* @retval true cache line size is valid
* @retval false cache line size is invalid
*/
static inline bool ocf_cache_line_size_is_valid(uint64_t size)
{
switch (size) {
case 4 * KiB:
case 8 * KiB:
case 16 * KiB:
case 32 * KiB:
case 64 * KiB:
return true;
default:
return false;
}
}
#endif /* UTILS_CACHE_LINE_H_ */
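A worked example of the start_bit/end_bit clamping used by the *_map_info
helpers above, assuming 4 KiB cache lines with 512-byte sectors (so
ocf_line_sectors() == 8) and taking BYTES_TO_SECTORS(x) as x / 512:

	/* Request: byte_position = 1536, byte_length = 9728,
	 * i.e. bytes 1536..11263, spanning core lines 0..2 */

	/* First line: start_bit = (1536 / 512) % 8 = 3, end_bit = 7 */

	/* Middle lines: full range, start_bit = 0, end_bit = 7 */

	/* Last line: end_bit = ((1536 + 9728 - 1) / 512) % 8
	 *	    = (11263 / 512) % 8 = 21 % 8 = 5 */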

src/utils/utils_cleaner.c (new file, 1049 lines)

File diff suppressed because it is too large

src/utils/utils_cleaner.h (new file, 133 lines)

@@ -0,0 +1,133 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef UTILS_CLEANER_H_
#define UTILS_CLEANER_H_
/**
* @brief Getter for the next cache line to be cleaned
*
* @param[in] cache Cache instance
* @param[in] getter_context Context for the cleaner caller
* @param[in] item Current iteration item when collecting cache lines
* @param[out] line Line to be cleaned
* @retval 0 Take this cache line to clean
* @retval Non-zero Skip this cache line and do not clean it
*/
typedef int (*ocf_cleaner_get_item)(struct ocf_cache *cache,
void *getter_context, uint32_t item, ocf_cache_line_t *line);
/**
* @brief Cleaning attributes for clean request
*/
struct ocf_cleaner_attribs {
uint8_t cache_line_lock : 1; /*!< Clean under cache line lock */
uint8_t metadata_locked : 1; /*!< True if caller holds metadata lock */
uint8_t do_sort : 1; /*!< Sort cache lines which will be cleaned */
uint32_t count; /*!< max number of cache lines to be cleaned */
void *cmpl_context; /*!< Completion context of cleaning requester */
ocf_end_t cmpl_fn; /*!< Completion function of requester */
ocf_cleaner_get_item getter;
/*!< Getter for collecting cache lines which will be cleaned */
void *getter_context;
/*!< Context for getting cache lines */
uint32_t getter_item;
/*!< Additional variable that can be used by cleaner caller
* to iterate over items
*/
uint32_t io_queue;
};
/**
* @brief Flush table entry structure
*/
struct flush_data {
uint64_t core_line;
uint32_t cache_line;
ocf_core_id_t core_id;
};
/**
* @brief Flush table container
*/
struct flush_container {
ocf_core_id_t core_id;
struct flush_data *flush_data;
uint32_t count;
uint32_t iter;
struct ocf_cleaner_attribs attribs;
ocf_cache_t cache;
env_atomic *progress;
env_atomic *error;
env_waitqueue *wq;
env_atomic completed;
uint64_t flush_portion;
uint64_t ticks1;
uint64_t ticks2;
};
/**
* @brief Run cleaning procedure
*
* @param cache - Cache instance
* @param attribs - Cleaning attributes
*/
void ocf_cleaner_fire(struct ocf_cache *cache,
const struct ocf_cleaner_attribs *attribs);
/**
* @brief Perform cleaning procedure for specified flush data synchronously.
* Only dirty cache lines will be cleaned.
*
* @param cache - Cache instance
* @param flush - flush data to be cleaned
* @param count - Count of cache lines to be cleaned
* @param attribs - Cleaning attributes
* @return - Cleaning result: 0 - no errors, non-zero - errors occurred
*/
int ocf_cleaner_do_flush_data(struct ocf_cache *cache,
struct flush_data *flush, uint32_t count,
struct ocf_cleaner_attribs *attribs);
/**
* @brief Perform cleaning procedure for specified flush data asynchronously.
* Only dirty cache lines will be cleaned.
*
* @param cache - Cache instance
* @param flush - flush data to be cleaned
* @param count - Count of cache lines to be cleaned
* @param attribs - Cleaning attributes
* @return - Cleaning result: 0 - no errors, non-zero - errors occurred
*/
int ocf_cleaner_do_flush_data_async(struct ocf_cache *cache,
struct flush_data *flush, uint32_t count,
struct ocf_cleaner_attribs *attribs);
/**
* @brief Sort flush data by core sector
*
* @param tbl Flush data to sort
* @param num Number of entries in tbl
*/
void ocf_cleaner_sort_sectors(struct flush_data *tbl, uint32_t num);
/**
* @brief Sort flush data in all flush containers
*
* @param fctbl Flush containers to sort
* @param num Number of entries in fctbl
*/
void ocf_cleaner_sort_flush_containers(struct flush_container *fctbl,
uint32_t num);
#endif /* UTILS_CLEANER_H_ */
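A hedged sketch of driving the cleaner with these types: a getter callback
that feeds cache lines from a flush_data table, wired into
ocf_cleaner_attribs (names prefixed my_ are hypothetical; this mirrors the
declared API, not necessarily how utils_cleaner.c consumes every field):

struct my_clean_ctx {
	struct flush_data *tbl;
	uint32_t count;
};

static int my_get_item(struct ocf_cache *cache, void *getter_context,
		uint32_t item, ocf_cache_line_t *line)
{
	struct my_clean_ctx *ctx = getter_context;

	if (item >= ctx->count)
		return -1; /* non-zero - skip, nothing more to clean */

	*line = ctx->tbl[item].cache_line;
	return 0; /* zero - clean this cache line */
}

static void my_clean(struct ocf_cache *cache, struct my_clean_ctx *ctx,
		ocf_end_t cmpl, void *cmpl_ctx)
{
	struct ocf_cleaner_attribs attribs = {
		.cache_line_lock = 1,
		.do_sort = 1,
		.count = ctx->count,
		.getter = my_get_item,
		.getter_context = ctx,
		.cmpl_fn = cmpl,
		.cmpl_context = cmpl_ctx,
	};

	ocf_cleaner_fire(cache, &attribs);
}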

src/utils/utils_device.h (new file, 94 lines)

@@ -0,0 +1,94 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef UTILS_DEVICE_H_
#define UTILS_DEVICE_H_
static inline int _ocf_uuid_set(const struct ocf_data_obj_uuid *uuid,
struct ocf_metadata_uuid *muuid)
{
int result;
if (!uuid || !muuid) {
return -EINVAL;
}
if (!uuid->data || !muuid->data) {
return -EINVAL;
}
if (uuid->size > sizeof(muuid->data)) {
return -ENOBUFS;
}
result = env_memcpy(muuid->data, sizeof(muuid->data), uuid->data, uuid->size);
if (result)
return result;
result = env_memset(muuid->data + uuid->size,
sizeof(muuid->data) - uuid->size, 0);
if (result)
return result;
muuid->size = uuid->size;
return 0;
}
static inline int ocf_uuid_cache_set(ocf_cache_t cache,
const struct ocf_data_obj_uuid *uuid)
{
int result;
void *u;
if (!uuid)
return -EINVAL;
u = env_vmalloc(uuid->size);
if (!u)
return -ENOMEM;
cache->device->obj.uuid.size = 0;
result = env_memcpy(u, uuid->size,
uuid->data, uuid->size);
if (result) {
env_vfree(u);
return result;
}
cache->device->obj.uuid.data = u;
cache->device->obj.uuid.size = uuid->size;
return 0;
}
static inline void ocf_uuid_cache_clear(ocf_cache_t cache)
{
env_vfree(cache->device->obj.uuid.data);
cache->device->obj.uuid.size = 0;
}
static inline int ocf_uuid_core_set(ocf_cache_t cache, ocf_core_t core,
const struct ocf_data_obj_uuid *uuid)
{
struct ocf_data_obj_uuid *cuuid = &ocf_core_get_data_object(core)->uuid;
struct ocf_metadata_uuid *muuid = ocf_metadata_get_core_uuid(cache,
ocf_core_get_id(core));
if (_ocf_uuid_set(uuid, muuid)) {
return -ENOBUFS;
}
cuuid->data = muuid->data;
cuuid->size = muuid->size;
return 0;
}
static inline void ocf_uuid_core_clear(ocf_cache_t cache, ocf_core_t core)
{
struct ocf_data_obj_uuid uuid = { .size = 0, };
ocf_uuid_core_set(cache, core, &uuid);
}
#endif /* UTILS_DEVICE_H_ */

src/utils/utils_io.c (new file, 383 lines)

@@ -0,0 +1,383 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "../ocf_data_obj_priv.h"
#include "../ocf_request.h"
#include "utils_io.h"
#include "utils_cache_line.h"
struct ocf_submit_io_wait_context {
env_completion complete;
int error;
env_atomic rq_remaining;
};
/*
* IO discard context
*/
struct discard_io_request {
void *context;
env_atomic req_remaining;
env_completion completion;
int error;
};
void ocf_submit_obj_flush(ocf_data_obj_t obj, ocf_end_t callback, void *ctx)
{
struct ocf_io *io;
io = ocf_dobj_new_io(obj);
if (!io) {
callback(ctx, -ENOMEM);
return;
}
ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
ocf_io_set_default_cmpl(io, ctx, callback);
ocf_dobj_submit_flush(io);
}
static void _ocf_obj_flush_end(void *_cntx, int err)
{
struct ocf_submit_io_wait_context *cntx = _cntx;
cntx->error = err;
env_completion_complete(&cntx->complete);
}
int ocf_submit_obj_flush_wait(ocf_data_obj_t obj)
{
struct ocf_submit_io_wait_context cntx = { };
env_atomic_set(&cntx.rq_remaining, 1);
env_completion_init(&cntx.complete);
ocf_submit_obj_flush(obj, _ocf_obj_flush_end, &cntx);
env_completion_wait(&cntx.complete);
return cntx.error;
}
static void ocf_submit_obj_discard_wait_io(struct ocf_io *io, int error)
{
struct ocf_submit_io_wait_context *cntx = io->priv1;
if (error)
cntx->error = error;
ocf_io_put(io); /* Release IO */
if (env_atomic_dec_return(&cntx->rq_remaining))
return;
/* All discard IO handled, signal it by setting completion */
env_completion_complete(&cntx->complete);
}
int ocf_submit_obj_discard_wait(ocf_data_obj_t obj, uint64_t addr,
uint64_t length)
{
struct ocf_submit_io_wait_context cntx = { };
uint32_t bytes;
uint32_t max_length = ~0;
ENV_BUG_ON(env_memset(&cntx, sizeof(cntx), 0));
env_atomic_set(&cntx.rq_remaining, 1);
env_completion_init(&cntx.complete);
while (length) {
struct ocf_io *io = ocf_dobj_new_io(obj);
if (!io) {
cntx.error = -ENOMEM;
break;
}
if (length > max_length)
bytes = max_length;
else
bytes = length;
env_atomic_inc(&cntx.rq_remaining);
ocf_io_configure(io, addr, bytes, OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, &cntx, NULL,
ocf_submit_obj_discard_wait_io);
ocf_dobj_submit_discard(io);
addr += bytes;
length -= bytes;
}
if (env_atomic_dec_return(&cntx.rq_remaining) == 0)
env_completion_complete(&cntx.complete);
env_completion_wait(&cntx.complete);
return cntx.error;
}
static void ocf_submit_obj_zeroes_wait_io(struct ocf_io *io, int error)
{
struct ocf_submit_io_wait_context *cntx = io->priv1;
if (error)
cntx->error = error;
env_completion_complete(&cntx->complete);
}
int ocf_submit_write_zeroes_wait(ocf_data_obj_t obj, uint64_t addr,
uint64_t length)
{
struct ocf_submit_io_wait_context cntx = { };
uint32_t bytes;
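/* ~(PAGE_SIZE - 1) is the largest uint32_t multiple of PAGE_SIZE
* (0xFFFFF000 for 4 KiB pages), so every full-size chunk issued in the
* loop below stays page aligned; only the final chunk may be shorter. */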
uint32_t max_length = ~((uint32_t)PAGE_SIZE - 1);
uint32_t step = 0;
struct ocf_io *io;
io = ocf_dobj_new_io(obj);
if (!io)
return -ENOMEM;
while (length) {
env_completion_init(&cntx.complete);
bytes = MIN(length, max_length);
ocf_io_configure(io, addr, bytes, OCF_WRITE, 0, 0);
ocf_io_set_cmpl(io, &cntx, NULL,
ocf_submit_obj_zeroes_wait_io);
ocf_dobj_submit_write_zeroes(io);
addr += bytes;
length -= bytes;
env_completion_wait(&cntx.complete);
if (cntx.error)
break;
OCF_COND_RESCHED_DEFAULT(step);
}
ocf_io_put(io);
return cntx.error;
}
int ocf_submit_cache_page(struct ocf_cache *cache, uint64_t addr,
int dir, void *buffer)
{
ctx_data_t *data;
struct ocf_io *io;
int result = 0;
/* Allocate resources for IO */
io = ocf_dobj_new_io(&cache->device->obj);
data = ctx_data_alloc(cache->owner, 1);
if (!io || !data) {
result = -ENOMEM;
goto end;
}
if (dir == OCF_WRITE)
ctx_data_wr_check(cache->owner, data, buffer, PAGE_SIZE);
result = ocf_io_set_data(io, data, 0);
if (result)
goto end;
ocf_io_configure(io, addr, PAGE_SIZE, dir, 0, 0);
result = ocf_submit_io_wait(io);
if (result)
goto end;
if (dir == OCF_READ)
ctx_data_rd_check(cache->owner, buffer, data, PAGE_SIZE);
end:
if (io)
ocf_io_put(io);
ctx_data_free(cache->owner, data);
return result;
}
void ocf_submit_obj_discard(ocf_data_obj_t obj, struct ocf_request *req,
ocf_end_t callback, void *ctx)
{
struct ocf_io *io = ocf_dobj_new_io(obj);
if (!io) {
callback(ctx, -ENOMEM);
return;
}
ocf_io_configure(io, SECTORS_TO_BYTES(req->discard.sector),
SECTORS_TO_BYTES(req->discard.nr_sects),
OCF_WRITE, 0, 0);
ocf_io_set_default_cmpl(io, ctx, callback);
ocf_io_set_data(io, req->data, 0);
ocf_dobj_submit_discard(io);
}
void ocf_submit_cache_reqs(struct ocf_cache *cache,
struct ocf_map_info *map_info, struct ocf_request *req, int dir,
unsigned int reqs, ocf_end_t callback, void *ctx)
{
struct ocf_counters_block *cache_stats;
uint64_t flags = req->io ? req->io->flags : 0;
uint32_t class = req->io ? req->io->class : 0;
uint64_t addr, bytes, total_bytes = 0;
struct ocf_io *io;
uint32_t i;
int err;
cache_stats = &cache->core_obj[req->core_id].
counters->cache_blocks;
if (reqs == 1) {
io = ocf_new_cache_io(cache);
if (!io) {
callback(ctx, -ENOMEM);
goto update_stats;
}
addr = ocf_metadata_map_lg2phy(cache,
map_info[0].coll_idx);
addr *= ocf_line_size(cache);
addr += cache->device->metadata_offset;
addr += (req->byte_position % ocf_line_size(cache));
bytes = req->byte_length;
ocf_io_configure(io, addr, bytes, dir, class, flags);
ocf_io_set_default_cmpl(io, ctx, callback);
err = ocf_io_set_data(io, req->data, 0);
if (err) {
ocf_io_put(io);
callback(ctx, err);
goto update_stats;
}
ocf_dobj_submit_io(io);
total_bytes = req->byte_length;
goto update_stats;
}
/* Issue requests to cache. */
for (i = 0; i < reqs; i++) {
io = ocf_new_cache_io(cache);
if (!io) {
/* Complete all remaining IOs with an error */
for (; i < reqs; i++)
callback(ctx, -ENOMEM);
goto update_stats;
}
addr = ocf_metadata_map_lg2phy(cache,
map_info[i].coll_idx);
addr *= ocf_line_size(cache);
addr += cache->device->metadata_offset;
bytes = ocf_line_size(cache);
if (i == 0) {
uint64_t seek = (req->byte_position %
ocf_line_size(cache));
addr += seek;
bytes -= seek;
} else if (i == (reqs - 1)) {
uint64_t skip = (ocf_line_size(cache) -
((req->byte_position + req->byte_length) %
ocf_line_size(cache))) % ocf_line_size(cache);
bytes -= skip;
}
ocf_io_configure(io, addr, bytes, dir, class, flags);
ocf_io_set_default_cmpl(io, ctx, callback);
err = ocf_io_set_data(io, req->data, total_bytes);
if (err) {
ocf_io_put(io);
/* Complete all remaining IOs with an error */
for (; i < reqs; i++)
callback(ctx, err);
goto update_stats;
}
ocf_dobj_submit_io(io);
total_bytes += bytes;
}
update_stats:
if (dir == OCF_WRITE)
env_atomic64_add(total_bytes, &cache_stats->write_bytes);
else if (dir == OCF_READ)
env_atomic64_add(total_bytes, &cache_stats->read_bytes);
}
void ocf_submit_obj_req(ocf_data_obj_t obj, struct ocf_request *rq,
int dir, ocf_end_t callback, void *ctx)
{
struct ocf_cache *cache = rq->cache;
struct ocf_counters_block *core_stats;
uint64_t flags = rq->io ? rq->io->flags : 0;
uint32_t class = rq->io ? rq->io->class : 0;
struct ocf_io *io;
int err;
core_stats = &cache->core_obj[rq->core_id].
counters->core_blocks;
if (dir == OCF_WRITE)
env_atomic64_add(rq->byte_length, &core_stats->write_bytes);
else if (dir == OCF_READ)
env_atomic64_add(rq->byte_length, &core_stats->read_bytes);
io = ocf_dobj_new_io(obj);
if (!io) {
callback(ctx, -ENOMEM);
return;
}
ocf_io_configure(io, rq->byte_position, rq->byte_length, dir,
class, flags);
ocf_io_set_default_cmpl(io, ctx, callback);
err = ocf_io_set_data(io, rq->data, 0);
if (err) {
ocf_io_put(io);
callback(ctx, err);
return;
}
ocf_dobj_submit_io(io);
}
static void ocf_submit_io_wait_end(struct ocf_io *io, int error)
{
struct ocf_submit_io_wait_context *context = io->priv1;
context->error |= error;
env_completion_complete(&context->complete);
}
int ocf_submit_io_wait(struct ocf_io *io)
{
struct ocf_submit_io_wait_context context;
ENV_BUG_ON(env_memset(&context, sizeof(context), 0));
env_completion_init(&context.complete);
context.error = 0;
ocf_io_set_cmpl(io, &context, NULL, ocf_submit_io_wait_end);
ocf_dobj_submit_io(io);
env_completion_wait(&context.complete);
return context.error;
}

src/utils/utils_io.h (new file, 86 lines)

@@ -0,0 +1,86 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef UTILS_IO_H_
#define UTILS_IO_H_
#include "../ocf_request.h"
/**
* Checks if 2 IOs are overlapping.
* @param start1 start of first range (inclusive)
* @param end1 end of first range (inclusive)
* @param start2 start of second range (inclusive)
* @param end2 end of second range (inclusive)
* @return 0 in case overlap is not detected, otherwise 1
*/
static inline int ocf_io_range_overlaps(uint32_t start1, uint32_t end1,
uint32_t start2, uint32_t end2)
{
if (start2 <= start1 && end2 >= start1)
return 1;
if (start2 >= start1 && end1 >= start2)
return 1;
return 0;
}
/**
* Checks if 2 IOs are overlapping.
* @param start1 start of first range (inclusive)
* @param count1 number of bytes, cache lines (etc.) in first range
* @param start2 start of second range (inclusive)
* @param count2 number of bytes, cache lines (etc.) in second range
* @return 0 in case overlap is not detected, otherwise 1
*/
static inline int ocf_io_overlaps(uint32_t start1, uint32_t count1,
uint32_t start2, uint32_t count2)
{
return ocf_io_range_overlaps(start1, start1 + count1 - 1, start2,
start2 + count2 - 1);
}
int ocf_submit_io_wait(struct ocf_io *io);
void ocf_submit_obj_flush(ocf_data_obj_t obj, ocf_end_t callback,
void *context);
int ocf_submit_obj_flush_wait(ocf_data_obj_t obj);
int ocf_submit_obj_discard_wait(ocf_data_obj_t obj, uint64_t addr,
uint64_t length);
void ocf_submit_obj_discard(ocf_data_obj_t obj, struct ocf_request *req,
ocf_end_t callback, void *ctx);
int ocf_submit_write_zeroes_wait(ocf_data_obj_t obj, uint64_t addr,
uint64_t length);
int ocf_submit_cache_page(struct ocf_cache *cache, uint64_t addr,
int dir, void *buffer);
void ocf_submit_obj_req(ocf_data_obj_t obj, struct ocf_request *req,
int dir, ocf_end_t callback, void *ctx);
void ocf_submit_cache_reqs(struct ocf_cache *cache,
struct ocf_map_info *map_info, struct ocf_request *req, int dir,
unsigned int reqs, ocf_end_t callback, void *ctx);
static inline struct ocf_io *ocf_new_cache_io(struct ocf_cache *cache)
{
return ocf_dobj_new_io(&cache->device->obj);
}
static inline struct ocf_io *ocf_new_core_io(struct ocf_cache *cache,
ocf_core_id_t core_id)
{
ENV_BUG_ON(core_id >= OCF_CORE_MAX);
return ocf_dobj_new_io(&cache->core_obj[core_id].obj);
}
#endif /* UTILS_IO_H_ */
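A quick standalone check of the inclusive-end overlap semantics documented
above (range_overlaps restates the ocf_io_range_overlaps() logic):

#include <assert.h>
#include <stdint.h>

static int range_overlaps(uint32_t start1, uint32_t end1,
		uint32_t start2, uint32_t end2)
{
	/* Ends are inclusive, exactly as in ocf_io_range_overlaps() */
	if (start2 <= start1 && end2 >= start1)
		return 1;
	if (start2 >= start1 && end1 >= start2)
		return 1;
	return 0;
}

int main(void)
{
	assert(range_overlaps(0, 7, 7, 10));	/* shared element 7 */
	assert(!range_overlaps(0, 7, 8, 10));	/* adjacent, no overlap */
	assert(range_overlaps(4, 5, 0, 9));	/* fully contained */
	return 0;
}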

src/utils/utils_list.c (new file, 64 lines)

@@ -0,0 +1,64 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "utils_list.h"
void ocf_lst_sort(struct ocf_lst *lst)
{
ocf_cache_line_t iter_idx;
ocf_cache_line_t next_idx;
struct ocf_lst_entry *iter;
if (!lst->cmp) {
/* No comparator, nothing to sort */
return;
}
if (ocf_lst_empty(lst)) {
/* List is empty, nothing to do */
return;
}
/* Get iterator - the first element on the list - and its successor */
iter_idx = lst->head->next;
iter = lst->getter(lst->cache, iter_idx);
next_idx = iter->next;
/* Reset the list to its empty state */
lst->head->next = lst->invalid;
lst->head->prev = lst->invalid;
while (iter_idx != lst->invalid) {
ocf_lst_init_entry(lst, iter);
if (ocf_lst_empty(lst)) {
/* Put the first element on the list */
ocf_lst_add(lst, iter_idx);
} else {
/* Search for the position to insert the element */
struct ocf_lst_entry *pos;
ocf_cache_line_t pos_idx = lst->invalid;
for_each_lst(lst, pos, pos_idx)
if (lst->cmp(lst->cache, pos, iter) > 0)
break;
if (lst->invalid == pos_idx) {
/* Put at the end of list */
ocf_lst_add_tail(lst, iter_idx);
} else {
/* Position is known, put it before */
ocf_lst_add_before(lst, pos_idx, iter_idx);
}
}
/* Switch to next */
iter_idx = next_idx;
iter = lst->getter(lst->cache, iter_idx);
next_idx = iter->next;
}
}

src/utils/utils_list.h (new file, 207 lines)

@@ -0,0 +1,207 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_LIST_H__
#define __UTILS_LIST_H__
#include "ocf_env.h"
#include "../ocf_ctx_priv.h"
#include "ocf/ocf_cache.h"
#define OCF_LST_DBG 1
#if 1 == OCF_LST_DBG
#define OCF_LST_DBG_ON(lst, cond) ({ \
if (cond) { \
ocf_log(ocf_cache_get_ctx(lst->cache), log_crit, \
"OCF list critical problem (%s:%u)\n", \
__func__, __LINE__); \
ocf_log_stack_trace(ocf_cache_get_ctx(lst->cache)); \
} \
})
#else
#define OCF_LST_DBG_ON(lst, cond)
#endif
#define OCF_LST_ENTRY_OUT(lst) ((lst)->invalid + 1)
struct ocf_lst_entry {
ocf_cache_line_t next;
ocf_cache_line_t prev;
};
typedef struct ocf_lst_entry *(*ocf_mlst_getter)(
struct ocf_cache *cache, ocf_cache_line_t idx);
typedef int (*ocf_mlst_cmp)(struct ocf_cache *cache,
struct ocf_lst_entry *e1, struct ocf_lst_entry *e2);
struct ocf_lst {
struct ocf_lst_entry *head;
ocf_cache_line_t invalid;
ocf_mlst_getter getter;
ocf_mlst_cmp cmp;
struct ocf_cache *cache;
struct {
uint32_t active : 1;
} flags;
};
static inline void ocf_lst_init_entry(struct ocf_lst *lst,
struct ocf_lst_entry *entry)
{
entry->next = entry->prev = OCF_LST_ENTRY_OUT(lst);
}
static inline bool ocf_lst_is_entry(struct ocf_lst *lst,
struct ocf_lst_entry *entry)
{
if (entry->next == OCF_LST_ENTRY_OUT(lst) &&
entry->prev == OCF_LST_ENTRY_OUT(lst))
return false;
if (entry->next < OCF_LST_ENTRY_OUT(lst) &&
entry->prev < OCF_LST_ENTRY_OUT(lst))
return true;
ENV_BUG();
return false;
}
static inline void ocf_lst_init(struct ocf_cache *cache,
struct ocf_lst *lst, ocf_cache_line_t invalid,
ocf_mlst_getter getter, ocf_mlst_cmp cmp)
{
ocf_cache_line_t idx;
ENV_BUG_ON(env_memset(lst, sizeof(*lst), 0));
lst->head = getter(cache, invalid);
lst->head->next = invalid;
lst->head->prev = invalid;
lst->invalid = invalid;
lst->getter = getter;
lst->cmp = cmp;
lst->cache = cache;
for (idx = 0; idx < lst->invalid; idx++) {
struct ocf_lst_entry *entry = getter(cache, idx);
ocf_lst_init_entry(lst, entry);
}
}
static inline void ocf_lst_add_after(struct ocf_lst *lst,
ocf_cache_line_t at, ocf_cache_line_t idx)
{
struct ocf_lst_entry *after = lst->getter(lst->cache, at);
struct ocf_lst_entry *next = lst->getter(lst->cache, after->next);
struct ocf_lst_entry *this = lst->getter(lst->cache, idx);
OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this));
OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, after));
OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, next));
this->next = after->next;
this->prev = at;
after->next = idx;
next->prev = idx;
}
static inline void ocf_lst_add_before(struct ocf_lst *lst,
ocf_cache_line_t at, ocf_cache_line_t idx)
{
struct ocf_lst_entry *before = lst->getter(lst->cache, at);
struct ocf_lst_entry *prev = lst->getter(lst->cache, before->prev);
struct ocf_lst_entry *this = lst->getter(lst->cache, idx);
OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this));
OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, before));
OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, prev));
this->next = at;
this->prev = before->prev;
before->prev = idx;
prev->next = idx;
}
static inline void ocf_lst_add(struct ocf_lst *lst, ocf_cache_line_t idx)
{
struct ocf_lst_entry *this = lst->getter(lst->cache, idx);
struct ocf_lst_entry *next = lst->getter(lst->cache, lst->head->next);
OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this));
OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, next));
this->next = lst->head->next;
next->prev = idx;
lst->head->next = idx;
this->prev = lst->invalid;
}
static inline void ocf_lst_add_tail(struct ocf_lst *lst, ocf_cache_line_t idx)
{
struct ocf_lst_entry *this = lst->getter(lst->cache, idx);
struct ocf_lst_entry *prev = lst->getter(lst->cache, lst->head->prev);
OCF_LST_DBG_ON(lst, ocf_lst_is_entry(lst, this));
OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, prev));
this->next = lst->invalid;
this->prev = lst->head->prev;
prev->next = idx;
lst->head->prev = idx;
}
static inline void ocf_lst_del(struct ocf_lst *lst, ocf_cache_line_t idx)
{
struct ocf_lst_entry *this = lst->getter(lst->cache, idx);
struct ocf_lst_entry *next = lst->getter(lst->cache, this->next);
struct ocf_lst_entry *prev = lst->getter(lst->cache, this->prev);
OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, this));
OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, next));
OCF_LST_DBG_ON(lst, !ocf_lst_is_entry(lst, prev));
prev->next = this->next;
next->prev = this->prev;
ocf_lst_init_entry(lst, this);
}
static inline ocf_cache_line_t ocf_lst_head(struct ocf_lst *lst)
{
return lst->head->next;
}
static inline ocf_cache_line_t ocf_lst_tail(struct ocf_lst *lst)
{
return lst->head->prev;
}
static inline bool ocf_lst_empty(struct ocf_lst *lst)
{
if (lst->head->next == lst->invalid)
return true;
else
return false;
}
void ocf_lst_sort(struct ocf_lst *lst);
#define for_each_lst(lst, entry, id) \
for (id = (lst)->head->next, entry = (lst)->getter((lst)->cache, id); \
entry != (lst)->head; id = entry->next, \
entry = (lst)->getter((lst)->cache, id))
#define for_each_lst_entry(lst, entry, id, type, member) \
for (id = (lst)->head->next, \
entry = container_of((lst)->getter((lst)->cache, id), type, member); \
entry != container_of((lst)->head, type, member); \
id = entry->member.next, \
entry = container_of((lst)->getter((lst)->cache, id), type, member))
#endif /* __UTILS_LIST_H__ */
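A hedged sketch of embedding the intrusive list in a table of objects. The
list is index based: `invalid` doubles as the sentinel index, so the getter
must serve indices 0..invalid, with the entry at `invalid` acting as the
list head (names prefixed my_ are hypothetical):

#define MY_COUNT 16

struct my_item {
	struct ocf_lst_entry node;
	uint32_t weight;
};

static struct my_item my_table[MY_COUNT + 1]; /* +1 for the sentinel */

static struct ocf_lst_entry *my_getter(struct ocf_cache *cache,
		ocf_cache_line_t idx)
{
	return &my_table[idx].node;
}

static int my_cmp(struct ocf_cache *cache, struct ocf_lst_entry *e1,
		struct ocf_lst_entry *e2)
{
	struct my_item *i1 = container_of(e1, struct my_item, node);
	struct my_item *i2 = container_of(e2, struct my_item, node);

	return (int)i1->weight - (int)i2->weight;
}

/* Usage (assuming a valid cache handle):
 *	ocf_lst_init(cache, &my_lst, MY_COUNT, my_getter, my_cmp);
 *	ocf_lst_add(&my_lst, 3);
 *	ocf_lst_sort(&my_lst);	- insertion sort via the cmp callback
 */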

src/utils/utils_part.c (new file, 194 lines)

@@ -0,0 +1,194 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "../ocf_cache_priv.h"
#include "../ocf_request.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../eviction/ops.h"
#include "utils_part.h"
static struct ocf_lst_entry *ocf_part_lst_getter_valid(
struct ocf_cache *cache, ocf_cache_line_t idx)
{
ENV_BUG_ON(idx > OCF_IO_CLASS_MAX);
return &cache->user_parts[idx].lst_valid;
}
static int ocf_part_lst_cmp_valid(struct ocf_cache *cache,
struct ocf_lst_entry *e1, struct ocf_lst_entry *e2)
{
struct ocf_user_part *p1 = container_of(e1, struct ocf_user_part,
lst_valid);
struct ocf_user_part *p2 = container_of(e2, struct ocf_user_part,
lst_valid);
size_t p1_size = ocf_cache_is_device_attached(cache) ?
p1->runtime->curr_size : 0;
size_t p2_size = ocf_cache_is_device_attached(cache) ?
p2->runtime->curr_size : 0;
int v1 = p1->config->priority;
int v2 = p2->config->priority;
/*
* If partition is invalid the priority depends on current size:
* 1. Partition is empty - move to the end of list
* 2. Partition is not empty - move to the beginning of the list. This
* partition will be evicted first
*/
if (p1->config->priority == OCF_IO_CLASS_PRIO_PINNED)
p1->config->flags.eviction = false;
else
p1->config->flags.eviction = true;
if (p2->config->priority == OCF_IO_CLASS_PRIO_PINNED)
p2->config->flags.eviction = false;
else
p2->config->flags.eviction = true;
if (!p1->config->flags.valid) {
if (p1_size) {
v1 = SHRT_MAX;
p1->config->flags.eviction = true;
} else {
v1 = SHRT_MIN;
p1->config->flags.eviction = false;
}
}
if (!p2->config->flags.valid) {
if (p2_size) {
v2 = SHRT_MAX;
p2->config->flags.eviction = true;
} else {
v2 = SHRT_MIN;
p2->config->flags.eviction = false;
}
}
if (v1 == v2) {
v1 = p1 - cache->user_parts;
v2 = p2 - cache->user_parts;
}
return v2 - v1;
}
int ocf_part_init(struct ocf_cache *cache)
{
ocf_lst_init(cache, &cache->lst_part, OCF_IO_CLASS_MAX,
ocf_part_lst_getter_valid, ocf_part_lst_cmp_valid);
return 0;
}
void ocf_part_move(struct ocf_request *rq)
{
struct ocf_cache *cache = rq->cache;
struct ocf_map_info *entry;
ocf_cache_line_t line;
ocf_part_id_t id_old, id_new;
uint32_t i;
ocf_cleaning_t type = cache->conf_meta->cleaning_policy_type;
ENV_BUG_ON(type >= ocf_cleaning_max);
entry = &rq->map[0];
for (i = 0; i < rq->core_line_count; i++, entry++) {
if (!entry->re_part) {
/* Changing partition not required */
continue;
}
if (entry->status != LOOKUP_HIT) {
/* No HIT */
continue;
}
line = entry->coll_idx;
id_old = ocf_metadata_get_partition_id(cache, line);
id_new = rq->part_id;
ENV_BUG_ON(id_old >= OCF_IO_CLASS_MAX ||
id_new >= OCF_IO_CLASS_MAX);
if (id_old == id_new) {
/* Partition of the request and cache line is the same,
* no need to change partition
*/
continue;
}
/* Remove from old eviction */
ocf_eviction_purge_cache_line(cache, line);
if (metadata_test_dirty(cache, line)) {
/*
* Remove cline from cleaning - this is for ioclass
* oriented cleaning policy (e.g. ALRU).
* TODO: Consider adding update_cache_line() ops
* to cleaning policy to let policies handle this.
*/
if (cleaning_policy_ops[type].purge_cache_block)
cleaning_policy_ops[type].
purge_cache_block(cache, line);
}
/* Let's change partition */
ocf_metadata_remove_from_partition(cache, id_old, line);
ocf_metadata_add_to_partition(cache, id_new, line);
/* Add to new eviction */
ocf_eviction_init_cache_line(cache, line, id_new);
ocf_eviction_set_hot_cache_line(cache, line);
/* Check if cache line is dirty. If yes then need to change
* cleaning policy and update partition dirty clines
* statistics.
*/
if (metadata_test_dirty(cache, line)) {
/* Add cline back to cleaning policy */
if (cleaning_policy_ops[type].set_hot_cache_line)
cleaning_policy_ops[type].
set_hot_cache_line(cache, line);
env_atomic_inc(&cache->core_runtime_meta[rq->core_id].
part_counters[id_new].dirty_clines);
env_atomic_dec(&cache->core_runtime_meta[rq->core_id].
part_counters[id_old].dirty_clines);
}
env_atomic_inc(&cache->core_runtime_meta[rq->core_id].
part_counters[id_new].cached_clines);
env_atomic_dec(&cache->core_runtime_meta[rq->core_id].
part_counters[id_old].cached_clines);
/* DONE */
}
}
void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid)
{
struct ocf_user_part *part = &cache->user_parts[id];
if (valid ^ part->config->flags.valid) {
if (valid) {
part->config->flags.valid = true;
cache->conf_meta->valid_parts_no++;
} else {
part->config->flags.valid = false;
cache->conf_meta->valid_parts_no--;
part->config->priority = OCF_IO_CLASS_PRIO_LOWEST;
part->config->min_size = 0;
part->config->max_size = PARTITION_SIZE_MAX;
ENV_BUG_ON(env_strncpy(part->config->name, sizeof(part->config->name),
"Inactive", 9));
}
}
}

src/utils/utils_part.h (new file, 117 lines)

@@ -0,0 +1,117 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __UTILS_PARTITION_H__
#define __UTILS_PARTITION_H__
#include "../ocf_request.h"
#include "../engine/cache_engine.h"
#include "../metadata/metadata_partition.h"
int ocf_part_init(struct ocf_cache *cache);
static inline bool ocf_part_is_valid(struct ocf_user_part *part)
{
return !!part->config->flags.valid;
}
static inline void ocf_part_set_prio(struct ocf_cache *cache,
struct ocf_user_part *part, int16_t prio)
{
if (part->config->priority != prio)
part->config->priority = prio;
}
static inline int16_t ocf_part_get_prio(struct ocf_cache *cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_IO_CLASS_MAX)
return cache->user_parts[part_id].config->priority;
return OCF_IO_CLASS_PRIO_LOWEST;
}
void ocf_part_set_valid(struct ocf_cache *cache, ocf_part_id_t id,
bool valid);
static inline bool ocf_part_is_added(struct ocf_user_part *part)
{
return !!part->config->flags.added;
}
static inline ocf_part_id_t ocf_part_class2id(ocf_cache_t cache, uint64_t class)
{
if (class < OCF_IO_CLASS_MAX)
if (cache->user_parts[class].config->flags.valid)
return class;
return PARTITION_DEFAULT;
}
void ocf_part_move(struct ocf_request *rq);
#define for_each_part(cache, part, id) \
for_each_lst_entry(&cache->lst_part, part, id, \
struct ocf_user_part, lst_valid)
static inline void ocf_part_sort(struct ocf_cache *cache)
{
ocf_lst_sort(&cache->lst_part);
}
static inline ocf_cache_mode_t ocf_part_get_cache_mode(struct ocf_cache *cache,
ocf_part_id_t part_id)
{
if (part_id < OCF_IO_CLASS_MAX)
return cache->user_parts[part_id].config->cache_mode;
return ocf_cache_mode_none;
}
static inline bool ocf_part_is_prio_valid(int64_t prio)
{
switch (prio) {
case OCF_IO_CLASS_PRIO_HIGHEST ... OCF_IO_CLASS_PRIO_LOWEST:
case OCF_IO_CLASS_PRIO_PINNED:
return true;
default:
return false;
}
}
/**
* Routine checks the validity of a partition name.
*
* The following conditions are checked:
* - string too long
* - string containing invalid characters (outside of low ASCII)
* The following condition is NOT checked:
* - empty string (an empty string is NOT a valid partition name, but
* this function returns true on an empty string nevertheless)
*
* @return true if the partition name is valid
*/
static inline bool ocf_part_is_name_valid(const char *name)
{
uint32_t length = 0;
while (*name) {
if (*name < ' ' || *name > '~')
return false;
if (',' == *name || '"' == *name)
return false;
name++;
length++;
if (length >= OCF_IO_CLASS_NAME_MAX)
return false;
}
return true;
}
#endif /* __UTILS_PARTITION_H__ */
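For illustration, how ocf_part_is_name_valid() behaves in practice (a
sketch, assuming an OCF translation unit):

	assert(ocf_part_is_name_valid("hot_data"));	/* printable ASCII */
	assert(!ocf_part_is_name_valid("a,b"));		/* comma rejected */
	assert(!ocf_part_is_name_valid("tab\there"));	/* control character */
	assert(ocf_part_is_name_valid(""));		/* caveat noted above */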

src/utils/utils_rq.c (new file, 316 lines)

@@ -0,0 +1,316 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf/ocf.h"
#include "utils_rq.h"
#include "utils_cache_line.h"
#include "../ocf_request.h"
#include "../ocf_cache_priv.h"
#define OCF_UTILS_RQ_DEBUG 0
#if 1 == OCF_UTILS_RQ_DEBUG
#define OCF_DEBUG_TRACE(cache) \
ocf_cache_log(cache, log_info, "[Utils][RQ] %s\n", __func__)
#define OCF_DEBUG_PARAM(cache, format, ...) \
ocf_cache_log(cache, log_info, "[Utils][RQ] %s - "format"\n", \
__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
enum ocf_rq_size {
ocf_rq_size_1 = 0,
ocf_rq_size_2,
ocf_rq_size_4,
ocf_rq_size_8,
ocf_rq_size_16,
ocf_rq_size_32,
ocf_rq_size_64,
ocf_rq_size_128,
ocf_rq_size_max,
};
struct ocf_rq_allocator {
env_allocator *allocator[ocf_rq_size_max];
size_t size[ocf_rq_size_max];
};
static inline size_t ocf_rq_sizeof_map(struct ocf_request *rq)
{
uint32_t lines = rq->alloc_core_line_count;
size_t size = (lines * sizeof(struct ocf_map_info));
ENV_BUG_ON(lines == 0);
return size;
}
static inline size_t ocf_rq_sizeof(uint32_t lines)
{
size_t size = sizeof(struct ocf_request) +
(lines * sizeof(struct ocf_map_info));
ENV_BUG_ON(lines == 0);
return size;
}
#define ALLOCATOR_NAME_FMT "ocf_rq_%u"
/* Max number of digits in decimal representation of unsigned int is 10 */
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + 10)
int ocf_rq_allocator_init(struct ocf_ctx *ocf_ctx)
{
int i;
struct ocf_rq_allocator *rq;
char name[ALLOCATOR_NAME_MAX] = { '\0' };
OCF_DEBUG_TRACE(cache);
ocf_ctx->resources.rq = env_zalloc(sizeof(*(ocf_ctx->resources.rq)),
ENV_MEM_NORMAL);
rq = ocf_ctx->resources.rq;
if (!rq)
goto ocf_utils_rq_init_ERROR;
for (i = 0; i < ARRAY_SIZE(rq->allocator); i++) {
rq->size[i] = ocf_rq_sizeof(1 << i);
if (snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
(1 << i)) < 0) {
goto ocf_utils_rq_init_ERROR;
}
rq->allocator[i] = env_allocator_create(rq->size[i], name);
if (!rq->allocator[i])
goto ocf_utils_rq_init_ERROR;
OCF_DEBUG_PARAM(cache, "New request allocator, lines = %u, "
"size = %lu", 1 << i, rq->size[i]);
}
return 0;
ocf_utils_rq_init_ERROR:
ocf_rq_allocator_deinit(ocf_ctx);
return -1;
}
void ocf_rq_allocator_deinit(struct ocf_ctx *ocf_ctx)
{
int i;
struct ocf_rq_allocator *rq;
OCF_DEBUG_TRACE(cache);
if (!ocf_ctx->resources.rq)
return;
rq = ocf_ctx->resources.rq;
for (i = 0; i < ARRAY_SIZE(rq->allocator); i++) {
if (rq->allocator[i]) {
env_allocator_destroy(rq->allocator[i]);
rq->allocator[i] = NULL;
}
}
env_free(rq);
ocf_ctx->resources.rq = NULL;
}
static inline env_allocator *_ocf_rq_get_allocator_1(
struct ocf_cache *cache)
{
return cache->owner->resources.rq->allocator[0];
}
static env_allocator *_ocf_rq_get_allocator(
struct ocf_cache *cache, uint32_t count)
{
struct ocf_ctx *ocf_ctx = cache->owner;
unsigned int idx;
/* __builtin_clz(0) is undefined - validate count first */
ENV_BUG_ON(count == 0);
idx = 31 - __builtin_clz(count);
if (__builtin_ffs(count) <= idx)
idx++;
if (idx >= ocf_rq_size_max)
return NULL;
return ocf_ctx->resources.rq->allocator[idx];
}
static void start_cache_req(struct ocf_request *rq)
{
ocf_cache_t cache = rq->cache;
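/* d2c ("direct to core") routes the request past the cache when no
* cache device is attached. Assume d2c first, then re-check after
* registering as a pending cache request, so a detach racing with this
* function cannot leave a stale pending counter. */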
rq->d2c = 1;
if (env_atomic_read(&cache->attached)) {
rq->d2c = 0;
env_atomic_inc(&cache->pending_cache_requests);
if (!env_atomic_read(&cache->attached)) {
rq->d2c = 1;
env_atomic_dec(&cache->pending_cache_requests);
}
}
}
struct ocf_request *ocf_rq_new(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw)
{
uint64_t core_line_first, core_line_last, core_line_count;
struct ocf_request *rq;
env_allocator *allocator;
if (likely(bytes)) {
core_line_first = ocf_bytes_2_lines(cache, addr);
core_line_last = ocf_bytes_2_lines(cache, addr + bytes - 1);
core_line_count = core_line_last - core_line_first + 1;
} else {
core_line_first = ocf_bytes_2_lines(cache, addr);
core_line_last = core_line_first;
core_line_count = 1;
}
allocator = _ocf_rq_get_allocator(cache, core_line_count);
if (allocator) {
rq = env_allocator_new(allocator);
} else {
rq = env_allocator_new(_ocf_rq_get_allocator_1(cache));
}
if (unlikely(!rq))
return NULL;
if (allocator)
rq->map = rq->__map;
OCF_DEBUG_TRACE(cache);
rq->cache = cache;
env_atomic_inc(&cache->pending_requests);
start_cache_req(rq);
rq->io_queue = 0;
env_atomic_set(&rq->ref_count, 1);
rq->core_id = core_id;
rq->byte_position = addr;
rq->byte_length = bytes;
rq->core_line_first = core_line_first;
rq->core_line_last = core_line_last;
rq->core_line_count = core_line_count;
rq->alloc_core_line_count = core_line_count;
rq->rw = rw;
rq->part_id = PARTITION_DEFAULT;
return rq;
}
int ocf_rq_alloc_map(struct ocf_request *rq)
{
if (rq->map)
return 0;
rq->map = env_zalloc(ocf_rq_sizeof_map(rq), ENV_MEM_NOIO);
if (!rq->map) {
rq->error = -ENOMEM;
return -ENOMEM;
}
return 0;
}
struct ocf_request *ocf_rq_new_extended(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw)
{
struct ocf_request *rq;
rq = ocf_rq_new(cache, core_id, addr, bytes, rw);
if (likely(rq) && ocf_rq_alloc_map(rq)) {
ocf_rq_put(rq);
return NULL;
}
return rq;
}
struct ocf_request *ocf_rq_new_discard(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw)
{
struct ocf_request *rq;
rq = ocf_rq_new_extended(cache, core_id, addr,
MIN(bytes, MAX_TRIM_RQ_SIZE), rw);
if (!rq)
return NULL;
rq->discard.sector = BYTES_TO_SECTORS(addr);
rq->discard.nr_sects = BYTES_TO_SECTORS(bytes);
rq->discard.handled = 0;
return rq;
}
void ocf_rq_get(struct ocf_request *rq)
{
OCF_DEBUG_TRACE(rq->cache);
env_atomic_inc(&rq->ref_count);
}
void ocf_rq_put(struct ocf_request *rq)
{
env_allocator *allocator;
if (env_atomic_dec_return(&rq->ref_count))
return;
OCF_DEBUG_TRACE(rq->cache);
if (!rq->d2c && !env_atomic_dec_return(
&rq->cache->pending_cache_requests)) {
env_waitqueue_wake_up(&rq->cache->pending_cache_wq);
}
env_atomic_dec(&rq->cache->pending_requests);
allocator = _ocf_rq_get_allocator(rq->cache,
rq->alloc_core_line_count);
if (allocator) {
env_allocator_del(allocator, rq);
} else {
env_free(rq->map);
env_allocator_del(_ocf_rq_get_allocator_1(rq->cache), rq);
}
}
void ocf_rq_clear_info(struct ocf_request *rq)
{
ENV_BUG_ON(env_memset(&rq->info, sizeof(rq->info), 0));
}
void ocf_rq_clear_map(struct ocf_request *rq)
{
if (likely(rq->map))
ENV_BUG_ON(env_memset(rq->map,
sizeof(rq->map[0]) * rq->core_line_count, 0));
}
uint32_t ocf_rq_get_allocated(struct ocf_cache *cache)
{
return env_atomic_read(&cache->pending_requests);
}

src/utils/utils_rq.h (new file, 154 lines)

@@ -0,0 +1,154 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef UTILS_RQ_H_
#define UTILS_RQ_H_
#include "../ocf_request.h"
/**
* @file utils_rq.h
* @brief OCF request allocation utilities
*/
struct ocf_rq_allocator;
/**
* @brief Initialize OCF request allocation utility
*
* @param ocf_ctx - OCF context
* @return Operation status 0 - successful, non-zero failure
*/
int ocf_rq_allocator_init(struct ocf_ctx *ocf_ctx);
/**
* @brief De-initialize OCF request allocation utility
*
* @param ocf_ctx - OCF context
*/
void ocf_rq_allocator_deinit(struct ocf_ctx *ocf_ctx);
/**
* @brief Allocate new OCF request
*
* @param cache - OCF cache instance
* @param core_id - Core id
* @param addr - LBA of request
* @param bytes - number of bytes of request
* @param rw - Read or Write
*
* @return new OCF request
*/
struct ocf_request *ocf_rq_new(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw);
/**
* @brief Allocate OCF request map
*
* @param rq OCF request
*
* @retval 0 Allocation succeed
* @retval non-zero Allocation failed
*/
int ocf_rq_alloc_map(struct ocf_request *rq);
/**
* @brief Allocate new OCF request with NOIO map allocation for huge request
*
* @param cache - OCF cache instance
* @param core_id - Core id
* @param addr - LBA of request
* @param bytes - number of bytes of request
* @param rw - Read or Write
*
* @return new OCF request
*/
struct ocf_request *ocf_rq_new_extended(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw);
/**
* @brief Allocate new OCF request for DISCARD operation
*
* @param cache - OCF cache instance
* @param core_id - Core id
* @param addr - LBA of request
* @param bytes - number of bytes of request
* @param rw - Read or Write
*
* @return new OCF request
*/
struct ocf_request *ocf_rq_new_discard(struct ocf_cache *cache,
ocf_core_id_t core_id, uint64_t addr, uint32_t bytes, int rw);
/**
* @brief Get number of allocated requests
*
* @param cache OCF cache instance
*
* @return Number of allocated requests
*/
uint32_t ocf_rq_get_allocated(struct ocf_cache *cache);
/**
* @brief Increment OCF request reference count
*
* @param rq - OCF request
*/
void ocf_rq_get(struct ocf_request *rq);
/**
* @brief Decrement OCF request reference. If reference is 0 then request will
* be deallocated
*
* @param rq - OCF request
*/
void ocf_rq_put(struct ocf_request *rq);
/**
* @brief Clear OCF request info
*
* @param rq - OCF request
*/
void ocf_rq_clear_info(struct ocf_request *rq);
/**
* @brief Clear OCF request map
*
* @param rq - OCF request
*/
void ocf_rq_clear_map(struct ocf_request *rq);
/**
* @brief Clear OCF request
*
* @param rq - OCF request
*/
static inline void ocf_rq_clear(struct ocf_request *rq)
{
ocf_rq_clear_info(rq);
ocf_rq_clear_map(rq);
env_atomic_set(&rq->lock_remaining, 0);
env_atomic_set(&rq->req_remaining, 0);
}
/**
* @brief Return OCF request reference count
*
* @param rq - OCF request
* @return OCF request reference count
*/
static inline int ocf_rq_ref_count(struct ocf_request *rq)
{
return env_atomic_read(&rq->ref_count);
}
static inline bool ocf_rq_is_4k(uint64_t addr, uint32_t bytes)
{
return !((addr % PAGE_SIZE) || (bytes % PAGE_SIZE));
}
#endif /* UTILS_RQ_H_ */
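A hedged sketch of the request lifecycle built from the API above, assuming
an OCF translation unit (my_submit is a hypothetical caller; values are
illustrative):

static int my_submit(struct ocf_cache *cache, ocf_core_id_t core_id,
		uint64_t addr, uint32_t bytes)
{
	struct ocf_request *rq;

	/* Small requests carry their map inline from a per-size pool;
	 * ocf_rq_new_extended() additionally guarantees a NOIO map
	 * allocation for huge requests before the request is used. */
	rq = ocf_rq_new_extended(cache, core_id, addr, bytes, OCF_READ);
	if (!rq)
		return -ENOMEM;

	ocf_rq_clear(rq); /* zero map and info before mapping */

	/* ... hand rq to an engine; each extra user takes a reference ... */
	ocf_rq_get(rq);
	/* ... that user finishes ... */
	ocf_rq_put(rq);

	ocf_rq_put(rq); /* drop the initial reference - frees the request */
	return 0;
}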