nhit promotion policy implementation

Signed-off-by: Jan Musial <jan.musial@intel.com>
Author: Jan Musial, 2019-08-01 12:53:13 +02:00
parent 8ed525ae7f
commit 62a0ccdd9f
11 changed files with 654 additions and 47 deletions

@@ -210,7 +210,7 @@ typedef enum {
 * OCF supported promotion policy types
 */
 typedef enum {
-ocf_promotion_nop = 0,
+ocf_promotion_always = 0,
 /*!< No promotion policy. Cache inserts are not filtered */
 ocf_promotion_nhit,
@@ -219,7 +219,7 @@
 ocf_promotion_max,
 /*!< Stopper of enumerator */
-ocf_promotion_default = ocf_promotion_nop,
+ocf_promotion_default = ocf_promotion_always,
 /*!< Default promotion policy */
 } ocf_promotion_t;

@@ -60,8 +60,6 @@ static void _ocf_discard_core_complete(struct ocf_io *io, int error)
 OCF_DEBUG_RQ(req, "Core DISCARD Completion");
-ocf_promotion_req_purge(req->cache->promotion_policy, req);
 _ocf_discard_complete_req(req, error);
 ocf_io_put(io);
@@ -85,6 +83,8 @@ static int _ocf_discard_core(struct ocf_request *req)
 ocf_volume_submit_discard(io);
+ocf_promotion_req_purge(req->cache->promotion_policy, req);
 return 0;
 }

@@ -1,36 +0,0 @@
-/*
-* Copyright(c) 2019 Intel Corporation
-* SPDX-License-Identifier: BSD-3-Clause-Clear
-*/
-#include "../metadata/metadata.h"
-#include "nhit.h"
-ocf_error_t nhit_init(ocf_cache_t cache, ocf_promotion_policy_t policy)
-{
-return 0;
-}
-void nhit_deinit(ocf_promotion_policy_t policy)
-{
-}
-ocf_error_t nhit_set_param(ocf_promotion_policy_t policy, uint8_t param_id,
-uint64_t param_value)
-{
-return 0;
-}
-void nhit_req_purge(ocf_promotion_policy_t policy,
-struct ocf_request *req)
-{
-}
-bool nhit_req_should_promote(ocf_promotion_policy_t policy, struct ocf_request *req)
-{
-return true;
-}

src/promotion/nhit/hash.c (new file, +393 lines)

@@ -0,0 +1,393 @@
/*
* Copyright(c) 2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "../../ocf_priv.h"
#include "hash.h"
/* Implementation of hashmap-ish structure for tracking core lines in nhit
* promotion policy. It consists of two arrays:
* - hash_map - indexed by hash formed from core id and core lba pairs,
* contains pointers (indices) to the ring buffer. Each index in this array
* has its own rwsem.
* - ring_buffer - contains per-coreline metadata and collision info for
* open addressing. If we run out of space in this array, we just loop around
* and insert elements from the beginning. So the lifetime of a core line varies
* depending on insertion and removal rate.
*
* Operations:
* - query(core_id, core_lba):
* Check if core line is present in structure, bump up counter and
* return its value.
*
* - insertion(core_id, core_lba):
* Insert new core line into structure
* 1. get new slot from ring buffer
* a. check if current slot under rb_pointer is valid
* and if not - exit
* b. set current slot as invalid and increment rb_pointer
* 2. lock hash bucket for new item and for ring buffer slot
* (if non-empty) in ascending bucket id order (to avoid deadlock)
* 3. insert new data, add to collision
* 4. unlock both hash buckets
* 5. commit rb_slot (mark it as valid)
*
* Insertion explained visually:
*
* Suppose that we want to add a new core line with hash value H which already has
* some colliding core lines
*
* hash(core_id, core_lba)
* +
* |
* v
* +--+--+--+--+--+--+--+--++-+--+
* | | |I | | | | | |H | | hash_map
* +--+--++-+--+--+--+--+--++-+--+
* __| rb_pointer | _______
* | + | | |
* v v v | v
* +--++-+--+---+-+--+--+--++-+--+--++-+--++-+
* | | | | |X | | | | | | | | | | ring_buffer
* +--++-+--+---+-+--+--+--++-+--+--++-+--+--+
* | ^ | ^
* |________| |________|
*
* Since rb_pointer points at an occupied rb slot, we need to write-lock hash
* bucket I associated with that slot and remove the slot from its collision
* list. We've gained an empty slot and use slot X for the new hash H entry.
*
* +--+--+--+--+--+--+--+--+--+--+
* | | |I | | | | | |H | | hash_map
* +--+--++-+--+--+--+--+--++-+--+
* __| rb_pointer | _______
* | + | | |
* v v v | v
* +--++-+--+-----++-+--+--++-+--+--++-+--++-+
* | | | | |X | | | | | | | | | | ring_buffer
* +--+--+--+---+-+--+--+--++-+--+--++-+--++-+
* ^ | ^ |
* | |________| |
* |__________________________|
*
* The valid field in nhit_list_elem is guarded by rb_pointer_lock to make sure
* we won't try to use the same slot in two threads. That could happen if, in
* the time between removal from the old collision list and insertion into the
* new one, rb_pointer went all the way around the structure (the likelihood
* depends on the size of ring_buffer).
*/
#define HASH_PRIME 4099
struct nhit_list_elem {
ocf_core_id_t core_id;
uint64_t core_lba;
ocf_cache_line_t coll_prev;
ocf_cache_line_t coll_next;
bool valid;
env_atomic counter;
};
struct nhit_hash {
ocf_cache_line_t hash_entries;
uint64_t rb_entries;
ocf_cache_line_t *hash_map;
env_rwsem *hash_locks;
struct nhit_list_elem *ring_buffer;
uint64_t rb_pointer;
env_spinlock rb_pointer_lock;
};
ocf_error_t hash_init(uint64_t hash_size, nhit_hash_t *ctx)
{
int result = 0;
struct nhit_hash *new_ctx;
uint64_t i;
new_ctx = env_vzalloc(sizeof(*new_ctx));
if (!new_ctx) {
result = -OCF_ERR_NO_MEM;
goto exit;
}
new_ctx->rb_entries = hash_size;
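/* hash_entries is rb_entries / 4 rounded up to a multiple of HASH_PRIME,
 * minus one. For example, with rb_entries = 40000: 40000 / 4 = 10000,
 * DIV_ROUND_UP(10000, 4099) = 3, and 3 * 4099 - 1 = 12296 hash buckets. */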
new_ctx->hash_entries = OCF_DIV_ROUND_UP(
new_ctx->rb_entries / 4,
HASH_PRIME) * HASH_PRIME - 1;
new_ctx->hash_map = env_vzalloc(
new_ctx->hash_entries * sizeof(*new_ctx->hash_map));
if (!new_ctx->hash_map) {
result = -OCF_ERR_NO_MEM;
goto dealloc_ctx;
}
for (i = 0; i < new_ctx->hash_entries; i++)
new_ctx->hash_map[i] = new_ctx->rb_entries;
new_ctx->hash_locks = env_vzalloc(
new_ctx->hash_entries * sizeof(*new_ctx->hash_locks));
if (!new_ctx->hash_locks) {
result = -OCF_ERR_NO_MEM;
goto dealloc_hash;
}
for (i = 0; i < new_ctx->hash_entries; i++) {
if (env_rwsem_init(&new_ctx->hash_locks[i])) {
result = -OCF_ERR_UNKNOWN;
goto dealloc_locks;
}
}
new_ctx->ring_buffer = env_vzalloc(
new_ctx->rb_entries * sizeof(*new_ctx->ring_buffer));
if (!new_ctx->ring_buffer) {
result = -OCF_ERR_NO_MEM;
goto dealloc_locks;
}
for (i = 0; i < new_ctx->rb_entries; i++) {
new_ctx->ring_buffer[i].core_id = OCF_CORE_ID_INVALID;
new_ctx->ring_buffer[i].valid = true;
env_atomic_set(&new_ctx->ring_buffer[i].counter, 0);
}
env_spinlock_init(&new_ctx->rb_pointer_lock);
new_ctx->rb_pointer = 0;
*ctx = new_ctx;
return 0;
dealloc_locks:
for (i = 0; i < new_ctx->hash_entries; i++)
ENV_BUG_ON(env_rwsem_destroy(&new_ctx->hash_locks[i]));
env_vfree(new_ctx->hash_locks);
dealloc_hash:
env_vfree(new_ctx->hash_map);
dealloc_ctx:
env_vfree(new_ctx);
exit:
return result;
}
void hash_deinit(nhit_hash_t ctx)
{
ocf_cache_line_t i;
env_spinlock_destroy(&ctx->rb_pointer_lock);
for (i = 0; i < ctx->hash_entries; i++)
ENV_BUG_ON(env_rwsem_destroy(&ctx->hash_locks[i]));
env_vfree(ctx->ring_buffer);
env_vfree(ctx->hash_locks);
env_vfree(ctx->hash_map);
env_vfree(ctx);
}
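/* Maps (core_id, core_lba) to a bucket index in [0, limit). For
 * OCF_CORE_ID_INVALID the limit itself is returned and acts as a "no bucket"
 * sentinel, which callers compare against hash_entries before locking. */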
static ocf_cache_line_t hash_function(ocf_core_id_t core_id, uint64_t core_lba,
uint64_t limit)
{
if (core_id == OCF_CORE_ID_INVALID)
return limit;
return (ocf_cache_line_t) ((core_lba * HASH_PRIME + core_id) % limit);
}
static ocf_cache_line_t core_line_lookup(nhit_hash_t ctx,
ocf_core_id_t core_id, uint64_t core_lba)
{
ocf_cache_line_t hash = hash_function(core_id, core_lba,
ctx->hash_entries);
ocf_cache_line_t needle = ctx->rb_entries;
ocf_cache_line_t cur;
for (cur = ctx->hash_map[hash]; cur != ctx->rb_entries;
cur = ctx->ring_buffer[cur].coll_next) {
struct nhit_list_elem *cur_elem = &ctx->ring_buffer[cur];
if (cur_elem->core_lba == core_lba &&
cur_elem->core_id == core_id) {
needle = cur;
break;
}
}
return needle;
}
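/* Claim the slot under rb_pointer and advance the pointer. Returns false if
 * the slot is still marked invalid, i.e. it was claimed by a concurrent
 * insertion that has not committed yet; the caller then skips its insert. */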
static inline bool get_rb_slot(nhit_hash_t ctx, uint64_t *slot)
{
bool result = true;
OCF_CHECK_NULL(slot);
env_spinlock_lock(&ctx->rb_pointer_lock);
*slot = ctx->rb_pointer;
result = ctx->ring_buffer[*slot].valid;
ctx->ring_buffer[*slot].valid = false;
ctx->rb_pointer = (*slot + 1) % ctx->rb_entries;
env_spinlock_unlock(&ctx->rb_pointer_lock);
return result;
}
static inline void commit_rb_slot(nhit_hash_t ctx, uint64_t slot)
{
env_spinlock_lock(&ctx->rb_pointer_lock);
ctx->ring_buffer[slot].valid = true;
env_spinlock_unlock(&ctx->rb_pointer_lock);
}
static void collision_remove(nhit_hash_t ctx, uint64_t slot_id)
{
struct nhit_list_elem *slot = &ctx->ring_buffer[slot_id];
ocf_cache_line_t hash = hash_function(slot->core_id, slot->core_lba,
ctx->hash_entries);
if (slot->core_id == OCF_CORE_ID_INVALID)
return;
slot->core_id = OCF_CORE_ID_INVALID;
if (slot->coll_prev != ctx->rb_entries)
ctx->ring_buffer[slot->coll_prev].coll_next = slot->coll_next;
if (slot->coll_next != ctx->rb_entries)
ctx->ring_buffer[slot->coll_next].coll_prev = slot->coll_prev;
if (ctx->hash_map[hash] == slot_id)
ctx->hash_map[hash] = slot->coll_next;
}
static void collision_insert_new(nhit_hash_t ctx,
uint64_t slot_id, ocf_core_id_t core_id,
uint64_t core_lba)
{
ocf_cache_line_t hash = hash_function(core_id, core_lba,
ctx->hash_entries);
struct nhit_list_elem *slot = &ctx->ring_buffer[slot_id];
slot->core_id = core_id;
slot->core_lba = core_lba;
slot->coll_next = ctx->hash_map[hash];
slot->coll_prev = ctx->rb_entries;
env_atomic_set(&slot->counter, 1);
if (ctx->hash_map[hash] != ctx->rb_entries)
ctx->ring_buffer[ctx->hash_map[hash]].coll_prev = slot_id;
ctx->hash_map[hash] = slot_id;
}
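/* Take both bucket locks in ascending bucket id order so that two concurrent
 * insertions touching the same pair of buckets cannot deadlock; a bucket id
 * equal to hash_entries (the sentinel) is skipped. */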
static inline void write_lock_hashes(nhit_hash_t ctx, ocf_core_id_t core_id1,
uint64_t core_lba1, ocf_core_id_t core_id2, uint64_t core_lba2)
{
ocf_cache_line_t hash1 = hash_function(core_id1, core_lba1,
ctx->hash_entries);
ocf_cache_line_t hash2 = hash_function(core_id2, core_lba2,
ctx->hash_entries);
ocf_cache_line_t lock_order[2] = {
OCF_MIN(hash1, hash2),
OCF_MAX(hash1, hash2)};
if (lock_order[0] != ctx->hash_entries)
env_rwsem_down_write(&ctx->hash_locks[lock_order[0]]);
if (lock_order[1] != lock_order[0] && lock_order[1] != ctx->hash_entries)
env_rwsem_down_write(&ctx->hash_locks[lock_order[1]]);
}
static inline void write_unlock_hashes(nhit_hash_t ctx, ocf_core_id_t core_id1,
uint64_t core_lba1, ocf_core_id_t core_id2, uint64_t core_lba2)
{
ocf_cache_line_t hash1 = hash_function(core_id1, core_lba1,
ctx->hash_entries);
ocf_cache_line_t hash2 = hash_function(core_id2, core_lba2,
ctx->hash_entries);
if (hash1 != ctx->hash_entries)
env_rwsem_up_write(&ctx->hash_locks[hash1]);
if (hash2 != hash1 && hash2 != ctx->hash_entries)
env_rwsem_up_write(&ctx->hash_locks[hash2]);
}
void hash_insert(nhit_hash_t ctx, ocf_core_id_t core_id, uint64_t core_lba)
{
uint64_t slot_id;
struct nhit_list_elem *slot;
ocf_core_id_t slot_core_id;
uint64_t slot_core_lba;
if (!get_rb_slot(ctx, &slot_id))
return;
slot = &ctx->ring_buffer[slot_id];
slot_core_id = slot->core_id;
slot_core_lba = slot->core_lba;
write_lock_hashes(ctx, core_id, core_lba, slot_core_id, slot_core_lba);
collision_remove(ctx, slot_id);
collision_insert_new(ctx, slot_id, core_id, core_lba);
write_unlock_hashes(ctx, core_id, core_lba, slot_core_id, slot_core_lba);
commit_rb_slot(ctx, slot_id);
}
bool hash_query(nhit_hash_t ctx, ocf_core_id_t core_id, uint64_t core_lba,
int32_t *counter)
{
OCF_CHECK_NULL(counter);
ocf_cache_line_t hash = hash_function(core_id, core_lba,
ctx->hash_entries);
uint64_t rb_idx;
env_rwsem_down_read(&ctx->hash_locks[hash]);
rb_idx = core_line_lookup(ctx, core_id, core_lba);
if (rb_idx == ctx->rb_entries) {
env_rwsem_up_read(&ctx->hash_locks[hash]);
return false;
}
*counter = env_atomic_inc_return(&ctx->ring_buffer[rb_idx].counter);
env_rwsem_up_read(&ctx->hash_locks[hash]);
return true;
}
void hash_set_occurences(nhit_hash_t ctx, ocf_core_id_t core_id,
uint64_t core_lba, int32_t occurences)
{
ocf_cache_line_t hash = hash_function(core_id, core_lba,
ctx->hash_entries);
uint64_t rb_idx;
env_rwsem_down_read(&ctx->hash_locks[hash]);
rb_idx = core_line_lookup(ctx, core_id, core_lba);
if (rb_idx == ctx->rb_entries) {
env_rwsem_up_read(&ctx->hash_locks[hash]);
return;
}
env_atomic_set(&ctx->ring_buffer[rb_idx].counter, occurences);
env_rwsem_up_read(&ctx->hash_locks[hash]);
}
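A minimal usage sketch of the API above (an illustration only: it assumes an
OCF build environment for "ocf/ocf.h" and the env layer, and the core id and
LBA values are made up):

#include "hash.h"

static void nhit_hash_example(void)
{
	nhit_hash_t hash;
	int32_t counter;
	ocf_core_id_t core_id = 1;	/* made-up core id */
	uint64_t core_lba = 2048;	/* made-up core line address */

	if (hash_init(1024, &hash))	/* 1024 ring buffer slots */
		return;

	/* First sighting: the line is not tracked yet, so start tracking it.
	 * hash_insert() initializes its counter to 1. */
	if (!hash_query(hash, core_id, core_lba, &counter))
		hash_insert(hash, core_id, core_lba);

	/* Every subsequent query bumps the counter and returns it. */
	(void)hash_query(hash, core_id, core_lba, &counter);
	/* counter == 2 here */

	/* Resetting the counter to 0 is how nhit_req_purge() forgets a line. */
	hash_set_occurences(hash, core_id, core_lba, 0);

	hash_deinit(hash);
}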

src/promotion/nhit/hash.h (new file, +25 lines)

@@ -0,0 +1,25 @@
/*
* Copyright(c) 2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef NHIT_HASH_H_
#define NHIT_HASH_H_
#include "ocf/ocf.h"
typedef struct nhit_hash *nhit_hash_t;
ocf_error_t hash_init(uint64_t hash_size, nhit_hash_t *ctx);
void hash_deinit(nhit_hash_t ctx);
void hash_insert(nhit_hash_t ctx, ocf_core_id_t core_id, uint64_t core_lba);
bool hash_query(nhit_hash_t ctx, ocf_core_id_t core_id, uint64_t core_lba,
int32_t *counter);
void hash_set_occurences(nhit_hash_t ctx, ocf_core_id_t core_id,
uint64_t core_lba, int32_t occurences);
#endif /* NHIT_HASH_H_ */

src/promotion/nhit/nhit.c (new file, +182 lines)

@@ -0,0 +1,182 @@
/*
* Copyright(c) 2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "hash.h"
#include "../../metadata/metadata.h"
#include "../../ocf_priv.h"
#include "../../engine/engine_common.h"
#include "nhit.h"
#include "../ops.h"
#define NHIT_MAPPING_RATIO 2
#define NHIT_MIN_THRESHOLD 1
#define NHIT_MAX_THRESHOLD 1000
#define NHIT_THRESHOLD_DEFAULT 3
struct nhit_policy_context {
nhit_hash_t hash_map;
/* Configurable parameters */
env_atomic insertion_threshold;
};
ocf_error_t nhit_init(ocf_cache_t cache, ocf_promotion_policy_t policy)
{
struct nhit_policy_context *ctx;
int result = 0;
ctx = env_vmalloc(sizeof(*ctx));
if (!ctx) {
result = -OCF_ERR_NO_MEM;
goto exit;
}
result = hash_init(ocf_metadata_get_cachelines_count(cache) *
NHIT_MAPPING_RATIO, &ctx->hash_map);
if (result)
goto dealloc_ctx;
env_atomic_set(&ctx->insertion_threshold, NHIT_THRESHOLD_DEFAULT);
policy->ctx = ctx;
return 0;
dealloc_ctx:
env_vfree(ctx);
exit:
ocf_cache_log(cache, log_err, "Error initializing nhit promotion policy\n");
return result;
}
void nhit_deinit(ocf_promotion_policy_t policy)
{
struct nhit_policy_context *ctx = policy->ctx;
hash_deinit(ctx->hash_map);
env_vfree(ctx);
policy->ctx = NULL;
}
ocf_error_t nhit_set_param(ocf_promotion_policy_t policy, uint8_t param_id,
uint64_t param_value)
{
struct nhit_policy_context *ctx = policy->ctx;
ocf_error_t result = 0;
switch (param_id) {
case nhit_insertion_threshold:
if (param_value >= NHIT_MIN_THRESHOLD &&
param_value <= NHIT_MAX_THRESHOLD) {
env_atomic_set(&ctx->insertion_threshold, param_value);
} else {
ocf_cache_log(policy->owner, log_err, "Invalid nhit "
"promotion policy insertion threshold!\n");
result = -OCF_ERR_INVAL;
}
break;
default:
ocf_cache_log(policy->owner, log_err, "Invalid nhit "
"promotion policy parameter (%u)!\n",
param_id);
result = -OCF_ERR_INVAL;
break;
}
return result;
}
ocf_error_t nhit_get_param(ocf_promotion_policy_t policy, uint8_t param_id,
uint64_t *param_value)
{
struct nhit_policy_context *ctx = policy->ctx;
ocf_error_t result = 0;
OCF_CHECK_NULL(param_value);
switch (param_id) {
case nhit_insertion_threshold:
*param_value = env_atomic_read(&ctx->insertion_threshold);
break;
default:
ocf_cache_log(policy->owner, log_err, "Invalid nhit "
"promotion policy parameter (%u)!\n",
param_id);
result = -OCF_ERR_INVAL;
break;
}
return result;
}
static void core_line_purge(struct nhit_policy_context *ctx, ocf_core_id_t core_id,
uint64_t core_lba)
{
hash_set_occurences(ctx->hash_map, core_id, core_lba, 0);
}
void nhit_req_purge(ocf_promotion_policy_t policy,
struct ocf_request *req)
{
struct nhit_policy_context *ctx = policy->ctx;
uint32_t i;
uint64_t core_line;
for (i = 0, core_line = req->core_line_first;
core_line <= req->core_line_last; core_line++, i++) {
struct ocf_map_info *entry = &(req->map[i]);
core_line_purge(ctx, entry->core_id, entry->core_line);
}
}
static bool core_line_should_promote(struct nhit_policy_context *ctx,
ocf_core_id_t core_id, uint64_t core_lba)
{
bool hit;
int32_t counter;
hit = hash_query(ctx->hash_map, core_id, core_lba, &counter);
if (hit) {
/* we have a hit, return now */
return env_atomic_read(&ctx->insertion_threshold) <= counter;
}
hash_insert(ctx->hash_map, core_id, core_lba);
return false;
}
bool nhit_req_should_promote(ocf_promotion_policy_t policy,
struct ocf_request *req)
{
struct nhit_policy_context *ctx = policy->ctx;
bool result = true;
uint32_t i;
uint64_t core_line;
for (i = 0, core_line = req->core_line_first;
core_line <= req->core_line_last; core_line++, i++) {
struct ocf_map_info *entry = &(req->map[i]);
if (!core_line_should_promote(ctx, entry->core_id,
entry->core_line)) {
result = false;
}
}
/* We don't want to reject requests that are even partially hits -
 * rejecting them could trigger pass-through and invalidation.
 * Let them in! */
return result || req->info.hit_no;
}
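The decision above reduces to a simple rule: start counting on the first
sighting of a core line, promote once its counter reaches the threshold, and
never reject a request that already (even partially) hits the cache. A
stand-alone model of the per-line rule (plain C, independent of OCF; all names
are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

static int counter;	/* per-core-line occurrence counter */
static bool tracked;

static bool should_promote(int threshold)
{
	if (tracked)
		return ++counter >= threshold;	/* the hash_query() path */
	tracked = true;				/* the hash_insert() path */
	counter = 1;
	return false;
}

int main(void)
{
	/* With the default threshold of 3, promotion starts at the third
	 * sighting: prints 0 0 1 1. */
	for (int i = 1; i <= 4; i++)
		printf("sighting %d -> promote: %d\n", i, should_promote(3));
	return 0;
}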

@@ -7,8 +7,13 @@
 #define NHIT_PROMOTION_POLICY_H_
 #include "ocf/ocf.h"
-#include "../ocf_request.h"
-#include "promotion.h"
+#include "../../ocf_request.h"
+#include "../promotion.h"
+
+enum nhit_param {
+nhit_insertion_threshold,
+nhit_param_max
+};
ocf_error_t nhit_init(ocf_cache_t cache, ocf_promotion_policy_t policy);
@@ -17,6 +22,9 @@ void nhit_deinit(ocf_promotion_policy_t policy);
 ocf_error_t nhit_set_param(ocf_promotion_policy_t policy, uint8_t param_id,
 uint64_t param_value);
+ocf_error_t nhit_get_param(ocf_promotion_policy_t policy, uint8_t param_id,
+uint64_t *param_value);
 void nhit_req_purge(ocf_promotion_policy_t policy,
 struct ocf_request *req);

@@ -10,6 +10,7 @@
 #include "promotion.h"
 struct ocf_promotion_policy {
 ocf_cache_t owner;
+ocf_promotion_t type;
 void *ctx;
 };
@@ -28,6 +29,10 @@ struct promotion_policy_ops {
 uint64_t param_value);
 /*!< Set promotion policy parameter */
+ocf_error_t (*get_param)(ocf_promotion_policy_t policy, uint8_t param_id,
+uint64_t *param_value);
+/*!< Get promotion policy parameter */
 void (*req_purge)(ocf_promotion_policy_t policy,
 struct ocf_request *req);
 /*!< Call when request core lines have been inserted or it is

@@ -7,11 +7,11 @@
 #include "promotion.h"
 #include "ops.h"
-#include "nhit.h"
+#include "nhit/nhit.h"
 struct promotion_policy_ops ocf_promotion_policies[ocf_promotion_max] = {
-[ocf_promotion_nop] = {
-.name = "nop",
+[ocf_promotion_always] = {
+.name = "always",
 },
 [ocf_promotion_nhit] = {
 .name = "nhit",
@@ -35,6 +35,7 @@ ocf_error_t ocf_promotion_init(ocf_cache_t cache, ocf_promotion_policy_t *policy
 return -OCF_ERR_NO_MEM;
+(*policy)->type = type;
 (*policy)->owner = cache;
 if (ocf_promotion_policies[type].init)
 result = ocf_promotion_policies[type].init(cache, *policy);
@@ -70,6 +71,22 @@ ocf_error_t ocf_promotion_set_param(ocf_promotion_policy_t policy,
 return result;
 }
+ocf_error_t ocf_promotion_get_param(ocf_promotion_policy_t policy,
+uint8_t param_id, uint64_t *param_value)
+{
+ocf_promotion_t type = policy->type;
+ocf_error_t result = 0;
+ENV_BUG_ON(type >= ocf_promotion_max);
+if (ocf_promotion_policies[type].get_param) {
+result = ocf_promotion_policies[type].get_param(policy, param_id,
+param_value);
+}
+return result;
+}
 void ocf_promotion_req_purge(ocf_promotion_policy_t policy,
 struct ocf_request *req)
 {

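The policy table and the guarded calls above follow one pattern: a
designated-initializer array of ops keyed by the policy enum, where a missing
callback simply turns the call into a no-op (which is exactly how the "always"
policy works). A stand-alone miniature of that pattern (all names are made up
for illustration):

#include <stdio.h>

typedef enum { policy_always = 0, policy_nhit, policy_max } policy_t;

struct ops {
	const char *name;
	int (*get_param)(unsigned param_id, unsigned long *value);
};

static int nhit_get(unsigned param_id, unsigned long *value)
{
	(void)param_id;
	*value = 3;	/* pretend this is the stored threshold */
	return 0;
}

static struct ops policies[policy_max] = {
	[policy_always] = { .name = "always" },	/* no callbacks */
	[policy_nhit] = { .name = "nhit", .get_param = nhit_get },
};

int main(void)
{
	unsigned long value = 0;

	/* Mirrors ocf_promotion_get_param(): dispatch only if the op exists. */
	if (policies[policy_nhit].get_param)
		policies[policy_nhit].get_param(0, &value);
	printf("%s threshold: %lu\n", policies[policy_nhit].name, value);
	return 0;
}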
@@ -43,6 +43,18 @@ void ocf_promotion_deinit(ocf_promotion_policy_t policy);
 ocf_error_t ocf_promotion_set_param(ocf_promotion_policy_t policy,
 uint8_t param_id, uint64_t param_value);
+/**
+ * @brief Get promotion policy parameter
+ *
+ * @param[in] policy promotion policy handle
+ * @param[in] param_id id of parameter to be retrieved
+ * @param[out] param_value buffer for the retrieved parameter value
+ *
+ * @retval ocf_error_t
+ */
+ocf_error_t ocf_promotion_get_param(ocf_promotion_policy_t policy,
+uint8_t param_id, uint64_t *param_value);
 /**
 * @brief Update promotion policy after cache lines have been promoted to cache
 * or discarded from core device

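Together the two entry points might be used like this from in-tree code (a
hypothetical snippet: it assumes a cache initialized with the nhit policy and
reaches the policy handle through cache->promotion_policy, the same way the
discard engine does above):

uint64_t threshold;
ocf_error_t err;

/* Raise the insertion threshold from the default of 3 to 10... */
err = ocf_promotion_set_param(cache->promotion_policy,
		nhit_insertion_threshold, 10);

/* ...and read it back; on success threshold == 10. */
if (!err)
	err = ocf_promotion_get_param(cache->promotion_policy,
			nhit_insertion_threshold, &threshold);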
@@ -14,13 +14,14 @@ MAIN_DIRECTORY_OF_UNIT_TESTS = "../tests/"
 # Paths to all directories in which tests are stored. All paths should be relative to
 # MAIN_DIRECTORY_OF_UNIT_TESTS
 DIRECTORIES_WITH_TESTS_LIST = ["cleaning/", "metadata/", "mngt/", "concurrency/", "engine/",
-"eviction/", "utils/"]
+"eviction/", "utils/", "promotion/"]
 # Paths to all directories containing files with sources. All paths should be relative to
 # MAIN_DIRECTORY_OF_TESTED_PROJECT
 DIRECTORIES_TO_INCLUDE_FROM_PROJECT_LIST = ["src/", "src/cleaning/", "src/engine/", "src/metadata/",
 "src/eviction/", "src/mngt/", "src/concurrency/",
-"src/utils/", "inc/"]
+"src/utils/", "inc/", "src/promotion/",
+"src/promotion/nhit/"]
 # Paths to all directories from directory with tests, which should also be included
 DIRECTORIES_TO_INCLUDE_FROM_UT_LIST = ["ocf_env/"]