Delete duplicates

Delete the unnecessary, duplicated ocf_env files from the UT directory.

Signed-off-by: Slawomir_Jankowski <slawomir.jankowski@intel.com>
Slawomir_Jankowski 2019-10-01 14:25:26 +02:00
parent 0696ec1f9c
commit e248c68846
4 changed files with 0 additions and 1046 deletions


@@ -1,543 +0,0 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf_env.h"
#include <sys/types.h>
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <cmocka.h>
void bug_on(int cond)
{
/* Wrap this to use your implementation */
assert_false(cond);
}
void *env_malloc(size_t size, int flags)
{
return malloc(size);
}
void *env_zalloc(size_t size, int flags)
{
return calloc(1, size);
}
void env_free(const void *ptr)
{
	free((void *) ptr);
}
void *env_vmalloc(size_t size)
{
return malloc(size);
}
void *env_vzalloc(size_t size)
{
return calloc(1, size);
}
void env_vfree(const void *ptr)
{
	free((void *) ptr);
}
uint64_t env_get_free_memory(void)
{
return sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}
/* *** ALLOCATOR *** */
struct _env_allocator {
/*!< Memory pool ID unique name */
char *name;
/*!< Size of specific item of memory pool */
uint32_t item_size;
/*!< Number of currently allocated items in pool */
env_atomic count;
};
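/*
 * Round an allocation size up to the next power of two (e.g. 3 -> 4,
 * 100 -> 128); sizes of 1 and 2 are returned unchanged. Note that
 * __builtin_clz() works on a 32-bit value, so this helper assumes item
 * sizes below 4 GiB, which is more than enough for unit tests.
 */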
size_t env_allocator_align(size_t size)
{
if (size <= 2)
return size;
return (1ULL << 32) >> __builtin_clz(size - 1);
}
struct _env_allocator_item {
uint32_t flags;
uint32_t cpu;
char data[];
};
void *env_allocator_new(env_allocator *allocator)
{
	struct _env_allocator_item *item;

	item = calloc(1, allocator->item_size);
	if (!item)
		return NULL;

	item->cpu = 0;
	env_atomic_inc(&allocator->count);

	return &item->data;
}
env_allocator *env_allocator_create(uint32_t size, const char *name)
{
	env_allocator *allocator = calloc(1, sizeof(*allocator));

	if (!allocator)
		return NULL;

	allocator->item_size = size + sizeof(struct _env_allocator_item);
	allocator->name = strdup(name);

	return allocator;
}
void env_allocator_del(env_allocator *allocator, void *obj)
{
struct _env_allocator_item *item;
item = container_of(obj, struct _env_allocator_item, data);
env_atomic_dec(&allocator->count);
free(item);
}
void env_allocator_destroy(env_allocator *allocator)
{
if (allocator) {
if (env_atomic_read(&allocator->count)) {
fprintf(stderr, "Not all object deallocated\n");
ENV_WARN(true, "Cleanup problem\n");
}
free(allocator->name);
free(allocator);
}
}
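/*
 * Typical allocator usage in a test (illustrative only; struct foo is
 * hypothetical):
 *
 *	env_allocator *pool = env_allocator_create(sizeof(struct foo), "foo");
 *	struct foo *obj = env_allocator_new(pool);
 *	...
 *	env_allocator_del(pool, obj);
 *	env_allocator_destroy(pool);
 */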
/* *** COMPLETION *** */
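/*
 * The completion mocks below, like the rmutex, rwsem, rwlock, scheduling
 * and CRC mocks later in this file, follow the standard cmocka pattern:
 * every call registers itself with function_called()/check_expected_ptr()
 * and, where a return value is needed, pulls it from mock(). A test must
 * therefore pair each expected call with expect_function_call(),
 * expect_value()/expect_any() and, for mocks that call mock(),
 * will_return() before invoking the code under test.
 */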
void env_completion_init(env_completion *completion)
{
function_called();
check_expected_ptr(completion);
}
void env_completion_wait(env_completion *completion)
{
function_called();
check_expected_ptr(completion);
}
void env_completion_complete(env_completion *completion)
{
function_called();
check_expected_ptr(completion);
}
int env_mutex_init(env_mutex *mutex)
{
return 0;
}
int env_mutex_destroy(env_mutex *mutex)
{
return 0;
}
void env_mutex_lock(env_mutex *mutex)
{
}
int env_mutex_lock_interruptible(env_mutex *mutex)
{
return 0;
}
void env_mutex_unlock(env_mutex *mutex)
{
}
int env_rmutex_init(env_rmutex *rmutex)
{
function_called();
check_expected_ptr(rmutex);
return mock();
}
void env_rmutex_lock(env_rmutex *rmutex)
{
function_called();
check_expected_ptr(rmutex);
}
int env_rmutex_lock_interruptible(env_rmutex *rmutex)
{
function_called();
check_expected_ptr(rmutex);
return mock();
}
void env_rmutex_unlock(env_rmutex *rmutex)
{
function_called();
check_expected_ptr(rmutex);
}
int env_rwsem_init(env_rwsem *s)
{
function_called();
check_expected_ptr(s);
return mock();
}
void env_rwsem_up_read(env_rwsem *s)
{
function_called();
check_expected_ptr(s);
}
void env_rwsem_down_read(env_rwsem *s)
{
function_called();
check_expected_ptr(s);
}
int env_rwsem_down_read_trylock(env_rwsem *s)
{
function_called();
check_expected_ptr(s);
return mock();
}
void env_rwsem_up_write(env_rwsem *s)
{
function_called();
check_expected_ptr(s);
}
void env_rwsem_down_write(env_rwsem *s)
{
function_called();
check_expected_ptr(s);
}
int env_rwsem_down_write_trylock(env_rwsem *s)
{
function_called();
check_expected_ptr(s);
return mock();
}
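/*
 * The "atomic" helpers below are plain, non-atomic integer operations.
 * That is sufficient here because the unit tests run single-threaded.
 */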
int env_atomic_read(const env_atomic *a)
{
return *a;
}
void env_atomic_set(env_atomic *a, int i)
{
*a = i;
}
void env_atomic_add(int i, env_atomic *a)
{
*a += i;
}
void env_atomic_sub(int i, env_atomic *a)
{
*a -= i;
}
void env_atomic_inc(env_atomic *a)
{
++*a;
}
void env_atomic_dec(env_atomic *a)
{
--*a;
}
bool env_atomic_dec_and_test(env_atomic *a)
{
return --*a == 0;
}
int env_atomic_add_return(int i, env_atomic *a)
{
	return *a += i;
}
int env_atomic_sub_return(int i, env_atomic *a)
{
	return *a -= i;
}
int env_atomic_inc_return(env_atomic *a)
{
return ++*a;
}
int env_atomic_dec_return(env_atomic *a)
{
return --*a;
}
int env_atomic_cmpxchg(env_atomic *a, int old, int new_value)
{
int oldval = *a;
if (oldval == old)
*a = new_value;
return oldval;
}
int env_atomic_add_unless(env_atomic *a, int i, int u)
{
int c, old;
c = *a;
for (;;) {
if (c == (u))
break;
old = env_atomic_cmpxchg((a), c, c + (i));
if (old == c)
break;
c = old;
}
return c != (u);
}
long env_atomic64_read(const env_atomic64 *a)
{
return *a;
}
void env_atomic64_set(env_atomic64 *a, long i)
{
	*a = i;
}
void env_atomic64_add(long i, env_atomic64 *a)
{
*a += i;
}
void env_atomic64_sub(long i, env_atomic64 *a)
{
*a -= i;
}
void env_atomic64_inc(env_atomic64 *a)
{
++*a;
}
void env_atomic64_dec(env_atomic64 *a)
{
--*a;
}
long env_atomic64_cmpxchg(env_atomic64 *a, long old, long new)
{
long oldval = *a;
if (oldval == old)
*a = new;
return oldval;
}
int env_spinlock_init(env_spinlock *l)
{
return 0;
}
int env_spinlock_destroy(env_spinlock *l)
{
return 0;
}
void env_spinlock_lock(env_spinlock *l)
{
}
int env_spinlock_trylock(env_spinlock *l)
{
return 0;
}
void env_spinlock_unlock(env_spinlock *l)
{
}
void env_rwlock_init(env_rwlock *l)
{
function_called();
check_expected_ptr(l);
}
void env_rwlock_read_lock(env_rwlock *l)
{
function_called();
check_expected_ptr(l);
}
void env_rwlock_read_unlock(env_rwlock *l)
{
function_called();
check_expected_ptr(l);
}
void env_rwlock_write_lock(env_rwlock *l)
{
function_called();
check_expected_ptr(l);
}
void env_rwlock_write_unlock(env_rwlock *l)
{
function_called();
check_expected_ptr(l);
}
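/*
 * The bit operations address bit nr as byte (nr >> 3), bit (nr & 7) within
 * the buffer at addr. env_bit_set()/env_bit_clear() stay atomic through the
 * GCC __sync builtins, while env_bit_test() is a plain read. For example,
 * env_bit_set(11, addr) ORs 0x08 into ((char *)addr)[1].
 */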
void env_bit_set(int nr, volatile void *addr)
{
char *byte = (char *) addr + (nr >> 3);
char mask = 1 << (nr & 7);
__sync_or_and_fetch(byte, mask);
}
void env_bit_clear(int nr, volatile void *addr)
{
char *byte = (char *) addr + (nr >> 3);
char mask = 1 << (nr & 7);
mask = ~mask;
__sync_and_and_fetch(byte, mask);
}
bool env_bit_test(int nr, const volatile unsigned long *addr)
{
const char *byte = (const char *) addr + (nr >> 3);
char mask = 1 << (nr & 7);
return !!(*byte & mask);
}
/* *** SCHEDULING *** */
void env_touch_softlockup_wd(void)
{
function_called();
}
int env_in_interrupt(void)
{
function_called();
return mock();
}
uint64_t env_get_tick_count(void)
{
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
uint64_t env_ticks_to_msecs(uint64_t j)
{
return j;
}
uint64_t env_ticks_to_secs(uint64_t j)
{
return j / 1000;
}
uint64_t env_secs_to_ticks(uint64_t j)
{
return j * 1000;
}
int env_memset(void *dest, size_t count, int ch)
{
memset(dest, ch, count);
return 0;
}
int env_memcpy(void *dest, size_t destsz, const void * src, size_t count)
{
if (destsz < count)
memcpy(dest, src, destsz);
else
memcpy(dest, src, count);
return 0;
}
int env_memcmp(const void *str1, size_t n1, const void *str2, size_t n2,
int *diff)
{
size_t n = n1 > n2 ? n2 : n1;
*diff = memcmp(str1, str2, n);
return 0;
}
int env_strncpy(char * dest, size_t destsz, const char *src, size_t count)
{
if (destsz < count)
strncpy(dest, src, destsz);
else
strncpy(dest, src, count);
return 0;
}
size_t env_strnlen(const char *str, size_t strsz)
{
return strlen(str);
}
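/*
 * Note: env_strnlen() above ignores the strsz bound, and env_sort() below
 * delegates to qsort(), which moves elements itself, so the swap_fn
 * argument is unused. Both simplifications are acceptable for unit tests.
 */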
void env_sort(void *base, size_t num, size_t size,
int (*cmp_fn)(const void *, const void *),
void (*swap_fn)(void *, void *, int size))
{
qsort(base, num, size, cmp_fn);
}
int env_strncmp(const char * str1, const char * str2, size_t num)
{
return strncmp(str1, str2, num);
}
void env_msleep(uint64_t n)
{
}
/* *** CRC *** */
uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len)
{
function_called();
check_expected(crc);
check_expected(len);
check_expected_ptr(data);
return mock();
}
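/*
 * Example of driving the env_crc32() mock from a cmocka test (illustrative
 * only; test_checksum() and the expected values are hypothetical):
 *
 *	static void test_checksum(void **state)
 *	{
 *		expect_function_call(env_crc32);
 *		expect_value(env_crc32, crc, 0);
 *		expect_value(env_crc32, len, 16);
 *		expect_any(env_crc32, data);
 *		will_return(env_crc32, 0xDEADBEEF);
 *		// ... call the code under test here ...
 *	}
 */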
void env_cond_resched(void)
{
}


@@ -1,344 +0,0 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __LIBOCF_ENV_H__
#define __LIBOCF_ENV_H__
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_GNU
#define __USE_GNU
#endif
#include <linux/limits.h>
#include <linux/stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <pthread.h>
#include <assert.h>
#include <semaphore.h>
#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
#include "ocf_env_list.h"
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef uint64_t sector_t;
#define ENV_PRIu64 "lu"
#define __packed __attribute__((packed))
#define __aligned(x) __attribute__((aligned(x)))
/* linux sector 512-bytes */
#define ENV_SECTOR_SHIFT 9
#define PAGE_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* *** MEMORY MANAGEMENT *** */
#define ENV_MEM_NORMAL 0
#define ENV_MEM_NOIO 1
#define ENV_MEM_ATOMIC 2
#define ENV_WARN(cond, fmt, args...) ({})
#define ENV_WARN_ON(cond) ({ \
if (unlikely(cond)) \
fprintf(stderr, "WARNING (%s:%d)\n", \
__FILE__, __LINE__); \
})
#define ENV_BUG() ({ \
fprintf(stderr, "BUG (%s:%d)\n", \
__FILE__, __LINE__); \
assert(0); \
abort(); \
})
#define ENV_BUG_ON(cond) ({ \
int eval = cond; \
if (eval) { \
print_message("%s:%u BUG: %s\n", __FILE__, __LINE__, #cond); \
bug_on(eval); \
} \
})
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type, member) );})
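/*
 * container_of() example (illustrative; struct foo is hypothetical):
 *
 *	struct foo { int a; env_atomic refcount; };
 *	struct foo *f = ...;
 *	// container_of(&f->refcount, struct foo, refcount) yields f
 */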
/* ATOMICS */
#ifndef atomic_read
#define atomic_read(ptr) (*(volatile __typeof__(*ptr) *) (ptr))
#endif
#ifndef atomic_set
#define atomic_set(ptr, i) ((*(volatile __typeof__(*ptr) *) (ptr)) = (i))
#endif
#define likely(x) (x)
#define unlikely(x) (x)
/*
* Bug on for testing
*/
void bug_on(int cond);
void *env_malloc(size_t size, int flags);
void *env_zalloc(size_t size, int flags);
void env_free(const void *ptr);
void *env_vmalloc(size_t size);
void *env_vzalloc(size_t size);
void env_vfree(const void *ptr);
uint64_t env_get_free_memory(void);
/* *** ALLOCATOR *** */
typedef struct _env_allocator env_allocator;
env_allocator *env_allocator_create(uint32_t size, const char *name);
void env_allocator_destroy(env_allocator *allocator);
void *env_allocator_new(env_allocator *allocator);
void env_allocator_del(env_allocator *allocator, void *item);
/* *** MUTEX *** */
typedef struct {
pthread_mutex_t m;
} env_mutex;
int env_mutex_init(env_mutex *mutex);
int env_mutex_destroy(env_mutex *mutex);
void env_mutex_lock(env_mutex *mutex);
int env_mutex_lock_interruptible(env_mutex *mutex);
void env_mutex_unlock(env_mutex *mutex);
/* *** RECURSIVE MUTEX *** */
typedef env_mutex env_rmutex;
int env_rmutex_init(env_rmutex *rmutex);
void env_rmutex_lock(env_rmutex *rmutex);
int env_rmutex_lock_interruptible(env_rmutex *rmutex);
void env_rmutex_unlock(env_rmutex *rmutex);
/* *** RW SEMAPHORE *** */
typedef struct {
pthread_rwlock_t lock;
} env_rwsem;
int env_rwsem_init(env_rwsem *s);
void env_rwsem_up_read(env_rwsem *s);
void env_rwsem_down_read(env_rwsem *s);
int env_rwsem_down_read_trylock(env_rwsem *s);
void env_rwsem_up_write(env_rwsem *s);
void env_rwsem_down_write(env_rwsem *s);
int env_rwsem_down_write_trylock(env_rwsem *s);
/* *** ATOMIC VARIABLES *** */
typedef int env_atomic;
typedef long env_atomic64;
int env_atomic_read(const env_atomic *a);
void env_atomic_set(env_atomic *a, int i);
void env_atomic_add(int i, env_atomic *a);
void env_atomic_sub(int i, env_atomic *a);
void env_atomic_inc(env_atomic *a);
void env_atomic_dec(env_atomic *a);
bool env_atomic_dec_and_test(env_atomic *a);
int env_atomic_add_return(int i, env_atomic *a);
int env_atomic_sub_return(int i, env_atomic *a);
int env_atomic_inc_return(env_atomic *a);
int env_atomic_dec_return(env_atomic *a);
int env_atomic_cmpxchg(env_atomic *a, int old, int new_value);
int env_atomic_add_unless(env_atomic *a, int i, int u);
long env_atomic64_read(const env_atomic64 *a);
void env_atomic64_set(env_atomic64 *a, long i);
void env_atomic64_add(long i, env_atomic64 *a);
void env_atomic64_sub(long i, env_atomic64 *a);
void env_atomic64_inc(env_atomic64 *a);
void env_atomic64_dec(env_atomic64 *a);
long env_atomic64_cmpxchg(env_atomic64 *a, long old, long new);
typedef int Coroutine;
/* *** COMPLETION *** */
struct completion {
bool completed;
bool waiting;
Coroutine *co;
};
typedef struct completion env_completion;
void env_completion_init(env_completion *completion);
void env_completion_wait(env_completion *completion);
void env_completion_complete(env_completion *completion);
/* *** SPIN LOCKS *** */
typedef struct {
} env_spinlock;
int env_spinlock_init(env_spinlock *l);
int env_spinlock_destroy(env_spinlock *l);
void env_spinlock_lock(env_spinlock *l);
int env_spinlock_trylock(env_spinlock *l);
void env_spinlock_unlock(env_spinlock *l);
#define env_spinlock_lock_irqsave(l, flags) \
	do { env_spinlock_lock(l); (void)(flags); } while (0)
#define env_spinlock_unlock_irqrestore(l, flags) \
	do { env_spinlock_unlock(l); (void)(flags); } while (0)
/* *** RW LOCKS *** */
typedef struct {
} env_rwlock;
void env_rwlock_init(env_rwlock *l);
void env_rwlock_read_lock(env_rwlock *l);
void env_rwlock_read_unlock(env_rwlock *l);
void env_rwlock_write_lock(env_rwlock *l);
void env_rwlock_write_unlock(env_rwlock *l);
/* *** WAITQUEUE *** */
typedef struct {
bool waiting;
bool completed;
Coroutine *co;
} env_waitqueue;
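/*
 * Note: this waitqueue is a stub. env_waitqueue_wait() never blocks; it only
 * resets the bookkeeping fields, which is enough for single-threaded tests
 * where the awaited condition already holds.
 */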
#define env_waitqueue_wait(w, condition) \
({ \
int __ret = 0; \
if (!(condition) && !w.completed) { \
w.waiting = true; \
} \
w.co = NULL; \
w.waiting = false; \
w.completed = false; \
__ret = __ret; \
})
/* *** BIT OPERATIONS *** */
void env_bit_set(int nr, volatile void *addr);
void env_bit_clear(int nr, volatile void *addr);
bool env_bit_test(int nr, const volatile unsigned long *addr);
/* *** SCHEDULING *** */
void env_touch_softlockup_wd(void);
int env_in_interrupt(void);
uint64_t env_get_tick_count(void);
uint64_t env_ticks_to_msecs(uint64_t j);
uint64_t env_ticks_to_secs(uint64_t j);
uint64_t env_secs_to_ticks(uint64_t j);
/* *** STRING OPERATIONS *** */
int env_memset(void *dest, size_t count, int ch);
int env_memcpy(void *dest, size_t destsz, const void * src, size_t count);
int env_memcmp(const void *str1, size_t n1, const void *str2, size_t n2,
int *diff);
int env_strncpy(char * dest, size_t destsz, const char *src, size_t srcsz);
size_t env_strnlen(const char *str, size_t strsz);
int env_strncmp(const char * str1, const char * str2, size_t num);
/* *** SORTING *** */
void env_sort(void *base, size_t num, size_t size,
int (*cmp_fn)(const void *, const void *),
void (*swap_fn)(void *, void *, int size));
void env_msleep(uint64_t n);
/* *** CRC *** */
uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len);
void env_cond_resched(void);
#endif /* __LIBOCF_ENV_H__ */


@@ -1,13 +0,0 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_ENV_HEADERS_H__
#define __OCF_ENV_HEADERS_H__
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#endif /* __OCF_ENV_HEADERS_H__ */


@@ -1,146 +0,0 @@
/*
* Copyright(c) 2012-2018 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_LIST_H__
#define __OCF_LIST_H__
#define LIST_POISON1 ((void *)0x101)
#define LIST_POISON2 ((void *)0x202)
/**
* List entry structure mimicking linux kernel based one.
*/
struct list_head {
struct list_head *next;
struct list_head *prev;
};
/**
* start an empty list
*/
#define INIT_LIST_HEAD(l) { (l)->prev = l; (l)->next = l; }
/**
* Add item to list head.
* @param it list entry to be added
* @param l1 list main node (head)
*/
static inline void list_add(struct list_head *it, struct list_head *l1)
{
it->prev = l1;
it->next = l1->next;
l1->next->prev = it;
l1->next = it;
}
/**
* Add item it to tail.
* @param it list entry to be added
* @param l1 list main node (head)
*/
static inline void list_add_tail(struct list_head *it, struct list_head *l1)
{
it->prev = l1->prev;
it->next = l1;
l1->prev->next = it;
l1->prev = it;
}
/**
* Check whether the list is empty; returns nonzero (true) if it is.
* @param l1 list main node (head)
*/
static inline int list_empty(struct list_head *l1)
{
return l1->next == l1;
}
/**
* delete an entry from a list
* @param it list entry to be deleted
*/
static inline void list_del(struct list_head *it)
{
it->next->prev = it->prev;
it->prev->next = it->next;
}
/**
* Extract an entry.
* @param list_head_i list head item, from which entry is extracted
* @param item_type type (struct) of list entry
* @param field_name name of list_head field within item_type
*/
#define list_entry(list_head_i, item_type, field_name) \
(item_type *)(((void*)(list_head_i)) - offsetof(item_type, field_name))
#define list_first_entry(list_head_i, item_type, field_name) \
list_entry((list_head_i)->next, item_type, field_name)
/**
* @param iterator uninitialized list_head pointer, to be used as iterator
* @param plist list head (main node)
*/
#define list_for_each(iterator, plist) \
for (iterator = (plist)->next; \
(iterator)->next != (plist)->next; \
iterator = (iterator)->next)
/**
* Safe version of list_for_each which works even if entries are deleted during
* loop.
* @param iterator uninitialized list_head pointer, to be used as iterator
* @param q another uninitialized list_head, used as helper
* @param plist list head (main node)
*/
/*
 * The algorithm handles the case where the element at q is deleted during
 * the loop. Consider a 3-element list with head h; i marks the iterator
 * and q the helper at each step:
 *
 *      h -> 1 -> 2 -> 3 ->
 * 1.        i    q
 * 2.             i    q
 * 3.                  i    q
 */
#define list_for_each_safe(iterator, q, plist) \
for (iterator = (q = (plist)->next->next)->prev; \
(q) != (plist)->next; \
iterator = (q = (q)->next)->prev)
#define _list_entry_helper(item, head, field_name) \
list_entry(head, typeof(*item), field_name)
/**
* Iterate over list entries.
* @param item pointer to the list entry used as iterator
* @param plist pointer to list_head item
* @param field_name name of list_head field in list entry
*/
#define list_for_each_entry(item, plist, field_name) \
for (item = _list_entry_helper(item, (plist)->next, field_name); \
_list_entry_helper(item, (item)->field_name.next, field_name) !=\
_list_entry_helper(item, (plist)->next, field_name); \
item = _list_entry_helper(item, (item)->field_name.next, field_name))
/**
* Safe version of list_for_each_entry which works even if entries are deleted
* during loop.
* @param item pointer to the list entry used as iterator
* @param q another pointer to list item, used as helper
* @param plist pointer to list_head item
* @param field_name name of list_head field in list entry
*/
#define list_for_each_entry_safe(item, q, plist, field_name) \
for (item = _list_entry_helper(item, (plist)->next, field_name), \
q = _list_entry_helper(item, (item)->field_name.next, field_name); \
_list_entry_helper(item, (item)->field_name.next, field_name) != \
_list_entry_helper(item, (plist)->next, field_name); \
item = q, q = _list_entry_helper(q, (q)->field_name.next, field_name))
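/*
 * Example: freeing every element while iterating (illustrative only;
 * struct item, head and the node field are hypothetical):
 *
 *	struct item *cur, *next;
 *
 *	list_for_each_entry_safe(cur, next, &head, node) {
 *		list_del(&cur->node);
 *		free(cur);
 *	}
 */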
#endif