OCF Cleanup

**ocf_env**: deleting unused functions from the main file

Signed-off-by: Slawomir_Jankowski <slawomir.jankowski@intel.com>
Slawomir_Jankowski authored 2019-08-06 13:01:23 +02:00
parent 34c8d135c2
commit 679bc38770
4 changed files with 0 additions and 206 deletions
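Most of the helpers removed below were thin wrappers over primitives that survive the cleanup, so an out-of-tree caller that still depends on one can rebuild it locally. As a minimal sketch (not part of this commit; `my_rwsem_is_locked` is a hypothetical name), the deleted `env_rwsem_is_locked` can be reproduced from the surviving trylock/unlock pair, exactly as its body in the hunk below does:

```c
/* Illustrative sketch only, mirroring the deleted POSIX implementation:
 * probe the rwsem with a non-blocking read lock and release it again. */
#include "ocf_env.h"

static inline int my_rwsem_is_locked(env_rwsem *s)
{
	if (env_rwsem_down_read_trylock(s) == 0) {
		env_rwsem_up_read(s);	/* acquired, so nothing held it for write */
		return 0;
	}

	return 1;			/* trylock failed, the semaphore is held */
}
```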

env/posix/ocf_env.c (vendored, 5 deletions)

@@ -110,11 +110,6 @@ void env_allocator_destroy(env_allocator *allocator)
}
}
uint32_t env_allocator_item_count(env_allocator *allocator)
{
return env_atomic_read(&allocator->count);
}
/* *** DEBUGING *** */
#define ENV_TRACE_DEPTH 16

env/posix/ocf_env.h (vendored, 101 deletions)

@@ -169,8 +169,6 @@ void *env_allocator_new(env_allocator *allocator);
void env_allocator_del(env_allocator *allocator, void *item);
uint32_t env_allocator_item_count(env_allocator *allocator);
/* *** MUTEX *** */
typedef struct {
@@ -198,28 +196,11 @@ static inline int env_mutex_lock_interruptible(env_mutex *mutex)
return 0;
}
static inline int env_mutex_trylock(env_mutex *mutex)
{
return pthread_mutex_trylock(&mutex->m) ? -OCF_ERR_NO_LOCK : 0;
}
static inline void env_mutex_unlock(env_mutex *mutex)
{
ENV_BUG_ON(pthread_mutex_unlock(&mutex->m));
}
static inline int env_mutex_is_locked(env_mutex *mutex)
{
if (env_mutex_trylock(mutex) == 0) {
env_mutex_unlock(mutex);
return 1;
}
return 0;
}
/* *** RECURSIVE MUTEX *** */
typedef env_mutex env_rmutex;
static inline int env_rmutex_init(env_rmutex *rmutex)
@@ -243,21 +224,11 @@ static inline int env_rmutex_lock_interruptible(env_rmutex *rmutex)
return env_mutex_lock_interruptible(rmutex);
}
static inline int env_rmutex_trylock(env_rmutex *rmutex)
{
return env_mutex_trylock(rmutex);
}
static inline void env_rmutex_unlock(env_rmutex *rmutex)
{
env_mutex_unlock(rmutex);
}
static inline int env_rmutex_is_locked(env_rmutex *rmutex)
{
return env_mutex_is_locked(rmutex);
}
/* *** RW SEMAPHORE *** */
typedef struct {
pthread_rwlock_t lock;
@@ -298,28 +269,6 @@ static inline int env_rwsem_down_write_trylock(env_rwsem *s)
return pthread_rwlock_trywrlock(&s->lock) ? -OCF_ERR_NO_LOCK : 0;
}
static inline int env_rwsem_is_locked(env_rwsem *s)
{
if (env_rwsem_down_read_trylock(s) == 0) {
env_rwsem_up_read(s);
return 0;
}
return 1;
}
static inline int env_rwsem_down_write_interruptible(env_rwsem *s)
{
env_rwsem_down_write(s);
return 0;
}
static inline int env_rwsem_down_read_interruptible(env_rwsem *s)
{
env_rwsem_down_read(s);
return 0;
}
/* *** COMPLETION *** */
struct completion {
sem_t sem;
@@ -342,12 +291,6 @@ static inline void env_completion_complete(env_completion *completion)
sem_post(&completion->sem);
}
static inline void env_completion_complete_and_exit(
env_completion *completion, int ret)
{
env_completion_complete(completion); /* TODO */
}
/* *** ATOMIC VARIABLES *** */
typedef struct {
@@ -378,11 +321,6 @@ static inline void env_atomic_sub(int i, env_atomic *a)
__sync_sub_and_fetch(&a->counter, i);
}
static inline bool env_atomic_sub_and_test(int i, env_atomic *a)
{
return __sync_sub_and_fetch(&a->counter, i) == 0;
}
static inline void env_atomic_inc(env_atomic *a)
{
env_atomic_add(1, a);
@@ -398,11 +336,6 @@ static inline bool env_atomic_dec_and_test(env_atomic *a)
return __sync_sub_and_fetch(&a->counter, 1) == 0;
}
static inline bool env_atomic_inc_and_test(env_atomic *a)
{
return __sync_add_and_fetch(&a->counter, 1) == 0;
}
static inline int env_atomic_add_return(int i, env_atomic *a)
{
return __sync_add_and_fetch(&a->counter, i);
@@ -504,16 +437,6 @@ static inline void env_spinlock_unlock(env_spinlock *l)
ENV_BUG_ON(pthread_spin_unlock(&l->lock));
}
static inline void env_spinlock_lock_irq(env_spinlock *l)
{
env_spinlock_lock(l);
}
static inline void env_spinlock_unlock_irq(env_spinlock *l)
{
env_spinlock_unlock(l);
}
#define env_spinlock_lock_irqsave(l, flags) \
(void)flags; \
env_spinlock_lock(l)
@@ -559,16 +482,6 @@ typedef struct {
sem_t sem;
} env_waitqueue;
static inline void env_waitqueue_init(env_waitqueue *w)
{
sem_init(&w->sem, 0, 0);
}
static inline void env_waitqueue_wake_up(env_waitqueue *w)
{
sem_post(&w->sem);
}
#define env_waitqueue_wait(w, condition) \
({ \
int __ret = 0; \
@@ -604,12 +517,6 @@ static inline bool env_bit_test(int nr, const volatile unsigned long *addr)
return !!(*byte & mask);
}
/* *** SCHEDULING *** */
static inline void env_schedule(void)
{
sched_yield();
}
static inline int env_in_interrupt(void)
{
return 0;
@@ -689,14 +596,6 @@ struct env_timeval {
uint64_t sec, usec;
};
static inline void env_gettimeofday(struct env_timeval *tv)
{
struct timeval t;
gettimeofday(&t, NULL);
tv->sec = t.tv_sec;
tv->usec = t.tv_usec;
}
uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len);
#define ENV_PRIu64 "lu"
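The removed atomic test helpers likewise collapse onto API that stays: their deleted bodies used the same `__sync_*_and_fetch` intrinsics as the surviving return-value variants, so a test-for-zero helper can be expressed in terms of those (assuming `env_atomic_sub_return` remains available alongside the `env_atomic_add_return` shown in the context above, as it is in the other env header in this commit). A minimal sketch, with hypothetical wrapper names:

```c
/* Illustrative sketch only: the deleted *_and_test helpers are the surviving
 * *_return helpers compared against zero. */
#include <stdbool.h>
#include "ocf_env.h"

static inline bool my_atomic_sub_and_test(int i, env_atomic *a)
{
	return env_atomic_sub_return(i, a) == 0;	/* was __sync_sub_and_fetch(&a->counter, i) == 0 */
}

static inline bool my_atomic_inc_and_test(env_atomic *a)
{
	return env_atomic_add_return(1, a) == 0;	/* was __sync_add_and_fetch(&a->counter, 1) == 0 */
}
```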

(third changed file, name not shown)

@@ -127,11 +127,6 @@ void env_allocator_destroy(env_allocator *allocator)
}
}
uint32_t env_allocator_item_count(env_allocator *allocator)
{
return env_atomic_read(&allocator->count);
}
/* *** COMPLETION *** */
void env_completion_init(env_completion *completion)
@@ -173,26 +168,12 @@ int env_mutex_lock_interruptible(env_mutex *mutex)
return mock();
}
int env_mutex_trylock(env_mutex *mutex)
{
function_called();
check_expected_ptr(mutex);
return mock();
}
void env_mutex_unlock(env_mutex *mutex)
{
function_called();
check_expected_ptr(mutex);
}
int env_mutex_is_locked(env_mutex *mutex)
{
function_called();
check_expected_ptr(mutex);
return mock();
}
int env_rmutex_init(env_rmutex *rmutex)
{
function_called();
@@ -213,26 +194,12 @@ int env_rmutex_lock_interruptible(env_rmutex *rmutex)
return mock();
}
int env_rmutex_trylock(env_rmutex *rmutex)
{
function_called();
check_expected_ptr(rmutex);
return mock();
}
void env_rmutex_unlock(env_rmutex *rmutex)
{
function_called();
check_expected_ptr(rmutex);
}
int env_rmutex_is_locked(env_rmutex *rmutex)
{
function_called();
check_expected_ptr(rmutex);
return mock();
}
int env_rwsem_init(env_rwsem *s)
{
function_called();
@@ -298,11 +265,6 @@ void env_atomic_sub(int i, env_atomic *a)
*a -= i;
}
bool env_atomic_sub_and_test(int i, env_atomic *a)
{
return *a-=i == 0;
}
void env_atomic_inc(env_atomic *a)
{
++*a;
@@ -318,11 +280,6 @@ bool env_atomic_dec_and_test(env_atomic *a)
return --*a == 0;
}
bool env_atomic_inc_and_test(env_atomic *a)
{
return ++*a == 0;
}
int env_atomic_add_return(int i, env_atomic *a)
{
return *a+=i;
@@ -422,18 +379,6 @@ void env_spinlock_unlock(env_spinlock *l)
check_expected_ptr(l);
}
void env_spinlock_lock_irq(env_spinlock *l)
{
function_called();
check_expected_ptr(l);
}
void env_spinlock_unlock_irq(env_spinlock *l)
{
function_called();
check_expected_ptr(l);
}
void env_rwlock_init(env_rwlock *l)
{
function_called();
@@ -464,20 +409,6 @@ void env_rwlock_write_unlock(env_rwlock *l)
check_expected_ptr(l);
}
void env_waitqueue_init(env_waitqueue *w)
{
w->completed = false;
w->waiting = false;
w->co = NULL;
}
void env_waitqueue_wake_up(env_waitqueue *w)
{
w->completed = true;
if (!w->waiting || !w->co)
return;
}
void env_bit_set(int nr, volatile void *addr)
{
char *byte = (char *) addr + (nr >> 3);
@@ -510,11 +441,6 @@ void env_touch_softlockup_wd(void)
function_called();
}
void env_schedule(void)
{
function_called();
}
int env_in_interrupt(void)
{
function_called();
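In the mocked environment the removed functions were ordinary cmocka wrappers (`function_called()`, `check_expected_ptr()`, `mock()`), so unit tests that still queue expectations or return values for them have to drop that setup as well. A hedged sketch of the kind of pairing that loses its target, using the deleted `env_mutex_trylock` mock as an example (the test body and include path are assumptions, not part of this commit):

```c
/* Illustrative only: expectations of this shape were paired with the deleted
 * cmocka wrappers and have nothing to satisfy them once the mocks are gone. */
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <setjmp.h>
#include <cmocka.h>
#include "ocf_env.h"	/* env header used by the unit-test framework (assumed) */

static void example_test(void **state)
{
	env_mutex mutex;

	expect_function_call(env_mutex_trylock);	/* pairs with function_called()     */
	expect_any(env_mutex_trylock, mutex);		/* pairs with check_expected_ptr()  */
	will_return(env_mutex_trylock, 0);		/* value handed back through mock() */

	/* ... exercise code under test that tries to take the mutex ... */
	(void)state;
	(void)mutex;
}
```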

(fourth changed file, name not shown)

@@ -120,8 +120,6 @@ void *env_allocator_new(env_allocator *allocator);
void env_allocator_del(env_allocator *allocator, void *item);
uint32_t env_allocator_item_count(env_allocator *allocator);
/* *** MUTEX *** */
typedef struct {
@@ -134,12 +132,8 @@ void env_mutex_lock(env_mutex *mutex);
int env_mutex_lock_interruptible(env_mutex *mutex);
int env_mutex_trylock(env_mutex *mutex);
void env_mutex_unlock(env_mutex *mutex);
int env_mutex_is_locked(env_mutex *mutex);
/* *** RECURSIVE MUTEX *** */
typedef env_mutex env_rmutex;
@@ -150,12 +144,8 @@ void env_rmutex_lock(env_rmutex *rmutex);
int env_rmutex_lock_interruptible(env_rmutex *rmutex);
int env_rmutex_trylock(env_rmutex *rmutex);
void env_rmutex_unlock(env_rmutex *rmutex);
int env_rmutex_is_locked(env_rmutex *rmutex);
/* *** RW SEMAPHORE *** */
typedef struct {
pthread_rwlock_t lock;
@@ -175,8 +165,6 @@ void env_rwsem_down_write(env_rwsem *s);
int env_rwsem_down_write_trylock(env_rwsem *s);
int env_rwsem_is_locked(env_rwsem *s);
/* *** ATOMIC VARIABLES *** */
typedef int env_atomic;
@@ -191,16 +179,12 @@ void env_atomic_add(int i, env_atomic *a);
void env_atomic_sub(int i, env_atomic *a);
bool env_atomic_sub_and_test(int i, env_atomic *a);
void env_atomic_inc(env_atomic *a);
void env_atomic_dec(env_atomic *a);
bool env_atomic_dec_and_test(env_atomic *a);
bool env_atomic_inc_and_test(env_atomic *a);
int env_atomic_add_return(int i, env_atomic *a);
int env_atomic_sub_return(int i, env_atomic *a);
@@ -253,10 +237,6 @@ void env_spinlock_lock(env_spinlock *l);
void env_spinlock_unlock(env_spinlock *l);
void env_spinlock_lock_irq(env_spinlock *l);
void env_spinlock_unlock_irq(env_spinlock *l);
#define env_spinlock_lock_irqsave(l, flags) \
env_spinlock_lock(l); (void)flags;
@@ -286,10 +266,6 @@ typedef struct {
Coroutine *co;
} env_waitqueue;
void env_waitqueue_init(env_waitqueue *w);
void env_waitqueue_wake_up(env_waitqueue *w);
#define env_waitqueue_wait(w, condition) \
({ \
int __ret = 0; \
@@ -314,8 +290,6 @@ bool env_bit_test(int nr, const volatile unsigned long *addr);
void env_touch_softlockup_wd(void);
void env_schedule(void);
int env_in_interrupt(void);
uint64_t env_get_tick_count(void);
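Finally, the removed `_irq` spinlock variants were plain aliases in both environments: the POSIX bodies above simply forward to `env_spinlock_lock`/`env_spinlock_unlock`, and the `env_spinlock_lock_irqsave` macro that covers the same ground survives in both headers. A caller still using one of the removed names can substitute the plain primitives directly; a minimal sketch with a hypothetical caller:

```c
/* Illustrative sketch only: the deleted _irq variants forwarded to the plain
 * lock/unlock calls, so this is a drop-in replacement for their users. */
#include "ocf_env.h"

static void my_critical_section(env_spinlock *l)
{
	env_spinlock_lock(l);		/* was env_spinlock_lock_irq(l)   */
	/* ... work guarded by the spinlock ... */
	env_spinlock_unlock(l);		/* was env_spinlock_unlock_irq(l) */
}
```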