Merge pull request #33 from micrakow/master

Added sample env files and makefile to be used by functional tests
This commit is contained in:
Michał Mielewczyk 2019-01-08 13:32:17 +01:00 committed by GitHub
commit e6dc62e09a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 1046 additions and 4 deletions

View File

@@ -82,14 +82,14 @@ void ocf_io_get(struct ocf_io *io)
{
	struct ocf_io_meta *io_meta = ocf_io_get_meta(io);

-	atomic_inc_return(&io_meta->ref_count);
+	env_atomic_inc_return(&io_meta->ref_count);
}

void ocf_io_put(struct ocf_io *io)
{
	struct ocf_io_meta *io_meta = ocf_io_get_meta(io);

-	if (atomic_dec_return(&io_meta->ref_count))
+	if (env_atomic_dec_return(&io_meta->ref_count))
		return;

	env_allocator_del(io->obj->type->allocator,

40
tests/functional/Makefile Executable file
View File

@ -0,0 +1,40 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

PWD=$(shell pwd)
OCFDIR=$(PWD)/../../
ADAPTERDIR=$(PWD)/test_adapter
SRCDIR=$(ADAPTERDIR)/src
INCDIR=$(ADAPTERDIR)/include

CC=gcc
CFLAGS=-g -Wall -I$(INCDIR) -I$(SRCDIR)
LDFLAGS=-pthread

SRC=$(shell find $(SRCDIR) -name \*.c)
OBJS=$(patsubst %.c, %.o, $(SRC))
OCFLIB=libocf.so

all: sync $(OBJS) $(OCFLIB)

# Link every object into the shared library used by the Python tests.
$(OCFLIB): $(OBJS)
	$(CC) -shared -o $@ $(CFLAGS) $^ -fPIC $(LDFLAGS)

# Compile one translation unit. $< is the single .c prerequisite;
# the original used $^, which passes every prerequisite to one compile.
%.o: %.c
	$(CC) -c $(CFLAGS) -o $@ -fPIC $< $(LDFLAGS)

# Copy OCF sources/headers into the adapter directory.
sync:
	$(MAKE) -C $(OCFDIR) inc O=$(ADAPTERDIR)
	$(MAKE) -C $(OCFDIR) src O=$(ADAPTERDIR)

clean:
	rm -rf $(OCFLIB) $(OBJS)

distclean: clean
	rm -rf $(SRCDIR)/ocf
	rm -rf $(INCDIR)/ocf

# sync/clean/distclean produce no files named after themselves,
# so all of them must be phony (the original listed only all/clean).
.PHONY: all sync clean distclean

12
tests/functional/lib.py Normal file
View File

@ -0,0 +1,12 @@
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from ctypes import *

# Handle to the compiled OCF shared library; None until LoadOcfLib() runs.
lib = None

#TODO Consider changing lib to singleton
def LoadOcfLib():
    """Load ./libocf.so via ctypes and store the handle in the module-level `lib`.

    Assumes the current working directory contains libocf.so built by the
    functional-test Makefile — TODO confirm callers chdir appropriately.
    """
    global lib
    lib = cdll.LoadLibrary('./libocf.so')

View File

@ -0,0 +1,142 @@
/*
* Copyright(c) 2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#include "ocf_env.h"
#include <sched.h>
#include <execinfo.h>
/* Bookkeeping for a fixed-size object pool (user-space env implementation). */
struct _env_allocator {
	/*!< Memory pool ID unique name */
	char *name;

	/*!< Size of specific item of memory pool (user size + item header) */
	uint32_t item_size;

	/*!< Number of currently allocated items in pool */
	env_atomic count;
};
/* Round @size up to the next power of two (sizes 0..2 returned as-is).
 * Uses __builtin_clzll so the whole 64-bit range is handled; the original
 * 32-bit __builtin_clz invoked UB for size - 1 wider than unsigned int. */
static inline size_t env_allocator_align(size_t size)
{
	if (size <= 2)
		return size;

	return 1ULL << (8 * sizeof(unsigned long long) -
			__builtin_clzll((unsigned long long)size - 1));
}
/* Header prepended to every object handed out by the allocator; the user's
 * data lives in the trailing flexible array member. */
struct _env_allocator_item {
	uint32_t flags;
	uint32_t cpu;
	char data[];
};
void *env_allocator_new(env_allocator *allocator)
{
struct _env_allocator_item *item = NULL;
item = calloc(1, allocator->item_size);
if (item) {
item->cpu = 0;
env_atomic_inc(&allocator->count);
}
return &item->data;
}
/* Create an object pool for items of @size bytes; the allocator name is
 * built printf-style from @fmt_name. Returns NULL on failure (OOM or a
 * formatted name that does not fit OCF_ALLOCATOR_NAME_MAX). */
env_allocator *env_allocator_create(uint32_t size, const char *fmt_name, ...)
{
	char name[OCF_ALLOCATOR_NAME_MAX] = { '\0' };
	int result, error = -1;
	va_list args;

	env_allocator *allocator = calloc(1, sizeof(*allocator));
	if (!allocator) {
		error = __LINE__;
		goto err;
	}

	/* Every item carries the bookkeeping header in front of user data. */
	allocator->item_size = size + sizeof(struct _env_allocator_item);

	/* Format allocator name */
	va_start(args, fmt_name);
	result = vsnprintf(name, sizeof(name), fmt_name, args);
	va_end(args);

	/* cast avoids the signed/unsigned comparison; result > 0 already holds */
	if ((result > 0) && ((size_t)result < sizeof(name))) {
		allocator->name = strdup(name);
		if (!allocator->name) {
			error = __LINE__;
			goto err;
		}
	} else {
		/* Formatted string name exceeds max allowed size of name */
		error = __LINE__;
		goto err;
	}

	return allocator;

err:
	printf("Cannot create memory allocator, ERROR %d\n", error);
	/* destroy() tolerates NULL and a partially initialized allocator */
	env_allocator_destroy(allocator);
	return NULL;
}
/* Return @obj (a pointer previously obtained from env_allocator_new() of
 * the same pool) to the allocator: drops the outstanding count and frees
 * the backing memory. */
void env_allocator_del(env_allocator *allocator, void *obj)
{
	/* Recover the bookkeeping header that precedes the user data. */
	struct _env_allocator_item *item =
		container_of(obj, struct _env_allocator_item, data);

	env_atomic_dec(&allocator->count);

	free(item);
}
/* Tear down an allocator. Warns (without failing) if objects are still
 * outstanding. Safe to call with NULL or a partially built allocator. */
void env_allocator_destroy(env_allocator *allocator)
{
	if (!allocator)
		return;

	if (env_atomic_read(&allocator->count)) {
		printf("Not all objects deallocated\n");
		ENV_WARN(true, OCF_PREFIX_SHORT" Cleanup problem\n");
	}

	free(allocator->name);
	free(allocator);
}
/* Number of objects currently allocated from (and not yet returned to)
 * the pool. */
uint32_t env_allocator_item_count(env_allocator *allocator)
{
	return (uint32_t)env_atomic_read(&allocator->count);
}
/* *** DEBUGING *** */

#define ENV_TRACE_DEPTH	16

/* Print the current call stack (up to ENV_TRACE_DEPTH frames) to stdout.
 * backtrace_symbols() allocates; it may return NULL on OOM — the original
 * dereferenced that NULL. free(NULL) is a no-op, so a single free suffices. */
void env_stack_trace(void)
{
	void *trace[ENV_TRACE_DEPTH];
	char **messages = NULL;
	int i, size;

	size = backtrace(trace, ENV_TRACE_DEPTH);
	messages = backtrace_symbols(trace, size);

	printf("[stack trace]>>>\n");
	for (i = 0; messages && i < size; ++i)
		printf("%s\n", messages[i]);
	printf("<<<[stack trace]\n");

	free(messages);
}
/* *** CRC *** */

/* Thin adapter delegating CRC-32 computation to zlib's crc32(). */
uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len)
{
	uint32_t checksum = crc32(crc, data, len);

	return checksum;
}

View File

@ -0,0 +1,658 @@
/*
* Copyright(c) 2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause-Clear
*/
#ifndef __OCF_ENV_H__
#define __OCF_ENV_H__
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_GNU
#define __USE_GNU
#endif
#include <linux/limits.h>
#include <linux/stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdarg.h>
#include <stddef.h>
#include <string.h>
#include <pthread.h>
#include <assert.h>
#include <semaphore.h>
#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/time.h>
#include <sys/param.h>
#include <zlib.h>
#include "ocf_env_list.h"
#include "ocf_env_headers.h"
/* linux sector 512-bytes */
#define ENV_SECTOR_SHIFT	9

/* Kernel-style shorthand integer aliases used throughout OCF. */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

typedef uint64_t sector_t;

/* Kernel attribute and branch-hint shims (GCC/Clang builtins). */
#define __packed	__attribute__((packed))
#define __aligned(x)	__attribute__((aligned(x)))

#define likely(cond)	__builtin_expect(!!(cond), 1)
#define unlikely(cond)	__builtin_expect(!!(cond), 0)

/* min() maps to MIN() from <sys/param.h> (included above). */
#define min(a,b) MIN(a,b)

#define OCF_ALLOCATOR_NAME_MAX 128

/* assumes 4 KiB pages — TODO confirm no target uses a different page size */
#define PAGE_SIZE 4096
/* *** MEMORY MANAGEMENT *** */

/* Kernel GFP-style allocation contexts are meaningless in user space;
 * every flag collapses to 0. */
#define ENV_MEM_NORMAL	0
#define ENV_MEM_NOIO	0
#define ENV_MEM_ATOMIC	0

/* kmalloc() equivalent; @flags is accepted only for API compatibility. */
static inline void *env_malloc(size_t size, int flags)
{
	(void)flags;

	return malloc(size);
}

/* kzalloc() equivalent: zero-filled allocation. */
static inline void *env_zalloc(size_t size, int flags)
{
	(void)flags;

	return calloc(1, size);
}

static inline void env_free(const void *ptr)
{
	free((void *)ptr);
}

/* vmalloc()/vzalloc()/vfree() map to the ordinary heap in user space. */
static inline void *env_vmalloc(size_t size)
{
	return malloc(size);
}

static inline void *env_vzalloc(size_t size)
{
	return env_zalloc(size, 0);
}

static inline void env_vfree(const void *ptr)
{
	free((void *)ptr);
}

/* Currently available physical memory in bytes, per sysconf(3). */
static inline uint64_t env_get_free_memory(void)
{
	return sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}
/* *** ALLOCATOR *** */

/* Fixed-size object pool; implementation lives in the env .c file.
 * create() takes the per-item size and a printf-style name. */
typedef struct _env_allocator env_allocator;

env_allocator *env_allocator_create(uint32_t size, const char *fmt_name, ...);

void env_allocator_destroy(env_allocator *allocator);

/* Returns a zeroed item, or NULL on failure; release with env_allocator_del. */
void *env_allocator_new(env_allocator *allocator);

void env_allocator_del(env_allocator *allocator, void *item);

/* Number of items currently outstanding. */
uint32_t env_allocator_item_count(env_allocator *allocator);
/* *** MUTEX *** */

typedef struct {
	pthread_mutex_t m;
} env_mutex;

#define env_cond_resched() ({})

/* Returns 0 on success, 1 on failure (kernel-style convention). */
static inline int env_mutex_init(env_mutex *mutex)
{
	if (pthread_mutex_init(&mutex->m, NULL))
		return 1;
	return 0;
}

static inline void env_mutex_lock(env_mutex *mutex)
{
	pthread_mutex_lock(&mutex->m);
}

/* Cannot actually be interrupted in user space; always succeeds. */
static inline int env_mutex_lock_interruptible(env_mutex *mutex)
{
	env_mutex_lock(mutex);
	return 0;
}

/* Returns 1 when the lock was acquired, 0 otherwise (kernel semantics). */
static inline int env_mutex_trylock(env_mutex *mutex)
{
	if (pthread_mutex_trylock(&mutex->m) == 0)
		return 1;
	return 0;
}

static inline void env_mutex_unlock(env_mutex *mutex)
{
	pthread_mutex_unlock(&mutex->m);
}

/* Returns 1 when the mutex is currently held, 0 when it is free.
 * (The original returned the inverse, contradicting env_rwsem_is_locked
 * below and the kernel's mutex_is_locked contract.) */
static inline int env_mutex_is_locked(env_mutex *mutex)
{
	if (env_mutex_trylock(mutex)) {
		env_mutex_unlock(mutex);
		return 0;
	}
	return 1;
}

/* *** RECURSIVE MUTEX *** */

typedef env_mutex env_rmutex;

#ifndef PTHREAD_MUTEX_RECURSIVE
/* Strict-ISO feature sets hide the XPG name; fall back to the glibc one. */
#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
#endif

/* Initialize with PTHREAD_MUTEX_RECURSIVE so re-locking from the owning
 * thread nests instead of deadlocking (the original initialized a plain
 * mutex, so any recursive use would deadlock). Returns 0 on success. */
static inline int env_rmutex_init(env_rmutex *rmutex)
{
	pthread_mutexattr_t attr;
	int result;

	if (pthread_mutexattr_init(&attr))
		return 1;
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	result = pthread_mutex_init(&rmutex->m, &attr);
	pthread_mutexattr_destroy(&attr);

	return result ? 1 : 0;
}

static inline void env_rmutex_lock(env_rmutex *rmutex)
{
	env_mutex_lock(rmutex);
}

static inline int env_rmutex_lock_interruptible(env_rmutex *rmutex)
{
	return env_mutex_lock_interruptible(rmutex);
}

static inline int env_rmutex_trylock(env_rmutex *rmutex)
{
	return env_mutex_trylock(rmutex);
}

/* Each lock (including recursive ones) needs a matching unlock. */
static inline void env_rmutex_unlock(env_rmutex *rmutex)
{
	env_mutex_unlock(rmutex);
}

/* NOTE(review): on a recursive mutex, the owning thread's trylock always
 * succeeds, so this reports "free" even while the caller itself holds it. */
static inline int env_rmutex_is_locked(env_rmutex *rmutex)
{
	return env_mutex_is_locked(rmutex);
}
/* *** RW SEMAPHORE *** */

/* Reader/writer semaphore backed by a pthread rwlock. */
typedef struct {
	pthread_rwlock_t lock;
} env_rwsem;

/* 0 on success, a pthread error code otherwise. */
static inline int env_rwsem_init(env_rwsem *s)
{
	return pthread_rwlock_init(&s->lock, NULL);
}

/* Release a previously taken read lock. */
static inline void env_rwsem_up_read(env_rwsem *s)
{
	pthread_rwlock_unlock(&s->lock);
}

/* Block until the read side is acquired. */
static inline void env_rwsem_down_read(env_rwsem *s)
{
	pthread_rwlock_rdlock(&s->lock);
}

/* Try to take the read side; 1 when acquired, 0 otherwise (kernel style). */
static inline int env_rwsem_down_read_trylock(env_rwsem *s)
{
	return pthread_rwlock_tryrdlock(&s->lock) == 0;
}

/* Release a previously taken write lock. */
static inline void env_rwsem_up_write(env_rwsem *s)
{
	pthread_rwlock_unlock(&s->lock);
}

/* Block until the write side is acquired. */
static inline void env_rwsem_down_write(env_rwsem *s)
{
	pthread_rwlock_wrlock(&s->lock);
}

/* Try to take the write side; 1 when acquired, 0 otherwise. */
static inline int env_rwsem_down_write_trylock(env_rwsem *s)
{
	return pthread_rwlock_trywrlock(&s->lock) == 0;
}

/* 1 when any reader or writer holds the semaphore, 0 when it is free. */
static inline int env_rwsem_is_locked(env_rwsem *s)
{
	if (!env_rwsem_down_read_trylock(s))
		return 1;

	env_rwsem_up_read(s);
	return 0;
}

/* "Interruptible" variants cannot be interrupted in user space. */
static inline int env_rwsem_down_write_interruptible(env_rwsem *s)
{
	env_rwsem_down_write(s);
	return 0;
}

static inline int env_rwsem_down_read_interruptible(env_rwsem *s)
{
	env_rwsem_down_read(s);
	return 0;
}
/* *** COMPLETION *** */

/* Completion implemented with a counting semaphore: each complete()
 * releases exactly one wait(). */
struct completion {
	sem_t sem;
};

typedef struct completion env_completion;

/* Start in the "not completed" state (semaphore value 0). */
static inline void env_completion_init(env_completion *completion)
{
	sem_init(&completion->sem, 0, 0);
}

/* Block until a completion is signalled. */
static inline void env_completion_wait(env_completion *completion)
{
	sem_wait(&completion->sem);
}

/* Signal one waiter. */
static inline void env_completion_complete(env_completion *completion)
{
	sem_post(&completion->sem);
}

/* Kernel complete_and_exit() stand-in; @ret is ignored and the calling
 * thread is NOT terminated. */
static inline void env_completion_complete_and_exit(
		env_completion *completion, int ret)
{
	env_completion_complete(completion); /* TODO */
}
/* *** ATOMIC VARIABLES *** */

/* 32-bit atomic counter. NOTE(review): the plain volatile read/write in
 * env_atomic_read/set below is not a guaranteed-atomic operation in ISO C;
 * the inline TODOs mark where real atomic loads/stores belong. */
typedef struct {
	volatile int counter;
} env_atomic;

/* 64-bit atomic counter (same caveat as env_atomic). */
typedef struct {
	volatile long counter;
} env_atomic64;

/* Relaxed read; not a synchronizing operation. */
static inline int env_atomic_read(const env_atomic *a)
{
	return a->counter; /* TODO */
}

/* Relaxed store; not a synchronizing operation. */
static inline void env_atomic_set(env_atomic *a, int i)
{
	a->counter = i; /* TODO */
}

/* RMW operations below use GCC __sync builtins (full memory barrier). */
static inline void env_atomic_add(int i, env_atomic *a)
{
	__sync_add_and_fetch(&a->counter, i);
}

static inline void env_atomic_sub(int i, env_atomic *a)
{
	__sync_sub_and_fetch(&a->counter, i);
}

/* Subtract @i; true when the counter reached exactly zero. */
static inline bool env_atomic_sub_and_test(int i, env_atomic *a)
{
	return __sync_sub_and_fetch(&a->counter, i) == 0;
}

static inline void env_atomic_inc(env_atomic *a)
{
	env_atomic_add(1, a);
}

static inline void env_atomic_dec(env_atomic *a)
{
	env_atomic_sub(1, a);
}

/* Decrement; true when the counter reached exactly zero. */
static inline bool env_atomic_dec_and_test(env_atomic *a)
{
	return __sync_sub_and_fetch(&a->counter, 1) == 0;
}

/* Increment; true when the counter wrapped to exactly zero. */
static inline bool env_atomic_inc_and_test(env_atomic *a)
{
	return __sync_add_and_fetch(&a->counter, 1) == 0;
}

/* Add/sub returning the NEW value. */
static inline int env_atomic_add_return(int i, env_atomic *a)
{
	return __sync_add_and_fetch(&a->counter, i);
}

static inline int env_atomic_sub_return(int i, env_atomic *a)
{
	return __sync_sub_and_fetch(&a->counter, i);
}

static inline int env_atomic_inc_return(env_atomic *a)
{
	return env_atomic_add_return(1, a);
}

static inline int env_atomic_dec_return(env_atomic *a)
{
	return env_atomic_sub_return(1, a);
}

/* Compare-and-swap: returns the value observed before the attempt
 * (equals @old exactly when the swap succeeded). */
static inline int env_atomic_cmpxchg(env_atomic *a, int old, int new_value)
{
	return __sync_val_compare_and_swap(&a->counter, old, new_value);
}

/* Add @i unless the counter equals @u; non-zero when the add happened
 * (classic kernel atomic_add_unless CAS retry loop). */
static inline int env_atomic_add_unless(env_atomic *a, int i, int u)
{
	int c, old;

	c = env_atomic_read(a);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = env_atomic_cmpxchg((a), c, c + (i));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

/* 64-bit counterparts of the operations above. */
static inline long env_atomic64_read(const env_atomic64 *a)
{
	return a->counter; /* TODO */
}

static inline void env_atomic64_set(env_atomic64 *a, long i)
{
	a->counter = i; /* TODO */
}

static inline void env_atomic64_add(long i, env_atomic64 *a)
{
	__sync_add_and_fetch(&a->counter, i);
}

static inline void env_atomic64_sub(long i, env_atomic64 *a)
{
	__sync_sub_and_fetch(&a->counter, i);
}

static inline void env_atomic64_inc(env_atomic64 *a)
{
	env_atomic64_add(1, a);
}

static inline void env_atomic64_dec(env_atomic64 *a)
{
	env_atomic64_sub(1, a);
}

static inline long env_atomic64_cmpxchg(env_atomic64 *a, long old, long new)
{
	return __sync_val_compare_and_swap(&a->counter, old, new);
}
/* *** SPIN LOCKS *** */

typedef struct {
	pthread_spinlock_t lock;
} env_spinlock;

static inline void env_spinlock_init(env_spinlock *l)
{
	pthread_spin_init(&l->lock, 0);
}

static inline void env_spinlock_lock(env_spinlock *l)
{
	pthread_spin_lock(&l->lock);
}

static inline void env_spinlock_unlock(env_spinlock *l)
{
	pthread_spin_unlock(&l->lock);
}

/* IRQ variants degenerate to plain lock/unlock in user space. */
static inline void env_spinlock_lock_irq(env_spinlock *l)
{
	env_spinlock_lock(l);
}

static inline void env_spinlock_unlock_irq(env_spinlock *l)
{
	env_spinlock_unlock(l);
}

/* Wrapped in do/while(0) so each macro is a single statement; the
 * originals expanded to TWO statements, so an unbraced
 * "if (x) env_spinlock_lock_irqsave(...)" locked unconditionally. */
#define env_spinlock_lock_irqsave(l, flags) \
	do { \
		(void)(flags); \
		env_spinlock_lock(l); \
	} while (0)

#define env_spinlock_unlock_irqrestore(l, flags) \
	do { \
		(void)(flags); \
		env_spinlock_unlock(l); \
	} while (0)
/* *** RW LOCKS *** */

/* Plain reader/writer lock (unlike env_rwsem, no is-locked query). */
typedef struct {
	pthread_rwlock_t lock;
} env_rwlock;

static inline void env_rwlock_init(env_rwlock *l)
{
	pthread_rwlock_init(&l->lock, NULL);
}

/* Shared (reader) side. */
static inline void env_rwlock_read_lock(env_rwlock *l)
{
	pthread_rwlock_rdlock(&l->lock);
}

static inline void env_rwlock_read_unlock(env_rwlock *l)
{
	pthread_rwlock_unlock(&l->lock);
}

/* Exclusive (writer) side. */
static inline void env_rwlock_write_lock(env_rwlock *l)
{
	pthread_rwlock_wrlock(&l->lock);
}

static inline void env_rwlock_write_unlock(env_rwlock *l)
{
	pthread_rwlock_unlock(&l->lock);
}
/* *** WAITQUEUE *** */

typedef struct {
	sem_t sem;
} env_waitqueue;

static inline void env_waitqueue_init(env_waitqueue *w)
{
	sem_init(&w->sem, 0, 0);
}

/* Release one waiter (posts even when nobody waits yet). */
static inline void env_waitqueue_wake_up(env_waitqueue *w)
{
	sem_post(&w->sem);
}

/* Wait until @condition is true or a wake-up is posted; evaluates to 0.
 * @w is parenthesized so any lvalue expression works (the original broke
 * for anything but a plain identifier). NOTE(review): the condition is
 * checked only once — a wake-up returns even if it is still false, so
 * callers must tolerate spurious returns. */
#define env_waitqueue_wait(w, condition) \
({ \
	int __wq_ret = 0; \
	if (!(condition)) \
		sem_wait(&(w).sem); \
	__wq_ret; \
})
/* *** BIT OPERATIONS *** */

/* Byte-granular atomic bitmap helpers: bit @nr counts from bit 0 of the
 * first byte at @addr. unsigned char avoids the implementation-defined
 * signed-char mask the originals produced for bit 7 of each byte. */
static inline void env_bit_set(int nr, volatile void *addr)
{
	unsigned char *byte = (unsigned char *)addr + (nr >> 3);
	unsigned char mask = 1U << (nr & 7);

	__sync_or_and_fetch(byte, mask);
}

static inline void env_bit_clear(int nr, volatile void *addr)
{
	unsigned char *byte = (unsigned char *)addr + (nr >> 3);
	unsigned char mask = (unsigned char)~(1U << (nr & 7));

	__sync_and_and_fetch(byte, mask);
}

/* Non-atomic test of bit @nr. */
static inline bool env_bit_test(int nr, const volatile unsigned long *addr)
{
	const unsigned char *byte = (const unsigned char *)addr + (nr >> 3);
	unsigned char mask = 1U << (nr & 7);

	return !!(*byte & mask);
}
/* *** SCHEDULING *** */

/* Kernel schedule() equivalent: yield the CPU.
 * NOTE(review): sched_yield() is declared in <sched.h>, which this header
 * does not include itself — confirm users get it transitively. */
static inline void env_schedule(void)
{
	sched_yield();
}

/* User space never executes in interrupt context. */
static inline int env_in_interrupt(void)
{
	return 0;
}

/* TODO(mwysocza): Is this really needed? */
static inline void env_touch_lockup_watchdog(void)
{
}

/* One "tick" is one millisecond of wall-clock time (gettimeofday based,
 * so subject to clock adjustments despite the name). */
static inline uint64_t env_get_tick_count(void)
{
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}

/* Ticks are already milliseconds, so this is the identity. */
static inline uint64_t env_ticks_to_msecs(uint64_t j)
{
	return j;
}

static inline uint64_t env_ticks_to_secs(uint64_t j)
{
	return j / 1000;
}

static inline uint64_t env_secs_to_ticks(uint64_t j)
{
	return j * 1000;
}
/* *** SORTING *** */

/* Kernel sort() signature adapter over qsort().
 * NOTE(review): @swap_fn is ignored (qsort swaps bytes itself) and qsort
 * is not stable — callers relying on a custom swap or on stability should
 * be audited. */
static inline void env_sort(void *base, size_t num, size_t size,
		int (*cmp_fn)(const void *, const void *),
		void (*swap_fn)(void *, void *, int size))
{
	qsort(base, num, size, cmp_fn);
}
/* *** STRING OPERATIONS *** */

/* Checked-string-style wrappers: each statement expression evaluates to 0
 * ("success") to mimic the *_s APIs. Bounds are the min() of both sizes. */
#define env_memset(dest, dmax, val) ({ \
	memset(dest, val, dmax); \
	0; \
})

#define env_memcpy(dest, dmax, src, slen) ({ \
	memcpy(dest, src, min(dmax, slen)); \
	0; \
})

#define env_memcmp(s1, s1max, s2, s2max, diff) ({ \
	*diff = memcmp(s1, s2, min(s1max, s2max)); \
	0; \
})

/* env_strdup(s, n) maps to strndup(): allocates; caller frees. */
#define env_strdup strndup
#define env_strnlen(s, smax) strnlen(s, smax)
#define env_strncmp strncmp

/* NOTE(review): strncpy does not NUL-terminate when the source fills the
 * buffer — callers must guarantee termination themselves. */
#define env_strncpy(dest, dmax, src, slen) ({ \
	strncpy(dest, src, min(dmax, slen)); \
	0; \
})
/* *** DEBUGING *** */

/* Kernel WARN() lookalikes: print only when the condition holds.
 * (The original printed unconditionally, ignoring @cond.) */
#define ENV_WARN(cond, fmt...) \
	do { \
		if (cond) \
			printf(fmt); \
	} while (0)

#define ENV_WARN_ON(cond) ((void)0)
#define ENV_WARN_ONCE(cond, fmt...) ENV_WARN(cond, fmt)

/* Hard assertions backed by assert(3). */
#define ENV_BUG() assert(0)
#define ENV_BUG_ON(cond) assert(!(cond))

/* Recover a pointer to the enclosing struct from a member pointer.
 * __typeof__ (rather than plain typeof) keeps this valid under -std=c11;
 * the outer parens make "container_of(...)->field" parse correctly. */
#define container_of(ptr, type, member) ({ \
	const __typeof__(((type *)0)->member)*__mptr = (ptr); \
	((type *)((char *)__mptr - offsetof(type, member))); })

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
/* Sleep for @n milliseconds. */
static inline void env_msleep(uint64_t n)
{
	usleep(n * 1000);
}

struct env_timeval {
	uint64_t sec, usec;
};

/* Wall-clock time split into seconds and microseconds. */
static inline void env_gettimeofday(struct env_timeval *tv)
{
	struct timeval t;
	gettimeofday(&t, NULL);
	tv->sec = t.tv_sec;
	tv->usec = t.tv_usec;
}

uint32_t env_crc32(uint32_t crc, uint8_t const *data, size_t len);

/* printf format for uint64_t. PRIu64 from <inttypes.h> (already included
 * above) is correct on both 32- and 64-bit targets; the original
 * hard-coded "lu" broke 32-bit builds where uint64_t is unsigned long long. */
#define ENV_PRIu64 PRIu64
#endif /* __OCF_ENV_H__ */

View File

@ -0,0 +1,22 @@
/*
 * Copyright(c) 2019 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef __OCF_ENV_HEADERS_H__
#define __OCF_ENV_HEADERS_H__

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

/* TODO: Move prefix printing to context logger. */
#define OCF_LOGO "Intel(R) OCF"
#define OCF_PREFIX_SHORT "[" OCF_LOGO "] "
#define OCF_PREFIX_LONG "Open CAS Framework"

/* Version triple reported by the framework. */
#define OCF_VERSION_MAIN 1
#define OCF_VERSION_MAJOR 1
#define OCF_VERSION_MINOR 1

#endif /* __OCF_ENV_HEADERS_H__ */

View File

@ -0,0 +1,168 @@
/*
 * Copyright(c) 2019 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef __OCF_ENV_LIST__
#define __OCF_ENV_LIST__

#include <stddef.h>	/* offsetof(); the original relied on a transitive include */

/* Poison values for the links of a removed entry.
 * NOTE(review): currently unused by list_del() below. */
#define LIST_POISON1  ((void *)0x101)
#define LIST_POISON2  ((void *)0x202)

/**
 * List entry structure mimicking linux kernel based one.
 */
struct list_head {
	struct list_head *next;
	struct list_head *prev;
};

/**
 * start an empty list
 * (do/while(0) so the macro is a single statement and stays safe in
 * unbraced if/else — the original bare compound block was not)
 */
#define INIT_LIST_HEAD(l) do { (l)->prev = (l); (l)->next = (l); } while (0)

/**
 * Add item to list head.
 * @param it list entry to be added
 * @param l1 list main node (head)
 */
static inline void list_add(struct list_head *it, struct list_head *l1)
{
	it->prev = l1;
	it->next = l1->next;

	l1->next->prev = it;
	l1->next = it;
}

/**
 * Add item it to tail.
 * @param it list entry to be added
 * @param l1 list main node (head)
 */
static inline void list_add_tail(struct list_head *it, struct list_head *l1)
{
	it->prev = l1->prev;
	it->next = l1;

	l1->prev->next = it;
	l1->prev = it;
}

/**
 * check if a list is empty (return true)
 * @param l1 list main node (head)
 */
static inline int list_empty(struct list_head *l1)
{
	return l1->next == l1;
}

/**
 * delete an entry from a list
 * @param it list entry to be deleted
 */
static inline void list_del(struct list_head *it)
{
	it->next->prev = it->prev;
	it->prev->next = it->next;
}

/**
 * Move element to list head.
 * @param it list entry to be moved
 * @param l1 list main node (head)
 */
static inline void list_move(struct list_head *it, struct list_head *l1)
{
	list_del(it);
	list_add(it, l1);
}

/**
 * Move element to list tail.
 * @param it list entry to be moved
 * @param l1 list main node (head)
 */
static inline void list_move_tail(struct list_head *it, struct list_head *l1)
{
	list_del(it);
	list_add_tail(it, l1);
}

/**
 * Extract an entry.
 * Uses (char *) arithmetic (void * arithmetic is a GNU extension) and an
 * outer paren so e.g. list_entry(h, T, f)->x parses correctly — the
 * original cast without outer parens bound '->' before the cast.
 * @param list_head_i list head item, from which entry is extracted
 * @param item_type type (struct) of list entry
 * @param field_name name of list_head field within item_type
 */
#define list_entry(list_head_i, item_type, field_name) \
	((item_type *)((char *)(list_head_i) - offsetof(item_type, field_name)))

#define list_first_entry(list_head_i, item_type, field_name) \
	list_entry((list_head_i)->next, item_type, field_name)

/**
 * @param iterator uninitialized list_head pointer, to be used as iterator
 * @param plist list head (main node)
 */
#define list_for_each(iterator, plist) \
	for (iterator = (plist)->next; \
	     (iterator)->next != (plist)->next; \
	     iterator = (iterator)->next)

/**
 * Safe version of list_for_each which works even if entries are deleted
 * during loop.
 * @param iterator uninitialized list_head pointer, to be used as iterator
 * @param q another uninitialized list_head, used as helper
 * @param plist list head (main node)
 */
/*
 * Algorithm handles situation, where q is deleted.
 * consider in example 3 element list with header h:
 *
 *   h -> 1 -> 2 -> 3 ->
 * 1.     i    q
 *
 * 2.          i    q
 *
 * 3.               i    q
 */
#define list_for_each_safe(iterator, q, plist) \
	for (iterator = (q = (plist)->next->next)->prev; \
	     (q) != (plist)->next; \
	     iterator = (q = (q)->next)->prev)

/* __typeof__ (not plain typeof) keeps these macros valid under -std=c11. */
#define _list_entry_helper(item, head, field_name) \
	list_entry(head, __typeof__(*item), field_name)

/**
 * Iterate over list entries.
 * @param item pointer to list item (iterator)
 * @param plist pointer to list_head item
 * @param field_name name of list_head field in list entry
 */
#define list_for_each_entry(item, plist, field_name) \
	for (item = _list_entry_helper(item, (plist)->next, field_name); \
	     _list_entry_helper(item, (item)->field_name.next, field_name) !=\
	     _list_entry_helper(item, (plist)->next, field_name); \
	     item = _list_entry_helper(item, (item)->field_name.next, field_name))

/**
 * Safe version of list_for_each_entry which works even if entries are
 * deleted during loop.
 * @param item pointer to list item (iterator)
 * @param q another pointer to list item, used as helper
 * @param plist pointer to list_head item
 * @param field_name name of list_head field in list entry
 */
#define list_for_each_entry_safe(item, q, plist, field_name) \
	for (item = _list_entry_helper(item, (plist)->next, field_name), \
	     q = _list_entry_helper(item, (item)->field_name.next, field_name); \
	     _list_entry_helper(item, (item)->field_name.next, field_name) != \
	     _list_entry_helper(item, (plist)->next, field_name); \
	     item = q, q = _list_entry_helper(q, (q)->field_name.next, field_name))

#endif // __OCF_ENV_LIST__