Allocator structures cacheline alignment

Align atomic fields to a different cacheline, so that no false sharing
between CPUs occurs.

Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
This commit is contained in:
Kozlowski Mateusz 2021-03-12 15:28:57 +01:00
parent 94dc9048c7
commit c751974ae0
2 changed files with 13 additions and 7 deletions

View File

@@ -14,16 +14,16 @@ struct _env_allocator {
/*!< Memory pool ID unique name */
char *name;
/*!< OS handle to memory pool */
struct kmem_cache *kmem_cache;
/*!< Handle to the CAS reserve pool */
struct cas_reserve_pool *rpool;
/*!< Size of specific item of memory pool */
uint32_t item_size;
/*!< Number of currently allocated items in pool.
 * Forced onto its own 64B cacheline so that atomic updates of the
 * counter do not false-share with the read-mostly fields above. */
atomic_t count __attribute__((aligned(64)));
};
static inline size_t env_allocator_align(size_t size)

View File

@@ -25,11 +25,17 @@
#define CAS_DEBUG_PARAM(format, ...)
#endif
/* This struct is currently 24B, padded/force-aligned to 32B.
 * With a 64B cacheline this means two instances on different cores may
 * invalidate each other. This shouldn't happen between different physical
 * CPUs and cause false sharing, though: with an even number of cores
 * per CPU, the same cacheline shouldn't be polluted from the other
 * physical CPU.
 */
struct _cas_reserve_pool_per_cpu {
spinlock_t lock;
struct list_head list;
atomic_t count;
} __attribute__((__aligned__(32)));
struct cas_reserve_pool {
uint32_t limit;