Align structures to cacheline

Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Kozlowski Mateusz authored 2021-07-05 11:33:50 +02:00
parent f863bb627f
commit f494448f97
7 changed files with 33 additions and 28 deletions
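
The hunks below make two kinds of layout change. Hot, frequently-written members (the trace/lock pair in struct ocf_cache, the lock bookkeeping in struct ocf_alock) are gathered into an anonymous struct forced onto its own 64-byte cacheline, so that writes to them cannot false-share a line with neighbouring read-mostly fields. The remaining hunks only reorder members, apparently so that wide fields sit on their natural alignment and related small fields pack together. A minimal sketch of the first pattern, with made-up names (not taken from the OCF sources), assuming GCC/Clang attribute syntax and a 64-byte cacheline:

    #include <stdint.h>

    struct cache_like_example {
        uint64_t cold_config;           /* read-mostly after start-up */

        /* members written on every I/O get their own cacheline, so
         * updating them never invalidates the line holding cold_config */
        struct {
            uint64_t hot_lock_word;
            uint64_t hot_counter;
        } __attribute__((aligned(64)));
    };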


@@ -51,14 +51,14 @@ struct ocf_cache_info {
 	uint32_t dirty;
 	/*!< Dirty blocks within cache (in cache lines) */

+	uint64_t dirty_for;
+	/*!< How long there are dirty cache lines (in seconds) */
+
 	uint32_t dirty_initial;
 	/*!< Dirty blocks within cache that where there when switching
 	 * out of WB mode
 	 */

-	uint64_t dirty_for;
-	/*!< How long there are dirty cache lines (in seconds) */
-
 	ocf_cache_mode_t cache_mode;
 	/*!< Current cache mode */


@@ -263,14 +263,6 @@ struct ocf_mngt_cache_config {
 	bool metadata_volatile;

-	/**
-	 * @brief Backfill configuration
-	 */
-	struct {
-		uint32_t max_queue_size;
-		uint32_t queue_unblock_size;
-	} backfill;
-
 	/**
 	 * @brief Start cache and keep it locked
 	 *
@@ -288,6 +280,14 @@ struct ocf_mngt_cache_config {
 	 * @brief If set, try to submit all I/O in fast path.
 	 */
 	bool use_submit_io_fast;
+
+	/**
+	 * @brief Backfill configuration
+	 */
+	struct {
+		uint32_t max_queue_size;
+		uint32_t queue_unblock_size;
+	} backfill;
 };

 /**
@@ -367,16 +367,16 @@ struct ocf_mngt_cache_device_config {
 	 */
 	struct ocf_volume_uuid uuid;

-	/**
-	 * @brief Cache volume type
-	 */
-	uint8_t volume_type;
-
 	/**
 	 * @brief Cache line size
 	 */
 	ocf_cache_line_size_t cache_line_size;

+	/**
+	 * @brief Cache volume type
+	 */
+	uint8_t volume_type;
+
 	/**
 	 * @brief Automatically open core volumes when loading cache
 	 *
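
The swap above looks like a pure padding fix: the uuid (a size plus a pointer on LP64) ends 8-byte aligned, so a lone uint8_t placed before the 64-bit cache_line_size forces 7 bytes of padding, whereas placing it after lets it pack with the bool flags that follow. An illustrative sketch with stand-in names, assuming a typical LP64 ABI; the fields and sizes are not taken from the real ocf_mngt_cache_device_config:

    #include <stdbool.h>
    #include <stdint.h>

    struct device_config_before {
        void *uuid_data;                /* offset 0 */
        uint64_t uuid_size;             /* offset 8 */
        uint8_t volume_type;            /* offset 16, then 7 bytes of padding */
        uint64_t cache_line_size;       /* offset 24 */
        bool force;                     /* offset 32 */
    };                                  /* sizeof == 40 */

    struct device_config_after {
        void *uuid_data;
        uint64_t uuid_size;
        uint64_t cache_line_size;       /* offset 16, no padding needed */
        uint8_t volume_type;            /* offset 24, packs with the flag */
        bool force;                     /* offset 25 */
    };                                  /* sizeof == 32 */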


@@ -132,9 +132,11 @@ struct ocf_cache {
 	bool use_submit_io_fast;

-	struct ocf_trace trace;
-
-	struct ocf_async_lock lock;
+	struct {
+		struct ocf_trace trace;
+		struct ocf_async_lock lock;
+	} __attribute__((aligned(64)));

 	// This should be on it's own cacheline ideally
 	env_atomic last_access_ms;
 };
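
Because the anonymous struct's size is padded up to its 64-byte alignment, whatever follows it (here last_access_ms) starts on the next cacheline. A hypothetical compile-time check of that property, not part of the commit, assuming C11 and a 64-byte cacheline:

    #include <assert.h>
    #include <stdalign.h>
    #include <stddef.h>

    struct hot_group_example {              /* stand-in for struct ocf_cache */
        struct {
            long trace_and_lock_words[4];   /* stand-in for trace + lock */
        } __attribute__((aligned(64)));

        int last_access_ms;                 /* stand-in for env_atomic */
    };

    static_assert(alignof(struct hot_group_example) == 64,
            "hot group is not cacheline aligned");
    static_assert(offsetof(struct hot_group_example, last_access_ms) == 64,
            "the counter should start on the following cacheline");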


@@ -143,8 +143,6 @@ struct ocf_request {
 	ctx_data_t *cp_data;
 	/*!< Copy of request data */

-	ocf_req_cache_mode_t cache_mode;
-
 	uint64_t byte_position;
 	/*!< LBA byte position of request in core domain */
@@ -196,6 +194,8 @@ struct ocf_request {
 	uint8_t lock_idx : OCF_METADATA_GLOBAL_LOCK_IDX_BITS;
 	/* !< Selected global metadata read lock */

+	ocf_req_cache_mode_t cache_mode;
+
 	log_sid_t sid;
 	/*!< Tracing sequence ID */


@@ -56,16 +56,19 @@ struct ocf_alock_waiters_list {
 };

 struct ocf_alock {
-	ocf_cache_t cache;
-	env_mutex lock;
-	env_atomic *access;
-	env_atomic waiting;
+	struct {
+		ocf_cache_t cache;
+		env_mutex lock;
+		env_atomic waiting;
+	} __attribute__((__aligned__(64)));
+
 	ocf_cache_line_t num_entries;
+	env_atomic *access;
 	env_allocator *allocator;
 	struct ocf_alock_lock_cbs *cbs;
 	struct ocf_alock_waiters_list waiters_lsts[_WAITERS_LIST_ENTRIES];
-};
+} __attribute__((__aligned__(64)));

 void ocf_alock_mark_index_locked(struct ocf_alock *alock,
 		struct ocf_request *req, unsigned index, bool locked)
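
Marking the whole struct ocf_alock with __aligned__(64) only helps if the allocation backing it actually honours the stricter alignment; whether OCF's env allocation layer guarantees that is outside this diff. As a general, non-OCF illustration, over-aligned types need an alignment-aware allocator, since plain malloc() is only required to align for ordinary types:

    #include <stdalign.h>
    #include <stdlib.h>

    struct overaligned_example {
        int hot;
    } __attribute__((__aligned__(64)));

    struct overaligned_example *alloc_example(void)
    {
        /* aligned_alloc (C11) wants the size to be a multiple of the
         * alignment; sizeof already is, because the attribute rounds
         * the type's size up to 64 bytes */
        return aligned_alloc(alignof(struct overaligned_example),
                sizeof(struct overaligned_example));
    }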


@@ -52,18 +52,18 @@ class CacheConfig(Structure):
         ("_cache_line_size", c_uint64),
         ("_metadata_layout", c_uint32),
         ("_metadata_volatile", c_bool),
-        ("_backfill", Backfill),
         ("_locked", c_bool),
         ("_pt_unaligned_io", c_bool),
         ("_use_submit_io_fast", c_bool),
+        ("_backfill", Backfill),
     ]


 class CacheDeviceConfig(Structure):
     _fields_ = [
         ("_uuid", Uuid),
-        ("_volume_type", c_uint8),
         ("_cache_line_size", c_uint64),
+        ("_volume_type", c_uint8),
         ("_force", c_bool),
         ("_min_free_ram", c_uint64),
         ("_perform_test", c_bool),


@@ -24,8 +24,8 @@ class CacheInfo(Structure):
         ("inactive", _Inactive),
         ("occupancy", c_uint32),
         ("dirty", c_uint32),
-        ("dirty_initial", c_uint32),
         ("dirty_for", c_uint64),
+        ("dirty_initial", c_uint32),
         ("cache_mode", c_uint32),
         ("fallback_pt", _FallbackPt),
         ("cleaning_policy", c_uint32),