mio concurrency

Signed-off-by: Adam Rutkowski <adam.j.rutkowski@intel.com>
Author: Adam Rutkowski
Date: 2021-04-03 21:26:41 -05:00
Committed by: Kozlowski Mateusz
parent 69c3c6761b
commit 06f3c937c3
5 changed files with 211 additions and 28 deletions


@@ -0,0 +1,137 @@
/*
 * Copyright(c) 2021 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf_concurrency.h"
#include "../metadata/metadata_io.h"
#include "../ocf_priv.h"
#include "../ocf_request.h"
#include "../utils/utils_alock.h"
#include "../utils/utils_cache_line.h"

struct ocf_mio_alock
{
	unsigned first_page;
	unsigned num_pages;
};
/* Every page of a metadata IO request takes part in locking */
static bool ocf_mio_lock_line_needs_lock(struct ocf_alock *alock,
		struct ocf_request *req, unsigned index)
{
	return true;
}

static bool ocf_mio_lock_line_is_acting(struct ocf_alock *alock,
		struct ocf_request *req, unsigned index)
{
	return true;
}

/* Only write locks are tracked: bit 'index' of m_req->map is set
 * iff the corresponding page currently holds its write lock */
static bool ocf_mio_lock_line_is_locked(struct ocf_alock *alock,
		struct ocf_request *req, unsigned index, int rw)
{
	struct metadata_io_request *m_req = (struct metadata_io_request *)req;

	if (rw == OCF_WRITE)
		return env_bit_test(index, &m_req->map);
	else
		return false;
}
static void ocf_mio_lock_line_mark_locked(struct ocf_alock *alock,
		struct ocf_request *req, unsigned index, int rw, bool locked)
{
	struct metadata_io_request *m_req = (struct metadata_io_request *)req;

	if (rw == OCF_READ)
		return;

	if (locked)
		env_bit_set(index, &m_req->map);
	else
		env_bit_clear(index, &m_req->map);
}

/* Map an absolute metadata page number to its lock entry, relative
 * to the first page covered by this lock object */
static ocf_cache_line_t ocf_mio_lock_line_get_entry(
		struct ocf_alock *alock, struct ocf_request *req,
		unsigned index)
{
	struct ocf_mio_alock *mio_alock = (void *)alock + ocf_alock_obj_size();
	struct metadata_io_request *m_req = (struct metadata_io_request *)req;
	unsigned page = m_req->page + index;

	ENV_BUG_ON(page < mio_alock->first_page);
	ENV_BUG_ON(page >= mio_alock->first_page + mio_alock->num_pages);

	return page - mio_alock->first_page;
}
static struct ocf_alock_lock_cbs ocf_mio_conc_cbs = {
	.line_needs_lock = ocf_mio_lock_line_needs_lock,
	.line_is_acting = ocf_mio_lock_line_is_acting,
	.line_is_locked = ocf_mio_lock_line_is_locked,
	.line_mark_locked = ocf_mio_lock_line_mark_locked,
	.line_get_entry = ocf_mio_lock_line_get_entry
};

int ocf_mio_async_lock(struct ocf_alock *alock,
		struct metadata_io_request *m_req,
		ocf_req_async_lock_cb cmpl)
{
	return ocf_alock_lock_wr(alock, &ocf_mio_conc_cbs, &m_req->req, cmpl);
}

void ocf_mio_async_unlock(struct ocf_alock *alock,
		struct metadata_io_request *m_req)
{
	ocf_alock_unlock_wr(alock, &ocf_mio_conc_cbs, &m_req->req);
	m_req->map = 0;
}
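For context, a minimal sketch of how a caller might drive this pair. The completion signature and the return convention of the underlying lock call (0 when granted immediately, positive when the request was queued and cmpl fires later, negative on error) are assumptions inferred from alock usage elsewhere in OCF, not guaranteed by this diff:

/* Hypothetical caller, illustrative only */
static void mio_pages_locked(struct ocf_request *req)
{
	/* every page of the request now holds its write lock -
	 * safe to issue the metadata write */
}

static int mio_submit(struct ocf_alock *mio_lock,
		struct metadata_io_request *m_req)
{
	int lock = ocf_mio_async_lock(mio_lock, m_req, mio_pages_locked);

	if (lock < 0)
		return lock;			/* lock setup failed */
	if (lock == 0)
		mio_pages_locked(&m_req->req);	/* acquired inline */
	/* otherwise mio_pages_locked() runs once contenders unlock */
	return 0;
}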
#define ALLOCATOR_NAME_FMT "ocf_%s_mio_concurrency"
#define ALLOCATOR_NAME_MAX (sizeof(ALLOCATOR_NAME_FMT) + OCF_CACHE_NAME_SIZE)

int ocf_mio_concurrency_init(struct ocf_alock **self,
		unsigned first_page, unsigned num_pages,
		ocf_cache_t cache)
{
	struct ocf_alock *alock;
	struct ocf_mio_alock *mio_alock;
	size_t base_size = ocf_alock_obj_size();
	char name[ALLOCATOR_NAME_MAX];
	int ret;

	ret = snprintf(name, sizeof(name), ALLOCATOR_NAME_FMT,
			ocf_cache_get_name(cache));
	if (ret < 0)
		return ret;
	if (ret >= ALLOCATOR_NAME_MAX)
		return -ENOSPC;

	/* Private mio state is carved out right behind the base object */
	alock = env_vzalloc(base_size + sizeof(struct ocf_mio_alock));
	if (!alock)
		return -OCF_ERR_NO_MEM;

	ret = ocf_alock_init_inplace(alock, num_pages, name, cache);
	if (ret) {
		/* in-place init failed - release the buffer here */
		env_vfree(alock);
		return ret;
	}

	mio_alock = (void *)alock + base_size;
	mio_alock->first_page = first_page;
	mio_alock->num_pages = num_pages;

	*self = alock;
	return 0;
}
void ocf_mio_concurrency_deinit(struct ocf_alock **self)
{
	ocf_alock_deinit(self);
}
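A sketch of the object lifecycle, with illustrative variable names; one lock object guards one contiguous range of metadata pages:

struct ocf_alock *mio_lock = NULL;
int result;

result = ocf_mio_concurrency_init(&mio_lock, first_page, num_pages, cache);
if (result)
	return result;

/* ... ocf_mio_async_lock()/ocf_mio_async_unlock() around metadata IO ... */

ocf_mio_concurrency_deinit(&mio_lock);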


@@ -0,0 +1,24 @@
/*
 * Copyright(c) 2021 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#ifndef OCF_MIO_CONCURRENCY_H_
#define OCF_MIO_CONCURRENCY_H_

#include "../utils/utils_alock.h"

int ocf_mio_async_lock(struct ocf_alock *alock,
		struct metadata_io_request *m_req,
		ocf_req_async_lock_cb cmpl);

void ocf_mio_async_unlock(struct ocf_alock *alock,
		struct metadata_io_request *m_req);

int ocf_mio_concurrency_init(struct ocf_alock **self,
		unsigned first_page, unsigned num_pages,
		ocf_cache_t cache);

void ocf_mio_concurrency_deinit(struct ocf_alock **self);

#endif /* OCF_MIO_CONCURRENCY_H_ */


@@ -54,6 +54,7 @@ struct metadata_io_request {
	env_atomic finished;
	uint32_t page;
	uint32_t count;
	uint64_t map;
};

/*
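The new map field is the lock bitmap consumed by the mio callbacks above: bit i records whether page (m_req->page + i) currently holds its write lock, which in effect caps a single metadata IO request at 64 pages. A hypothetical helper spelling out the encoding (the tree itself uses env_bit_test()/env_bit_set() on the same storage):

/* Illustrative only - mirrors env_bit_test(index, &m_req->map) */
static inline bool mio_page_locked(const struct metadata_io_request *m_req,
		unsigned index)
{
	return (m_req->map >> index) & 1ULL;
}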


@@ -62,78 +62,94 @@ struct ocf_alock {
};

size_t ocf_alock_obj_size(void)
{
	return sizeof(struct ocf_alock);
}

int ocf_alock_init_inplace(struct ocf_alock *self, unsigned num_entries,
		const char* name, ocf_cache_t cache)
{
	uint32_t i;
	int error = 0;

	OCF_DEBUG_TRACE(cache);

	self->cache = cache;
	self->num_entries = num_entries;

	error = env_mutex_init(&self->lock);
	if (error) {
		error = __LINE__;
		goto rwsem_err;
	}

	self->access = env_vzalloc(num_entries * sizeof(self->access[0]));
	if (!self->access) {
		error = __LINE__;
		goto allocation_err;
	}

	self->allocator = env_allocator_create(sizeof(struct ocf_alock_waiter), name, false);
	if (!self->allocator) {
		error = __LINE__;
		goto allocation_err;
	}

	/* Init concurrency control table */
	for (i = 0; i < _WAITERS_LIST_ENTRIES; i++) {
		INIT_LIST_HEAD(&self->waiters_lsts[i].head);
		error = env_spinlock_init(&self->waiters_lsts[i].lock);
		if (error) {
			error = __LINE__;
			goto spinlock_err;
		}
	}

	return 0;

spinlock_err:
	while (i--)
		env_spinlock_destroy(&self->waiters_lsts[i].lock);

allocation_err:
	if (self->allocator)
		env_allocator_destroy(self->allocator);

	if (self->access)
		env_vfree(self->access);

rwsem_err:
	env_mutex_destroy(&self->lock);

	ocf_cache_log(cache, log_err, "Cannot initialize cache concurrency, "
			"ERROR %d", error);

	return -1;
}

int ocf_alock_init(struct ocf_alock **self, unsigned num_entries,
		const char* name, ocf_cache_t cache)
{
	struct ocf_alock *alock;
	int ret;

	OCF_DEBUG_TRACE(cache);

	alock = env_vzalloc(sizeof(*alock));
	if (!alock)
		return -OCF_ERR_NO_MEM;

	ret = ocf_alock_init_inplace(alock, num_entries,
			name, cache);
	if (!ret)
		*self = alock;
	else
		env_vfree(alock);

	return ret;
}

void ocf_alock_deinit(struct ocf_alock **self)
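The hunk above separates allocation from construction: ocf_alock_obj_size() and ocf_alock_init_inplace() let a caller embed the lock at the head of a larger buffer (exactly the layout ocf_mio_concurrency_init() builds), while ocf_alock_init() preserves the old allocate-and-init behavior on top of them. A sketch of the embedding pattern with a hypothetical private struct:

/* Illustrative only - private state trails the base object */
struct my_priv {
	unsigned first;
};

struct ocf_alock *lock =
		env_vzalloc(ocf_alock_obj_size() + sizeof(struct my_priv));

if (lock && ocf_alock_init_inplace(lock, num_entries, name, cache) == 0) {
	struct my_priv *priv = (void *)lock + ocf_alock_obj_size();
	priv->first = 0;
}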


@@ -87,6 +87,11 @@ bool ocf_alock_is_locked_exclusively(struct ocf_alock *alock,
uint32_t ocf_alock_waitlist_count(struct ocf_alock *alock);

size_t ocf_alock_obj_size(void);

int ocf_alock_init_inplace(struct ocf_alock *self, unsigned num_entries,
		const char* name, ocf_cache_t cache);

int ocf_alock_init(struct ocf_alock **self, unsigned num_entries,
		const char* name, ocf_cache_t cache);