Change alock API to include slow/fast lock callbacks
Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
parent f49e9d2d6a
commit ce316cc67c
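This commit replaces the five per-line callbacks of struct ocf_alock_lock_cbs (line_needs_lock, line_is_acting, line_is_locked, line_mark_locked, line_get_entry) with two whole-request callbacks, lock_entries_fast and lock_entries_slow. Per-line lock state moves out of the ocf_map_info rd_locked/wr_locked bitfields into a per-request alock_status byte array plus a single alock_rw direction field, maintained through the new generic helpers ocf_alock_mark_index_locked() and ocf_alock_is_index_locked().

A minimal sketch of how a backend plugs into the reworked interface; the example_* names are illustrative, everything else is taken from this commit:

	#include "utils_alock.h"

	/* Try-lock every entry of the request without ever enqueueing a
	 * waiter; returns OCF_LOCK_ACQUIRED or OCF_LOCK_NOT_ACQUIRED. */
	static int example_lock_fast(struct ocf_alock *alock,
			struct ocf_request *req, int rw);

	/* Lock each entry or add the request to the entry wait list;
	 * returns 0 on success or -OCF_ERR_NO_MEM. */
	static int example_lock_slow(struct ocf_alock *alock,
			struct ocf_request *req, int rw, ocf_req_async_lock_cb cmpl);

	static struct ocf_alock_lock_cbs example_cbs = {
		.lock_entries_fast = example_lock_fast,
		.lock_entries_slow = example_lock_slow
	};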
@@ -25,24 +25,6 @@ static bool ocf_cl_lock_line_is_acting(struct ocf_alock *alock,
 	return req->map[index].status != LOOKUP_MISS;
 }
 
-static bool ocf_cl_lock_line_is_locked(struct ocf_alock *alock,
-		struct ocf_request *req, unsigned index, int rw)
-{
-	if (rw == OCF_WRITE)
-		return req->map[index].wr_locked;
-	else
-		return req->map[index].rd_locked;
-}
-
-static void ocf_cl_lock_line_mark_locked(struct ocf_alock *alock,
-		struct ocf_request *req, unsigned index, int rw, bool locked)
-{
-	if (rw == OCF_WRITE)
-		req->map[index].wr_locked = locked;
-	else
-		req->map[index].rd_locked = locked;
-}
-
 static ocf_cache_line_t ocf_cl_lock_line_get_entry(
 		struct ocf_alock *alock, struct ocf_request *req,
 		unsigned index)
@@ -50,12 +32,118 @@ static ocf_cache_line_t ocf_cl_lock_line_get_entry(
 	return req->map[index].coll_idx;
 }
 
+static int ocf_cl_lock_line_fast(struct ocf_alock *alock,
+		struct ocf_request *req, int rw)
+{
+	int32_t i;
+	ocf_cache_line_t entry;
+	int ret = OCF_LOCK_ACQUIRED;
+
+	for (i = 0; i < req->core_line_count; i++) {
+		if (!ocf_cl_lock_line_needs_lock(alock, req, i)) {
+			/* nothing to lock */
+			continue;
+		}
+
+		entry = ocf_cl_lock_line_get_entry(alock, req, i);
+		ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i));
+
+		if (rw == OCF_WRITE) {
+			if (ocf_alock_trylock_entry_wr(alock, entry)) {
+				/* cache entry locked */
+				ocf_alock_mark_index_locked(alock, req, i, true);
+			} else {
+				/* Not possible to lock all cachelines */
+				ret = OCF_LOCK_NOT_ACQUIRED;
+				break;
+			}
+		} else {
+			if (ocf_alock_trylock_entry_rd_idle(alock, entry)) {
+				/* cache entry locked */
+				ocf_alock_mark_index_locked(alock, req, i, true);
+			} else {
+				/* Not possible to lock all cachelines */
+				ret = OCF_LOCK_NOT_ACQUIRED;
+				break;
+			}
+		}
+	}
+
+	/* Check if request is locked */
+	if (ret == OCF_LOCK_NOT_ACQUIRED) {
+		/* Request is not locked, discard acquired locks */
+		for (; i >= 0; i--) {
+			if (!ocf_cl_lock_line_needs_lock(alock, req, i))
+				continue;
+
+			entry = ocf_cl_lock_line_get_entry(alock, req, i);
+
+			if (ocf_alock_is_index_locked(alock, req, i)) {
+
+				if (rw == OCF_WRITE) {
+					ocf_alock_unlock_one_wr(alock, entry);
+				} else {
+					ocf_alock_unlock_one_rd(alock, entry);
+				}
+				ocf_alock_mark_index_locked(alock, req, i, false);
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int ocf_cl_lock_line_slow(struct ocf_alock *alock,
+		struct ocf_request *req, int rw, ocf_req_async_lock_cb cmpl)
+{
+	int32_t i;
+	ocf_cache_line_t entry;
+	int ret = 0;
+
+	for (i = 0; i < req->core_line_count; i++) {
+
+		if (!ocf_cl_lock_line_needs_lock(alock, req, i)) {
+			/* nothing to lock */
+			env_atomic_dec(&req->lock_remaining);
+			continue;
+		}
+
+		entry = ocf_cl_lock_line_get_entry(alock, req, i);
+		ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i));
+
+
+		if (rw == OCF_WRITE) {
+			if (!ocf_alock_lock_one_wr(alock, entry, cmpl, req, i)) {
+				/* lock not acquired and not added to wait list */
+				ret = -OCF_ERR_NO_MEM;
+				goto err;
+			}
+		} else {
+			if (!ocf_alock_lock_one_rd(alock, entry, cmpl, req, i)) {
+				/* lock not acquired and not added to wait list */
+				ret = -OCF_ERR_NO_MEM;
+				goto err;
+			}
+		}
+	}
+
+	return ret;
+
+err:
+	for (; i >= 0; i--) {
+		if (!ocf_cl_lock_line_needs_lock(alock, req, i))
+			continue;
+
+		entry = ocf_cl_lock_line_get_entry(alock, req, i);
+		ocf_alock_waitlist_remove_entry(alock, req, i, entry, rw);
+	}
+
+	return ret;
+}
+
 static struct ocf_alock_lock_cbs ocf_cline_conc_cbs = {
-	.line_needs_lock = ocf_cl_lock_line_needs_lock,
-	.line_is_acting = ocf_cl_lock_line_is_acting,
-	.line_is_locked = ocf_cl_lock_line_is_locked,
-	.line_mark_locked = ocf_cl_lock_line_mark_locked,
-	.line_get_entry = ocf_cl_lock_line_get_entry
+	.lock_entries_fast = ocf_cl_lock_line_fast,
+	.lock_entries_slow = ocf_cl_lock_line_slow
 };
 
 bool ocf_cache_line_try_lock_rd(struct ocf_alock *alock,
@@ -95,17 +183,48 @@ int ocf_req_async_lock_wr(struct ocf_alock *alock,
 
 void ocf_req_unlock_rd(struct ocf_alock *alock, struct ocf_request *req)
 {
-	ocf_alock_unlock_rd(alock, req);
+	int32_t i;
+	ocf_cache_line_t entry;
+
+	for (i = 0; i < req->core_line_count; i++) {
+		if (!ocf_cl_lock_line_is_acting(alock, req, i))
+			continue;
+
+		if (!ocf_alock_is_index_locked(alock, req, i))
+			continue;
+
+		entry = ocf_cl_lock_line_get_entry(alock, req, i);
+
+		ocf_alock_unlock_one_rd(alock, entry);
+		ocf_alock_mark_index_locked(alock, req, i, false);
+	}
 }
 
 void ocf_req_unlock_wr(struct ocf_alock *alock, struct ocf_request *req)
 {
-	ocf_alock_unlock_wr(alock, req);
+	int32_t i;
+	ocf_cache_line_t entry;
+
+	for (i = 0; i < req->core_line_count; i++) {
+		if (!ocf_cl_lock_line_is_acting(alock, req, i))
+			continue;
+
+		if (!ocf_alock_is_index_locked(alock, req, i))
+			continue;
+
+		entry = ocf_cl_lock_line_get_entry(alock, req, i);
+
+		ocf_alock_unlock_one_wr(alock, entry);
+		ocf_alock_mark_index_locked(alock, req, i, false);
+	}
 }
 
 void ocf_req_unlock(struct ocf_alock *alock, struct ocf_request *req)
 {
-	ocf_alock_unlock(alock, req);
+	if (req->alock_rw == OCF_WRITE)
+		ocf_req_unlock_wr(alock, req);
+	else
+		ocf_req_unlock_rd(alock, req);
 }
 
 bool ocf_cache_line_are_waiters(struct ocf_alock *alock,
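Note the contract split above: lock_entries_fast may be raced by multiple threads and must only try-lock, while lock_entries_slow runs serialized under alock->lock and may enqueue the request on per-entry wait lists. A sketch of how the generic layer drives the two callbacks, mirroring ocf_alock_lock_wr() later in this commit:

	lock = alock->cbs->lock_entries_fast(alock, req, OCF_WRITE);
	if (lock != OCF_LOCK_ACQUIRED) {
		env_mutex_lock(&alock->lock);
		/* ... arm req->lock_remaining, then: */
		status = alock->cbs->lock_entries_slow(alock, req, OCF_WRITE, cmpl);
		/* 0: locked or waiters queued; -OCF_ERR_NO_MEM: roll back */
		env_mutex_unlock(&alock->lock);
	}

The same pattern repeats for the metadata-io backend below, which is write-only, hence its ENV_BUG_ON(rw != OCF_WRITE) guards.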
@@ -16,43 +16,7 @@ struct ocf_mio_alock
 	unsigned num_pages;
 };
 
-static bool ocf_mio_lock_line_needs_lock(struct ocf_alock *alock,
-		struct ocf_request *req, unsigned index)
-{
-	return true;
-}
-
-static bool ocf_mio_lock_line_is_acting(struct ocf_alock *alock,
-		struct ocf_request *req, unsigned index)
-{
-	return true;
-}
-
-static bool ocf_mio_lock_line_is_locked(struct ocf_alock *alock,
-		struct ocf_request *req, unsigned index, int rw)
-{
-	struct metadata_io_request *m_req = (struct metadata_io_request *)req;
-
-	if (rw == OCF_WRITE)
-		return env_bit_test(index, &m_req->map);
-	else
-		return false;
-}
-
-static void ocf_mio_lock_line_mark_locked(struct ocf_alock *alock,
-		struct ocf_request *req, unsigned index, int rw, bool locked)
-{
-	struct metadata_io_request *m_req = (struct metadata_io_request *)req;
-
-	if (rw == OCF_READ)
-		return;
-	if (locked)
-		env_bit_set(index, &m_req->map);
-	else
-		env_bit_clear(index, &m_req->map);
-}
-
-static ocf_cache_line_t ocf_mio_lock_line_get_entry(
+static ocf_cache_line_t ocf_mio_lock_get_entry(
 		struct ocf_alock *alock, struct ocf_request *req,
 		unsigned index)
 {
@@ -66,12 +30,77 @@ static ocf_cache_line_t ocf_mio_lock_line_get_entry(
 	return page - mio_alock->first_page;
 }
 
+static int ocf_mio_lock_fast(struct ocf_alock *alock,
+		struct ocf_request *req, int rw)
+{
+	ocf_cache_line_t entry;
+	int ret = OCF_LOCK_ACQUIRED;
+	int32_t i;
+	ENV_BUG_ON(rw != OCF_WRITE);
+
+	for (i = 0; i < req->core_line_count; i++) {
+		entry = ocf_mio_lock_get_entry(alock, req, i);
+		ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i));
+
+		if (ocf_alock_trylock_entry_wr(alock, entry)) {
+			/* cache entry locked */
+			ocf_alock_mark_index_locked(alock, req, i, true);
+		} else {
+			/* Not possible to lock all cachelines */
+			ret = OCF_LOCK_NOT_ACQUIRED;
+			break;
+		}
+	}
+
+	/* Check if request is locked */
+	if (ret == OCF_LOCK_NOT_ACQUIRED) {
+		/* Request is not locked, discard acquired locks */
+		for (; i >= 0; i--) {
+			entry = ocf_mio_lock_get_entry(alock, req, i);
+
+			if (ocf_alock_is_index_locked(alock, req, i)) {
+				ocf_alock_unlock_one_wr(alock, entry);
+				ocf_alock_mark_index_locked(alock, req, i, false);
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int ocf_mio_lock_slow(struct ocf_alock *alock,
+		struct ocf_request *req, int rw, ocf_req_async_lock_cb cmpl)
+{
+	int32_t i;
+	ocf_cache_line_t entry;
+	int ret = 0;
+	ENV_BUG_ON(rw != OCF_WRITE);
+
+	for (i = 0; i < req->core_line_count; i++) {
+		entry = ocf_mio_lock_get_entry(alock, req, i);
+		ENV_BUG_ON(ocf_alock_is_index_locked(alock, req, i));
+
+		if (!ocf_alock_lock_one_wr(alock, entry, cmpl, req, i)) {
+			/* lock not acquired and not added to wait list */
+			ret = -OCF_ERR_NO_MEM;
+			goto err;
+		}
+	}
+
+	return ret;
+
+err:
+	for (; i >= 0; i--) {
+		entry = ocf_mio_lock_get_entry(alock, req, i);
+		ocf_alock_waitlist_remove_entry(alock, req, i, entry, OCF_WRITE);
+	}
+
+	return ret;
+}
+
 static struct ocf_alock_lock_cbs ocf_mio_conc_cbs = {
-	.line_needs_lock = ocf_mio_lock_line_needs_lock,
-	.line_is_acting = ocf_mio_lock_line_is_acting,
-	.line_is_locked = ocf_mio_lock_line_is_locked,
-	.line_mark_locked = ocf_mio_lock_line_mark_locked,
-	.line_get_entry = ocf_mio_lock_line_get_entry
+	.lock_entries_fast = ocf_mio_lock_fast,
+	.lock_entries_slow = ocf_mio_lock_slow
 };
 
 int ocf_mio_async_lock(struct ocf_alock *alock,
@@ -84,8 +113,21 @@ int ocf_mio_async_lock(struct ocf_alock *alock,
 void ocf_mio_async_unlock(struct ocf_alock *alock,
 		struct metadata_io_request *m_req)
 {
-	ocf_alock_unlock_wr(alock, &m_req->req);
-	m_req->map = 0;
+	ocf_cache_line_t entry;
+	struct ocf_request *req = &m_req->req;
+	int i;
+
+	for (i = 0; i < req->core_line_count; i++) {
+		if (!ocf_alock_is_index_locked(alock, req, i))
+			continue;
+
+		entry = ocf_mio_lock_get_entry(alock, req, i);
+
+		ocf_alock_unlock_one_wr(alock, entry);
+		ocf_alock_mark_index_locked(alock, req, i, false);
+	}
+
+	m_req->alock_status = 0;
 }
 
 
@@ -107,15 +149,17 @@ int ocf_mio_concurrency_init(struct ocf_alock **self,
 	if (ret < 0)
 		return ret;
 	if (ret >= ALLOCATOR_NAME_MAX)
-		return -ENOSPC;
+		return -OCF_ERR_NO_MEM;
 
 	alock = env_vzalloc(base_size + sizeof(struct ocf_mio_alock));
 	if (!alock)
 		return -OCF_ERR_NO_MEM;
 
 	ret = ocf_alock_init_inplace(alock, num_pages, name, &ocf_mio_conc_cbs, cache);
-	if (ret)
+	if (ret) {
+		env_free(alock);
 		return ret;
+	}
 
 	mio_alock = (void*)alock + base_size;
 	mio_alock->first_page = first_page;
@@ -3,8 +3,8 @@
  * SPDX-License-Identifier: BSD-3-Clause-Clear
  */
 
-#ifndef OCF_COLLISION_UPDATE_CONCURRENCY_H_
-#define OCF_COLLISION_UPDATE_CONCURRENCY_H_
+#ifndef OCF_MIO_CONCURRENCY_H_
+#define OCF_MIO_CONCURRENCY_H_
 
 #include "../utils/utils_alock.h"
 
@@ -139,7 +139,7 @@ err_alloc:
 
 static int _ocf_read_generic_do(struct ocf_request *req)
 {
-	if (ocf_engine_is_miss(req) && req->map->rd_locked) {
+	if (ocf_engine_is_miss(req) && req->alock_rw == OCF_READ) {
 		/* Miss can be handled only on write locks.
 		 * Need to switch to PT
 		 */
@@ -611,6 +611,7 @@ bool evp_lru_can_evict(ocf_cache_t cache)
 uint32_t evp_lru_req_clines(struct ocf_request *req,
 		struct ocf_user_part *part, uint32_t cline_no)
 {
+	struct ocf_alock* alock;
 	struct ocf_lru_iter iter;
 	uint32_t i;
 	ocf_cache_line_t cline;
@@ -680,10 +681,10 @@ uint32_t evp_lru_req_clines(struct ocf_request *req,
 		req->map[req_idx].status = LOOKUP_REMAPPED;
 		ocf_engine_patch_req_info(cache, req, req_idx);
 
-		if (cl_write_lock)
-			req->map[req_idx].wr_locked = true;
-		else
-			req->map[req_idx].rd_locked = true;
+		alock = ocf_cache_line_concurrency(iter.cache);
+
+		ocf_alock_mark_index_locked(alock, req, req_idx, true);
+		req->alock_rw = cl_write_lock ? OCF_WRITE : OCF_READ;
 
 		++req_idx;
 		++i;
@@ -348,7 +348,7 @@ static uint32_t metadata_io_max_page(ocf_cache_t cache)
 	uint32_t volume_max_io_pages = ocf_volume_get_max_io_size(
 			&cache->device->volume) / PAGE_SIZE;
 	struct metadata_io_request *m_req;
-	uint32_t request_map_capacity_pages = sizeof(m_req->map) * 8;
+	uint32_t request_map_capacity_pages = sizeof(m_req->alock_status) * 8;
 
 	return OCF_MIN(volume_max_io_pages, request_map_capacity_pages);
 }
@@ -458,6 +458,7 @@ static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
 		m_req->req.info.internal = true;
 		m_req->req.rw = dir;
 		m_req->req.map = LIST_POISON1;
+		m_req->req.alock_status = (uint8_t*)&m_req->alock_status;
 
 		/* If req_count == io_count and count is not multiple of
 		 * max_count, for last we can allocate data smaller that
@@ -56,7 +56,7 @@ struct metadata_io_request {
 	env_atomic finished;
 	uint32_t page;
 	uint32_t count;
-	uint64_t map;
+	uint64_t alock_status;
 };
 
 /*
@@ -43,10 +43,19 @@ static inline size_t ocf_req_sizeof_map(struct ocf_request *req)
 	return size;
 }
 
+static inline size_t ocf_req_sizeof_alock_status(struct ocf_request *req)
+{
+	uint32_t lines = req->core_line_count;
+	size_t size = (lines * sizeof(uint8_t));
+
+	ENV_BUG_ON(lines == 0);
+	return size;
+}
+
 int ocf_req_allocator_init(struct ocf_ctx *ocf_ctx)
 {
 	ocf_ctx->resources.req = env_mpool_create(sizeof(struct ocf_request),
-			sizeof(struct ocf_map_info), ENV_MEM_NORMAL, ocf_req_size_128,
+			sizeof(struct ocf_map_info) + sizeof(uint8_t), ENV_MEM_NORMAL, ocf_req_size_128,
 			false, NULL, "ocf_req", true);
 
 	if (ocf_ctx->resources.req == NULL)
@@ -90,6 +99,7 @@ struct ocf_request *ocf_req_new(ocf_queue_t queue, ocf_core_t core,
 
 	if (map_allocated) {
 		req->map = req->__map;
+		req->alock_status = (uint8_t*)&req->__map[core_line_count];
 		req->alloc_core_line_count = core_line_count;
 	} else {
 		req->alloc_core_line_count = 1;
@@ -131,12 +141,15 @@ int ocf_req_alloc_map(struct ocf_request *req)
 	if (req->map)
 		return 0;
 
-	req->map = env_zalloc(ocf_req_sizeof_map(req), ENV_MEM_NOIO);
+	req->map = env_zalloc(ocf_req_sizeof_map(req) +
+			ocf_req_sizeof_alock_status(req), ENV_MEM_NOIO);
 	if (!req->map) {
 		req->error = -OCF_ERR_NO_MEM;
 		return -OCF_ERR_NO_MEM;
 	}
 
+	req->alock_status = &((uint8_t*)req->map)[ocf_req_sizeof_map(req)];
+
 	return 0;
 }
 
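The status bytes are co-allocated with the mapping array: the request's ocf_map_info entries are followed immediately by one status byte per core line, and req->alock_status points just past the map. A sketch of the resulting layout, recapping the allocation above:

	/*
	 * One allocation backs both arrays:
	 *
	 *   [ struct ocf_map_info x core_line_count ][ uint8_t x core_line_count ]
	 *   ^ req->map                               ^ req->alock_status
	 */

The mpool element size grows by sizeof(uint8_t) per line accordingly, so statically allocated maps (req->__map) get the same trailing status area.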
@@ -61,12 +61,6 @@ struct ocf_map_info {
 	uint16_t status : 8;
 		/*!< Traverse or mapping status - HIT, MISS, etc... */
 
-	uint16_t rd_locked : 1;
-		/*!< Indicates if cache line is locked for READ access */
-
-	uint16_t wr_locked : 1;
-		/*!< Indicates if cache line is locked for WRITE access */
-
 	uint16_t invalid : 1;
 		/*!< This bit indicates that mapping is invalid */
 
@@ -222,9 +216,15 @@ struct ocf_request {
 
 	struct ocf_req_discard_info discard;
 
+	uint32_t alock_rw;
+		/*!< Read/Write mode for alock*/
+
+	uint8_t *alock_status;
+		/*!< Mapping for locked/unlocked alock entries */
+
 	struct ocf_map_info *map;
 
-	struct ocf_map_info __map[];
+	struct ocf_map_info __map[0];
 };
 
 typedef void (*ocf_req_end_t)(struct ocf_request *req, int error);
@@ -3,13 +3,10 @@
  * SPDX-License-Identifier: BSD-3-Clause-Clear
  */
 
-//#include "ocf_concurrency.h"
 #include "../ocf_cache_priv.h"
 #include "../ocf_priv.h"
 #include "../ocf_request.h"
 #include "utils_alock.h"
-//#include "../utils/utils_cache_line.h"
-//#include "../utils/utils_realloc.h"
 
 #define OCF_CACHE_CONCURRENCY_DEBUG 0
 
@@ -63,6 +60,21 @@ struct ocf_alock {
 
 };
 
+void ocf_alock_mark_index_locked(struct ocf_alock *alock,
+		struct ocf_request *req, unsigned index, bool locked)
+{
+	if (locked)
+		env_bit_set(index, req->alock_status);
+	else
+		env_bit_clear(index, req->alock_status);
+}
+
+bool ocf_alock_is_index_locked(struct ocf_alock *alock,
+		struct ocf_request *req, unsigned index)
+{
+	return env_bit_test(index, (unsigned long*)req->alock_status);
+}
+
 size_t ocf_alock_obj_size(void)
 {
 	return sizeof(struct ocf_alock);
@@ -122,9 +134,8 @@ allocation_err:
 	if (self->access)
 		env_vfree(self->access);
 
-rwsem_err:
 	env_mutex_destroy(&self->lock);
+rwsem_err:
 	ocf_cache_log(cache, log_err, "Cannot initialize cache concurrency, "
 			"ERROR %d", error);
 
@@ -249,13 +260,10 @@ bool ocf_alock_trylock_entry_wr(struct ocf_alock *alock,
 	int prev = env_atomic_cmpxchg(access, OCF_CACHE_LINE_ACCESS_IDLE,
 				OCF_CACHE_LINE_ACCESS_WR);
 
-	if (prev == OCF_CACHE_LINE_ACCESS_IDLE)
-		return true;
-	else
-		return false;
+	return prev == OCF_CACHE_LINE_ACCESS_IDLE;
 }
 
-static inline bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock,
+bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock,
 		ocf_cache_line_t entry)
 {
 	env_atomic *access = &alock->access[entry];
@@ -287,8 +295,10 @@ static inline void ocf_alock_unlock_entry_rd(struct ocf_alock *alock,
 {
 	env_atomic *access = &alock->access[entry];
 
-	ENV_BUG_ON(env_atomic_read(access) == 0);
-	ENV_BUG_ON(env_atomic_read(access) == OCF_CACHE_LINE_ACCESS_WR);
+	int v = env_atomic_read(access);
+
+	ENV_BUG_ON(v == 0);
+	ENV_BUG_ON(v == OCF_CACHE_LINE_ACCESS_WR);
 	env_atomic_dec(access);
 }
 
@@ -296,8 +306,9 @@ static inline bool ocf_alock_trylock_entry_wr2wr(struct ocf_alock *alock,
 		ocf_cache_line_t entry)
 {
 	env_atomic *access = &alock->access[entry];
+	int v = env_atomic_read(access);
 
-	ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR);
+	ENV_BUG_ON(v != OCF_CACHE_LINE_ACCESS_WR);
 	return true;
 }
 
@@ -305,8 +316,9 @@ static inline bool ocf_alock_trylock_entry_wr2rd(struct ocf_alock *alock,
 		ocf_cache_line_t entry)
 {
 	env_atomic *access = &alock->access[entry];
+	int v = env_atomic_read(access);
 
-	ENV_BUG_ON(env_atomic_read(access) != OCF_CACHE_LINE_ACCESS_WR);
+	ENV_BUG_ON(v != OCF_CACHE_LINE_ACCESS_WR);
 	env_atomic_set(access, OCF_CACHE_LINE_ACCESS_ONE_RD);
 	return true;
 }
@@ -352,7 +364,7 @@ static void ocf_alock_entry_locked(struct ocf_alock *alock,
 	}
 }
 
-static inline bool ocf_alock_lock_one_wr(struct ocf_alock *alock,
+bool ocf_alock_lock_one_wr(struct ocf_alock *alock,
 		const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl,
 		void *req, uint32_t idx)
 {
@@ -364,7 +376,7 @@ static inline bool ocf_alock_lock_one_wr(struct ocf_alock *alock,
 
 	if (ocf_alock_trylock_entry_wr(alock, entry)) {
 		/* lock was not owned by anyone */
-		alock->cbs->line_mark_locked(alock, req, idx, OCF_WRITE, true);
+		ocf_alock_mark_index_locked(alock, req, idx, true);
 		ocf_alock_entry_locked(alock, req, cmpl);
 		return true;
 	}
@@ -397,7 +409,7 @@ unlock:
 	ocf_alock_waitlist_unlock(alock, entry, flags);
 
 	if (!waiting) {
-		alock->cbs->line_mark_locked(alock, req, idx, OCF_WRITE, true);
+		ocf_alock_mark_index_locked(alock, req, idx, true);
 		ocf_alock_entry_locked(alock, req, cmpl);
 		env_allocator_del(alock->allocator, waiter);
 	}
@@ -409,7 +421,7 @@ unlock:
  * Attempt to lock cache entry for read.
  * In case cache entry is locked, attempt to add caller on wait list.
  */
-static inline bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
+bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
 		const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl,
 		void *req, uint32_t idx)
 {
@@ -421,7 +433,7 @@ static inline bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
 
 	if( ocf_alock_trylock_entry_rd_idle(alock, entry)) {
 		/* lock was not owned by anyone */
-		alock->cbs->line_mark_locked(alock, req, idx, OCF_READ, true);
+		ocf_alock_mark_index_locked(alock, req, idx, true);
 		ocf_alock_entry_locked(alock, req, cmpl);
 		return true;
 	}
@@ -433,7 +445,7 @@ static inline bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
 	/* Lock waiters list */
 	ocf_alock_waitlist_lock(alock, entry, flags);
 
-	if (!ocf_alock_waitlist_is_empty_locked(alock, entry)) {
+	if (ocf_alock_waitlist_is_empty_locked(alock, entry)) {
 		/* No waiters at the moment */
 
 		/* Check if read lock can be obtained */
@@ -459,7 +471,7 @@ unlock:
 	ocf_alock_waitlist_unlock(alock, entry, flags);
 
 	if (!waiting) {
-		alock->cbs->line_mark_locked(alock, req, idx, OCF_READ, true);
+		ocf_alock_mark_index_locked(alock, req, idx, true);
 		ocf_alock_entry_locked(alock, req, cmpl);
 		env_allocator_del(alock->allocator, waiter);
 	}
@@ -477,7 +489,6 @@ static inline void ocf_alock_unlock_one_rd_common(struct ocf_alock *alock,
 {
 	bool locked = false;
 	bool exchanged = true;
-	uint32_t i = 0;
 
 	uint32_t idx = _WAITERS_LIST_ITEM(entry);
 	struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
@@ -515,14 +526,11 @@ static inline void ocf_alock_unlock_one_rd_common(struct ocf_alock *alock,
 			ENV_BUG();
 		}
 
-		i++;
-
 		if (locked) {
 			exchanged = false;
 			list_del(iter);
 
-			alock->cbs->line_mark_locked(alock, waiter->req, waiter->idx,
-					waiter->rw, true);
+			ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true);
 			ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);
 
 			env_allocator_del(alock->allocator, waiter);
@@ -566,7 +574,6 @@ void ocf_alock_unlock_one_rd(struct ocf_alock *alock,
 static inline void ocf_alock_unlock_one_wr_common(struct ocf_alock *alock,
 		const ocf_cache_line_t entry)
 {
-	uint32_t i = 0;
 	bool locked = false;
 	bool exchanged = true;
 
@@ -606,14 +613,11 @@ static inline void ocf_alock_unlock_one_wr_common(struct ocf_alock *alock,
 			ENV_BUG();
 		}
 
-		i++;
-
 		if (locked) {
 			exchanged = false;
 			list_del(iter);
 
-			alock->cbs->line_mark_locked(alock, waiter->req, waiter->idx,
-					waiter->rw, true);
+			ocf_alock_mark_index_locked(alock, waiter->req, waiter->idx, true);
 			ocf_alock_entry_locked(alock, waiter->req, waiter->cmpl);
 
 			env_allocator_del(alock->allocator, waiter);
@@ -648,10 +652,9 @@ void ocf_alock_unlock_one_wr(struct ocf_alock *alock,
 * Request can be assigned with lock asynchronously at any point of time,
 * so need to check lock state under a common lock.
 */
-static inline void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
-		struct ocf_request *req, int i, int rw)
+void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
+		struct ocf_request *req, ocf_cache_line_t entry, int i, int rw)
 {
-	ocf_cache_line_t entry = alock->cbs->line_get_entry(alock, req, i);
 	uint32_t idx = _WAITERS_LIST_ITEM(entry);
 	struct ocf_alock_waiters_list *lst = &alock->waiters_lsts[idx];
 	struct list_head *iter, *next;
@@ -660,18 +663,19 @@ static inline void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
 
 	ocf_alock_waitlist_lock(alock, entry, flags);
 
-	if (alock->cbs->line_is_locked(alock, req, i, rw)) {
+	if (ocf_alock_is_index_locked(alock, req, i)) {
 		if (rw == OCF_READ)
 			ocf_alock_unlock_one_rd_common(alock, entry);
 		else
 			ocf_alock_unlock_one_wr_common(alock, entry);
-		alock->cbs->line_mark_locked(alock, req, i, rw, false);
+		ocf_alock_mark_index_locked(alock, req, i, false);
 	} else {
 		list_for_each_safe(iter, next, &lst->head) {
 			waiter = list_entry(iter, struct ocf_alock_waiter, item);
 			if (waiter->req == req) {
 				list_del(iter);
 				env_allocator_del(alock->allocator, waiter);
+				break;
 			}
 		}
 	}
@@ -679,365 +683,77 @@ static inline void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
 	ocf_alock_waitlist_unlock(alock, entry, flags);
 }
 
-/* Try to read-lock request without adding waiters. Function should be called
- * under read lock, multiple threads may attempt to acquire the lock
- * concurrently.
- */
-static int ocf_alock_lock_rd_fast(struct ocf_alock *alock,
-		struct ocf_request *req)
-{
-	int32_t i;
-	ocf_cache_line_t entry;
-	int ret = OCF_LOCK_ACQUIRED;
-
-	OCF_DEBUG_RQ(req, "Lock");
-
-	ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
-
-	for (i = 0; i < req->core_line_count; i++) {
-		if (!alock->cbs->line_needs_lock(alock, req, i)) {
-			/* nothing to lock */
-			continue;
-		}
-
-		entry = alock->cbs->line_get_entry(alock, req, i);
-		ENV_BUG_ON(entry >= alock->num_entries);
-		ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_READ));
-		ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_WRITE));
-
-		if( ocf_alock_trylock_entry_rd_idle(alock, entry)) {
-			/* cache entry locked */
-			alock->cbs->line_mark_locked(alock, req, i, OCF_READ, true);
-		} else {
-			/* Not possible to lock all cachelines */
-			ret = OCF_LOCK_NOT_ACQUIRED;
-			OCF_DEBUG_RQ(req, "NO Lock, cache entry = %u", entry);
-			break;
-		}
-	}
-
-	/* Check if request is locked */
-	if (ret == OCF_LOCK_NOT_ACQUIRED) {
-		/* Request is not locked, discard acquired locks */
-		for (; i >= 0; i--) {
-			if (!alock->cbs->line_needs_lock(alock, req, i)) {
-				/* nothing to discard */
-				continue;
-			}
-
-			entry = alock->cbs->line_get_entry(alock, req, i);
-
-			if (alock->cbs->line_is_locked(alock, req, i, OCF_READ)) {
-				ocf_alock_unlock_one_rd(alock, entry);
-				alock->cbs->line_mark_locked(alock, req, i, OCF_READ, false);
-			}
-		}
-	}
-
-	return ret;
-}
-
-/*
- * Read-lock request cache lines. Must be called under cacheline concurrency
- * write lock.
- */
-static int ocf_alock_lock_rd_slow(struct ocf_alock *alock,
-		struct ocf_request *req, ocf_req_async_lock_cb cmpl)
-{
-	int32_t i;
-	ocf_cache_line_t entry;
-	int ret = OCF_LOCK_NOT_ACQUIRED;
-
-	ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
-
-	env_atomic_inc(&alock->waiting);
-	env_atomic_set(&req->lock_remaining, req->core_line_count);
-	env_atomic_inc(&req->lock_remaining);
-
-	for (i = 0; i < req->core_line_count; i++) {
-		if (!alock->cbs->line_needs_lock(alock, req, i)) {
-			/* nothing to lock */
-			env_atomic_dec(&req->lock_remaining);
-			continue;
-		}
-
-		entry = alock->cbs->line_get_entry(alock, req, i);
-		ENV_BUG_ON(entry >= alock->num_entries);
-		ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_READ));
-		ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_WRITE));
-
-		if (!ocf_alock_lock_one_rd(alock, entry, cmpl, req, i)) {
-			/* lock not acquired and not added to wait list */
-			ret = -OCF_ERR_NO_MEM;
-			goto err;
-		}
-	}
-
-	if (env_atomic_dec_return(&req->lock_remaining) == 0) {
-		ret = OCF_LOCK_ACQUIRED;
-		env_atomic_dec(&alock->waiting);
-	}
-
-	return ret;
-
-err:
-	for (; i >= 0; i--) {
-		if (!alock->cbs->line_needs_lock(alock, req, i))
-			continue;
-
-		ocf_alock_waitlist_remove_entry(alock, req, i ,OCF_READ);
-	}
-	env_atomic_set(&req->lock_remaining, 0);
-	env_atomic_dec(&alock->waiting);
-
-	return ret;
-
-}
-
 int ocf_alock_lock_rd(struct ocf_alock *alock,
 		struct ocf_request *req, ocf_req_async_lock_cb cmpl)
 {
-	int lock;
+	int lock, status;
 
-	lock = ocf_alock_lock_rd_fast(alock, req);
+	ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
+	req->alock_rw = OCF_READ;
+
+	lock = alock->cbs->lock_entries_fast(alock, req, OCF_READ);
 
 	if (lock != OCF_LOCK_ACQUIRED) {
 		env_mutex_lock(&alock->lock);
-		lock = ocf_alock_lock_rd_slow(alock, req, cmpl);
+		ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
+		ENV_BUG_ON(!cmpl);
+
+		env_atomic_inc(&alock->waiting);
+		env_atomic_set(&req->lock_remaining, req->core_line_count);
+		env_atomic_inc(&req->lock_remaining);
+
+		status = alock->cbs->lock_entries_slow(alock, req, OCF_READ, cmpl);
+		if (!status) {
+			if (env_atomic_dec_return(&req->lock_remaining) == 0) {
+				lock = OCF_LOCK_ACQUIRED;
+				env_atomic_dec(&alock->waiting);
+			}
+		} else {
+			env_atomic_set(&req->lock_remaining, 0);
+			env_atomic_dec(&alock->waiting);
+		}
+
 		env_mutex_unlock(&alock->lock);
 	}
 
 	return lock;
 }
 
-/* Try to write-lock request without adding waiters. Function should be called
- * under read lock, multiple threads may attempt to acquire the lock
- * concurrently. */
-static int ocf_alock_lock_wr_fast(struct ocf_alock *alock,
-		struct ocf_request *req)
-{
-	int32_t i;
-	ocf_cache_line_t entry;
-	int ret = OCF_LOCK_ACQUIRED;
-
-	ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
-
-	for (i = 0; i < req->core_line_count; i++) {
-		if (!alock->cbs->line_needs_lock(alock, req, i)) {
-			/* nothing to lock */
-			continue;
-		}
-
-		entry = alock->cbs->line_get_entry(alock, req, i);
-		ENV_BUG_ON(entry >= alock->num_entries);
-		ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_READ));
-		ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_WRITE));
-
-		if (ocf_alock_trylock_entry_wr(alock, entry)) {
-			/* cache entry locked */
-			alock->cbs->line_mark_locked(alock, req, i, OCF_WRITE, true);
-		} else {
-			/* Not possible to lock all cachelines */
-			ret = OCF_LOCK_NOT_ACQUIRED;
-			OCF_DEBUG_RQ(req, "NO Lock, cache entry = %u", entry);
-			break;
-		}
-	}
-
-	/* Check if request is locked */
-	if (ret == OCF_LOCK_NOT_ACQUIRED) {
-		/* Request is not locked, discard acquired locks */
-		for (; i >= 0; i--) {
-			if (!alock->cbs->line_needs_lock(alock, req, i))
-				continue;
-
-			entry = alock->cbs->line_get_entry(alock, req, i);
-
-			if (alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)) {
-				ocf_alock_unlock_one_wr(alock, entry);
-				alock->cbs->line_mark_locked(alock, req, i, OCF_WRITE, false);
-			}
-		}
-	}
-
-	return ret;
-}
-
-/*
- * Write-lock request cache lines. Must be called under cacheline concurrency
- * write lock.
- */
-static int ocf_alock_lock_wr_slow(struct ocf_alock *alock,
-		struct ocf_request *req, ocf_req_async_lock_cb cmpl)
-{
-	int32_t i;
-	ocf_cache_line_t entry;
-	int ret = OCF_LOCK_NOT_ACQUIRED;
-
-	ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
-	ENV_BUG_ON(!cmpl);
-
-	env_atomic_inc(&alock->waiting);
-	env_atomic_set(&req->lock_remaining, req->core_line_count);
-	env_atomic_inc(&req->lock_remaining);
-
-	for (i = 0; i < req->core_line_count; i++) {
-
-		if (!alock->cbs->line_needs_lock(alock, req, i)) {
-			/* nothing to lock */
-			env_atomic_dec(&req->lock_remaining);
-			continue;
-		}
-
-		entry = alock->cbs->line_get_entry(alock, req, i);
-		ENV_BUG_ON(entry >= alock->num_entries);
-		ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_READ));
-		ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_WRITE));
-
-		if (!ocf_alock_lock_one_wr(alock, entry, cmpl, req, i)) {
-			/* lock not acquired and not added to wait list */
-			ret = -OCF_ERR_NO_MEM;
-			goto err;
-		}
-	}
-
-	if (env_atomic_dec_return(&req->lock_remaining) == 0) {
-		ret = OCF_LOCK_ACQUIRED;
-		env_atomic_dec(&alock->waiting);
-	}
-
-	return ret;
-
-err:
-	for (; i >= 0; i--) {
-		if (!alock->cbs->line_needs_lock(alock, req, i))
-			continue;
-
-		ocf_alock_waitlist_remove_entry(alock, req, i, OCF_WRITE);
-	}
-	env_atomic_set(&req->lock_remaining, 0);
-	env_atomic_dec(&alock->waiting);
-
-	return ret;
-}
-
 int ocf_alock_lock_wr(struct ocf_alock *alock,
 		struct ocf_request *req, ocf_req_async_lock_cb cmpl)
 {
-	int lock;
+	int lock, status;
 
-	lock = ocf_alock_lock_wr_fast(alock, req);
+	ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
+	req->alock_rw = OCF_WRITE;
+	lock = alock->cbs->lock_entries_fast(alock, req, OCF_WRITE);
 
 	if (lock != OCF_LOCK_ACQUIRED) {
 		env_mutex_lock(&alock->lock);
-		lock = ocf_alock_lock_wr_slow(alock, req, cmpl);
+		ENV_BUG_ON(env_atomic_read(&req->lock_remaining));
+		ENV_BUG_ON(!cmpl);
+
+		env_atomic_inc(&alock->waiting);
+		env_atomic_set(&req->lock_remaining, req->core_line_count);
+		env_atomic_inc(&req->lock_remaining);
+
+		status = alock->cbs->lock_entries_slow(alock, req, OCF_WRITE, cmpl);
+		if (!status) {
+			if (env_atomic_dec_return(&req->lock_remaining) == 0) {
+				lock = OCF_LOCK_ACQUIRED;
+				env_atomic_dec(&alock->waiting);
+			}
+		} else {
+			env_atomic_set(&req->lock_remaining, 0);
+			env_atomic_dec(&alock->waiting);
+		}
+
 		env_mutex_unlock(&alock->lock);
 	}
 
 	return lock;
 }
 
-void ocf_alock_unlock_rd(struct ocf_alock *alock,
-		struct ocf_request *req)
-{
-	int32_t i;
-	ocf_cache_line_t entry;
-
-	OCF_DEBUG_RQ(req, "Unlock");
-
-	for (i = 0; i < req->core_line_count; i++) {
-		ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_WRITE));
-
-		if (!alock->cbs->line_is_acting(alock, req, i))
-			continue;
-
-		if (!alock->cbs->line_is_locked(alock, req, i, OCF_READ))
-			continue;
-
-		entry = alock->cbs->line_get_entry(alock, req, i);
-
-		ENV_BUG_ON(entry >= alock->num_entries);
-
-		ocf_alock_unlock_one_rd(alock, entry);
-		alock->cbs->line_mark_locked(alock, req, i, OCF_READ, false);
-	}
-}
-
-void ocf_alock_unlock_wr(struct ocf_alock *alock,
-		struct ocf_request *req)
-{
-	int32_t i;
-	ocf_cache_line_t entry;
-
-	OCF_DEBUG_RQ(req, "Unlock");
-
-	for (i = 0; i < req->core_line_count; i++) {
-		ENV_BUG_ON(alock->cbs->line_is_locked(alock, req, i, OCF_READ));
-
-		if (!alock->cbs->line_is_acting(alock, req, i))
-			continue;
-
-		if (!alock->cbs->line_is_locked(alock, req, i, OCF_WRITE))
-			continue;
-
-		entry = alock->cbs->line_get_entry(alock, req, i);
-
-		ENV_BUG_ON(entry >= alock->num_entries);
-
-		ocf_alock_unlock_one_wr(alock, entry);
-		alock->cbs->line_mark_locked(alock, req, i, OCF_WRITE, false);
-	}
-}
-
-void ocf_alock_unlock(struct ocf_alock *alock,
-		struct ocf_request *req)
-{
-	int32_t i;
-	ocf_cache_line_t entry;
-
-	OCF_DEBUG_RQ(req, "Unlock");
-
-	for (i = 0; i < req->core_line_count; i++) {
-		if (!alock->cbs->line_is_acting(alock, req, i))
-			continue;
-
-		entry = alock->cbs->line_get_entry(alock, req, i);
-		ENV_BUG_ON(entry >= alock->num_entries);
-
-		if (alock->cbs->line_is_locked(alock, req, i, OCF_READ) &&
-				alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)) {
-			ENV_BUG();
-		} else if (alock->cbs->line_is_locked(alock, req, i, OCF_READ)) {
-			ocf_alock_unlock_one_rd(alock, entry);
-			alock->cbs->line_mark_locked(alock, req, i, OCF_READ, false);
-		} else if (alock->cbs->line_is_locked(alock, req, i, OCF_WRITE)) {
-			ocf_alock_unlock_one_wr(alock, entry);
-			alock->cbs->line_mark_locked(alock, req, i, OCF_WRITE, false);
-		}
-	}
-}
-
-void ocf_alock_unlock_one(struct ocf_alock *alock,
-		struct ocf_request *req, uint32_t idx)
-{
-	ocf_cache_line_t entry = alock->cbs->line_get_entry(alock, req, idx);
-
-	ENV_BUG_ON(!alock->cbs->line_is_acting(alock, req, idx));
-
-	if (alock->cbs->line_is_locked(alock, req, idx, OCF_READ) &&
-			alock->cbs->line_is_locked(alock, req, idx, OCF_WRITE)) {
-		ENV_BUG();
-	} else if (alock->cbs->line_is_locked(alock, req, idx, OCF_READ)) {
-		ocf_alock_unlock_one_rd(alock, entry);
-		alock->cbs->line_mark_locked(alock, req, idx, OCF_READ, false);
-	} else if (alock->cbs->line_is_locked(alock, req, idx, OCF_WRITE)) {
-		ocf_alock_unlock_one_wr(alock, entry);
-		alock->cbs->line_mark_locked(alock, req, idx, OCF_WRITE, false);
-	} else {
-		ENV_BUG();
-	}
-}
-
 bool ocf_cache_line_is_used(struct ocf_alock *alock,
 		ocf_cache_line_t entry)
 {
@@ -20,29 +20,16 @@ struct ocf_alock;
 /* async request cacheline lock acquisition callback */
 typedef void (*ocf_req_async_lock_cb)(struct ocf_request *req);
 
-typedef bool (*ocf_cl_lock_line_needs_lock_cb)(struct ocf_alock *alock,
-		struct ocf_request *req, unsigned index);
+typedef int (*ocf_cl_lock_fast)(struct ocf_alock *alock,
+		struct ocf_request *req, int rw);
 
-typedef bool (*ocf_cl_lock_line_is_acting_cb)(struct ocf_alock *alock,
-		struct ocf_request *req, unsigned index);
-
-typedef bool (*ocf_cl_lock_line_is_locked_cb)(struct ocf_alock *alock,
-		struct ocf_request *req, unsigned index, int rw);
-
-typedef void (*ocf_cl_lock_line_mark_locked_cb)(struct ocf_alock *alock,
-		struct ocf_request *req, unsigned index, int rw, bool locked);
-
-typedef ocf_cache_line_t (*ocf_cl_lock_line_get_entry_cb)(
-		struct ocf_alock *alock, struct ocf_request *req,
-		unsigned index);
+typedef int (*ocf_cl_lock_slow)(struct ocf_alock *alock,
+		struct ocf_request *req, int rw, ocf_req_async_lock_cb cmpl);
 
 struct ocf_alock_lock_cbs
 {
-	ocf_cl_lock_line_needs_lock_cb line_needs_lock;
-	ocf_cl_lock_line_is_acting_cb line_is_acting;
-	ocf_cl_lock_line_is_locked_cb line_is_locked;
-	ocf_cl_lock_line_mark_locked_cb line_mark_locked;
-	ocf_cl_lock_line_get_entry_cb line_get_entry;
+	ocf_cl_lock_fast lock_entries_fast;
+	ocf_cl_lock_slow lock_entries_slow;
 };
 
 bool ocf_alock_trylock_one_rd(struct ocf_alock *alock,
@@ -63,15 +50,6 @@ int ocf_alock_lock_rd(struct ocf_alock *alock,
 int ocf_alock_lock_wr(struct ocf_alock *alock,
 		struct ocf_request *req, ocf_req_async_lock_cb cmpl);
 
-void ocf_alock_unlock_rd(struct ocf_alock *alock,
-		struct ocf_request *req);
-
-void ocf_alock_unlock_wr(struct ocf_alock *alock,
-		struct ocf_request *req);
-
-void ocf_alock_unlock(struct ocf_alock *alock,
-		struct ocf_request *req);
-
 bool ocf_alock_waitlist_is_empty(struct ocf_alock *alock,
 		ocf_cache_line_t entry);
 
@@ -92,4 +70,24 @@ void ocf_alock_deinit(struct ocf_alock **self);
 
 size_t ocf_alock_size(unsigned num_entries);
 
+bool ocf_alock_is_index_locked(struct ocf_alock *alock,
+		struct ocf_request *req, unsigned index);
+
+void ocf_alock_mark_index_locked(struct ocf_alock *alock,
+		struct ocf_request *req, unsigned index, bool locked);
+
+bool ocf_alock_lock_one_wr(struct ocf_alock *alock,
+		const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl,
+		void *req, uint32_t idx);
+
+bool ocf_alock_lock_one_rd(struct ocf_alock *alock,
+		const ocf_cache_line_t entry, ocf_req_async_lock_cb cmpl,
+		void *req, uint32_t idx);
+
+void ocf_alock_waitlist_remove_entry(struct ocf_alock *alock,
+		struct ocf_request *req, ocf_cache_line_t entry, int i, int rw);
+
+bool ocf_alock_trylock_entry_rd_idle(struct ocf_alock *alock,
+		ocf_cache_line_t entry);
+
 #endif
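With these exports, lock bookkeeping is uniform across backends: whether a given line of a request holds its lock is a single bit in req->alock_status, and the request-wide direction lives in req->alock_rw. A backend-side sketch using only functions declared above (illustrative, not a complete backend):

	entry = ocf_cl_lock_line_get_entry(alock, req, i);
	if (ocf_alock_trylock_entry_wr(alock, entry))
		ocf_alock_mark_index_locked(alock, req, i, true);	/* set status bit */

	/* ... later, on unlock: */
	if (ocf_alock_is_index_locked(alock, req, i)) {			/* test status bit */
		ocf_alock_unlock_one_wr(alock, entry);
		ocf_alock_mark_index_locked(alock, req, i, false);
	}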
@@ -160,6 +160,7 @@ pthread_mutex_t prog_mutex = PTHREAD_MUTEX_INITIALIZER;
 struct test_req {
 	struct ocf_request r;
 	struct ocf_map_info map[TEST_MAX_MAP_SIZE];
+	uint8_t alock_map[TEST_MAX_MAP_SIZE];
 	pthread_cond_t completion;
 	pthread_mutex_t completion_mutex;
 	bool finished;
@@ -249,6 +250,7 @@ void thread(void *_ctx)
 	bool locked;
 
 	ctx->treq.r.map = &ctx->treq.map;
+	ctx->treq.r.alock_status = &ctx->treq.alock_map;
 	pthread_cond_init(&ctx->treq.completion, NULL);
 	pthread_mutex_init(&ctx->treq.completion_mutex, NULL);
 
@@ -399,12 +401,13 @@ static void cctest(unsigned num_threads, unsigned num_iterations, unsigned cline
 	{
 		if (!threads[i].finished)
 		{
-			unsigned num_clines = threads[i].treq.r.core_line_count;
+			struct ocf_request *req = &threads[i].treq.r;
+			unsigned num_clines = req->core_line_count;
 			struct ocf_map_info **clines = malloc(num_clines *
 					sizeof(*clines));
 			for (j = 0; j < num_clines; j++)
 			{
-				clines[j] = &threads[i].treq.r.map[j];
+				clines[j] = &req->map[j];
 			}
 
 			qsort(clines, num_clines, sizeof(*clines), cmp_map);
@@ -412,8 +415,8 @@ static void cctest(unsigned num_threads, unsigned num_iterations, unsigned cline
 			print_message("thread no %u\n", i);
 			for (j = 0; j < num_clines; j++) {
 				struct ocf_map_info *map = clines[j];
-				const char *status = map->rd_locked ? "R" :
-						map->wr_locked ? "W" : "X";
+				const char *status = env_bit_test(index, (unsigned long*)req->alock_status) ?
+						(req->alock_rw == OCF_WRITE ? "W" : "R") : "X";
 				print_message("[%u] %u %s\n", j, map->coll_idx, status);
 			}
 