Compare commits

No commits in common. "8e3aab2f45ba9a80388b2622df5ee290491eb0f2" and "c7e47e226ad74493e9d8fa3af8369f4d3b3b61af" have entirely different histories.

Comparing 8e3aab2f45...c7e47e226a
.gitattributes (vendored): 1 deletion

@@ -1 +0,0 @@
-tests/** -linguist-detectable
@@ -383,11 +383,6 @@ static void _ocf_mngt_deinit_added_cores(
 	if (context->cfg.open_cores)
 		ocf_volume_close(volume);
 
-	if (core->front_volume.opened) {
-		ocf_volume_close(&core->front_volume);
-		ocf_volume_deinit(&core->front_volume);
-	}
-
 	if (core->seq_cutoff)
 		ocf_core_seq_cutoff_deinit(core);
 
@@ -595,7 +595,6 @@ static int _ocf_cleaner_check_map(struct ocf_request *req)
 {
 	ocf_core_id_t core_id;
 	uint64_t core_line;
-	bool nothing_to_submit = true;
 	int i;
 
 	for (i = 0; i < req->core_line_count; ++i) {
@@ -612,12 +611,6 @@ static int _ocf_cleaner_check_map(struct ocf_request *req)
 			continue;
 
 		req->map[i].flush = true;
-		nothing_to_submit = false;
-	}
-
-	if (nothing_to_submit) {
-		_ocf_cleaner_finish_req(req);
-		return 0;
 	}
 
 	_ocf_cleaner_fire_cache(req);
@@ -1,6 +1,6 @@
 /*
  * Copyright(c) 2019-2022 Intel Corporation
- * Copyright(c) 2023-2025 Huawei Technologies
+ * Copyright(c) 2023-2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
 
@@ -10,14 +10,6 @@
 #include "../ocf_request.h"
 #include "utils_pipeline.h"
 
-#define OCF_PL_DEBUG 0
-#if OCF_PL_DEBUG == 1
-#define OCF_DEBUG_LOG(cache, format, ...) \
-	ocf_cache_log(cache, log_debug, format, ##__VA_ARGS__)
-#else
-#define OCF_DEBUG_LOG(cache, format, ...)
-#endif
-
 #define OCF_PIPELINE_ALIGNMENT 64
 
 struct ocf_pipeline {
@@ -45,7 +37,7 @@ static int _ocf_pipeline_run_step(struct ocf_request *req)
 
 	while (true) {
 		step = &pipeline->properties->steps[pipeline->next_step];
-		OCF_DEBUG_LOG(req->cache, "PL STEP: %s\n", step->name);
+		ocf_cache_log(req->cache, log_debug, "PL STEP: %s\n", step->name);
 		switch (step->type) {
 		case ocf_pipeline_step_single:
 			pipeline->next_step++;
@@ -17,7 +17,6 @@ from pyocf.types.io import IoDir, Sync
 from pyocf.types.queue import Queue
 from pyocf.utils import Size as S
 from pyocf.types.shared import OcfError, OcfErrorCode, OcfCompletion, CacheLineSize
-from pyocf.rio import Rio, ReadWrite
 
 
 @pytest.mark.parametrize("cache_mode", CacheMode)
@@ -153,15 +152,13 @@ def test_detach_cache_detach_core_cleaning(pyocf_ctx, cleaning_policy, promotion
     cache.set_promotion_policy(promotion_policy)
 
     for core in [core_1, core_2]:
-        r = (
-            Rio()
-            .target(CoreVolume(core))
-            .readwrite(ReadWrite.WRITE)
-            .size(core.get_stats()["size"])
-            .qd(1)
-            .bs(S.from_KiB(64))
-            .run([core.cache.get_default_queue()])
-        )
+        vol = CoreVolume(core)
+        queue = core.cache.get_default_queue()
+
+        core_size = core.get_stats()["size"]
+        data = Data(core_size.B)
+
+        _io_to_core(vol, queue, data)
 
     core_1.detach()
 
@@ -181,15 +178,13 @@ def test_detach_cache_retach_core_cleaning(pyocf_ctx, cleaning_policy, promotion
 
     def _write_cores(cores_list):
         for core in cores_list:
-            r = (
-                Rio()
-                .target(CoreVolume(core))
-                .readwrite(ReadWrite.WRITE)
-                .size(core.get_stats()["size"])
-                .qd(1)
-                .bs(S.from_KiB(64))
-                .run([core.cache.get_default_queue()])
-            )
+            vol = CoreVolume(core)
+            queue = core.cache.get_default_queue()
+
+            core_size = core.get_stats()["size"]
+            data = Data(core_size.B)
+
+            _io_to_core(vol, queue, data)
 
     cache.add_core(core_1)
     cache.add_core(core_2)
@@ -227,15 +222,13 @@ def test_reattach_cache_reattach_core_cleaning(pyocf_ctx, cleaning_policy, promo
 
     def _write_cores(cores_list):
         for core in cores_list:
-            r = (
-                Rio()
-                .target(CoreVolume(core))
-                .readwrite(ReadWrite.WRITE)
-                .size(core.get_stats()["size"])
-                .qd(1)
-                .bs(S.from_KiB(64))
-                .run([core.cache.get_default_queue()])
-            )
+            vol = CoreVolume(core)
+            queue = core.cache.get_default_queue()
+
+            core_size = core.get_stats()["size"]
+            data = Data(core_size.B)
+
+            _io_to_core(vol, queue, data)
 
     cache.add_core(core_1)
     cache.add_core(core_2)
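The rewritten loops above call an _io_to_core() helper that is defined elsewhere in the test module and does not appear in these hunks. As a rough sketch only, assuming the helper follows the same open/write/close pattern that the test_d2c_io hunk below switches to, it could look like this:

from pyocf.types.io import IoDir, Sync

def _io_to_core(vol, queue, data):
    # Assumed behavior: write `data` to the start of the core volume and
    # fail the test if the I/O completes with an error.
    vol.open()
    io = vol.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
    io.set_data(data)

    completion = Sync(io).submit()
    vol.close()

    assert completion.results["err"] == 0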
@@ -25,7 +25,6 @@ from pyocf.types.shared import (
 from pyocf.types.volume import RamVolume
 from pyocf.types.volume_core import CoreVolume
 from pyocf.utils import Size
-from pyocf.rio import Rio, ReadWrite
 
 logger = logging.getLogger(__name__)
 
@@ -97,15 +96,17 @@ def test_d2c_io(pyocf_ctx):
 
     cache.detach_device()
 
-    r = (
-        Rio()
-        .target(CoreVolume(core_1))
-        .readwrite(ReadWrite.WRITE)
-        .size(core_1.get_stats()["size"])
-        .qd(1)
-        .bs(Size.from_KiB(64))
-        .run([core_1.cache.get_default_queue()])
-    )
+    vol = CoreVolume(core_1)
+    queue = core_1.cache.get_default_queue()
+    data = Data(4096)
+    vol.open()
+    io = vol.new_io(queue, 0, data.size, IoDir.WRITE, 0, 0)
+    io.set_data(data)
+
+    completion = Sync(io).submit()
+
+    vol.close()
+    assert completion.results["err"] == 0
 
 
 def test_detach_cache_zero_superblock(pyocf_ctx):