From b6f33e733bf0552b01ef5ba64345365225938869 Mon Sep 17 00:00:00 2001
From: Daniel Madej
Date: Wed, 11 Aug 2021 11:23:08 +0200
Subject: [PATCH] Limit workload for large cores in seq cutoff test

So far the test was writing an amount of data equal to the core size.
With multi-TB cores this results in a very long execution time.

Signed-off-by: Daniel Madej
---
 .../functional/tests/cache_ops/test_multistream_seq_cutoff.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/test/functional/tests/cache_ops/test_multistream_seq_cutoff.py b/test/functional/tests/cache_ops/test_multistream_seq_cutoff.py
index 8382a49..3f64bd4 100644
--- a/test/functional/tests/cache_ops/test_multistream_seq_cutoff.py
+++ b/test/functional/tests/cache_ops/test_multistream_seq_cutoff.py
@@ -5,7 +5,9 @@
 import os
 import random
 from time import sleep
+
 import pytest
+
 from api.cas import casadm
 from api.cas.cache_config import CacheMode, SeqCutOffPolicy, CacheModeTrait
 from core.test_run_utils import TestRun
@@ -159,7 +161,7 @@ def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
         core.reset_counters()
 
     with TestRun.step("Run I/O"):
-        stream_size = core_disk.size / 256
+        stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
         sequential_streams = streams_seq_rand[0]
         random_streams = streams_seq_rand[1]
         fio = (Fio().create_command()
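
For reference, a minimal standalone sketch of the capping logic this patch
introduces. Size(256, Unit.MebiByte) in the hunk comes from the test
framework's size types; the plain-byte constants and the stream_size_bytes
helper below are illustrative stand-ins, not the framework API:

    # Hypothetical standalone model of the new stream_size formula.
    # MEBIBYTE/TEBIBYTE are plain-byte stand-ins for the framework's
    # Size/Unit types (an assumption, for illustration only).
    MEBIBYTE = 1024 ** 2
    TEBIBYTE = 1024 ** 4

    def stream_size_bytes(core_size_bytes: int) -> int:
        """Per-stream workload: 1/256 of the core size, capped at 256 MiB."""
        return min(core_size_bytes // 256, 256 * MEBIBYTE)

    # A 32 GiB core keeps the proportional size: 32 GiB / 256 = 128 MiB.
    assert stream_size_bytes(32 * 1024 ** 3) == 128 * MEBIBYTE
    # An 8 TiB core is capped at 256 MiB rather than 8 TiB / 256 = 32 GiB.
    assert stream_size_bytes(8 * TEBIBYTE) == 256 * MEBIBYTE

With the cap in place, the per-stream workload no longer grows with core
size, so run time stays bounded on multi-TB cores, while cores up to
64 GiB (where size / 256 <= 256 MiB) are exercised exactly as before.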