Limit workload for large cores in seq cutoff test

So far the test was writing an amount of data equal to the core size.
With xTB (multi-terabyte) cores this results in a very long execution time.

Signed-off-by: Daniel Madej <daniel.madej@intel.com>
Daniel Madej, 2021-08-11 11:23:08 +02:00
parent 5afc8af0e8
commit b6f33e733b

@@ -5,7 +5,9 @@
 import os
 import random
 from time import sleep
+
 import pytest
+
 from api.cas import casadm
 from api.cas.cache_config import CacheMode, SeqCutOffPolicy, CacheModeTrait
 from core.test_run_utils import TestRun
@@ -159,7 +161,7 @@ def test_multistream_seq_cutoff_stress_raw(streams_seq_rand):
         core.reset_counters()
 
     with TestRun.step("Run I/O"):
-        stream_size = core_disk.size / 256
+        stream_size = min(core_disk.size / 256, Size(256, Unit.MebiByte))
         sequential_streams = streams_seq_rand[0]
         random_streams = streams_seq_rand[1]
         fio = (Fio().create_command()
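
For context, below is a minimal, self-contained sketch of the new capping behavior. It uses plain byte counts in place of the test framework's Size/Unit types; the MiB/TiB constants, the capped_stream_size() helper, and the sample core sizes are illustrative stand-ins, not part of the commit.

# Sketch of the capping logic added above (stand-alone Python,
# plain integers instead of the framework's Size/Unit types).
MiB = 1024 ** 2
TiB = 1024 ** 4

def capped_stream_size(core_size_bytes: int) -> int:
    # Per-stream workload: 1/256 of the core size, but never more
    # than 256 MiB, mirroring the min() introduced in this commit.
    return min(core_size_bytes // 256, 256 * MiB)

# Before the change a 4 TiB core meant 16 GiB written per stream;
# with the cap each stream writes at most 256 MiB.
for core_size in (50 * 1024 ** 3, 1 * TiB, 4 * TiB):
    print(f"core {core_size / TiB:6.3f} TiB -> "
          f"stream {capped_stream_size(core_size) / MiB:7.1f} MiB")

The min() keeps the original proportional sizing for small cores while bounding the amount of data written per stream, and therefore the runtime, on multi-terabyte ones.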