diff --git a/casadm/cas_main.c b/casadm/cas_main.c
index 6d8f8b1..3fae855 100644
--- a/casadm/cas_main.c
+++ b/casadm/cas_main.c
@@ -740,7 +740,7 @@ static struct cas_param cas_cache_params[] = {
 #define CLEANING_POLICY_TYPE_DESC "Cleaning policy type. " \
 	"Available policy types: {nop|alru|acp}"
 
-#define CLEANING_ALRU_WAKE_UP_DESC "Period of time between awakenings of flushing thread <%d-%d>[s] (default: %d s)"
+#define CLEANING_ALRU_WAKE_UP_DESC "Cleaning thread sleep time after an idle wake up <%d-%d>[s] (default: %d s)"
 #define CLEANING_ALRU_STALENESS_TIME_DESC "Time that has to pass from the last write operation before a dirty cache" \
 	" block can be scheduled to be flushed <%d-%d>[s] (default: %d s)"
 #define CLEANING_ALRU_FLUSH_MAX_BUFFERS_DESC "Number of dirty cache blocks to be flushed in one cleaning cycle" \
diff --git a/casadm/casadm.8 b/casadm/casadm.8
index 4edffd2..8575906 100644
--- a/casadm/casadm.8
+++ b/casadm/casadm.8
@@ -286,7 +286,9 @@ Identifier of cache instance <1-16384>.
 
 .TP
 .B -w, --wake-up
-Period of time between awakenings of flushing thread [s] (default: 20 s).
+Cleaning thread sleep time after an idle wake up [s] (default: 20 s).
+An idle wake up happens when there is no dirty data, or when the cleaning thread does not
+start cleaning due to staleness time and/or activity threshold constraints.
 
 .TP
 .B -s, --staleness-time
diff --git a/test/functional/tests/lazy_writes/cleaning_policy/test_cleaning_params.py b/test/functional/tests/lazy_writes/cleaning_policy/test_cleaning_params.py
index 8ae4f32..9a4e91e 100644
--- a/test/functional/tests/lazy_writes/cleaning_policy/test_cleaning_params.py
+++ b/test/functional/tests/lazy_writes/cleaning_policy/test_cleaning_params.py
@@ -141,10 +141,13 @@ def test_cleaning_policy_config():
                 f"Dirty data before pause: {core_dirty_before}\n"
                 f"Dirty data after pause: {core_dirty_after}"
             )
-        elif core_dirty_before != core_dirty_after + data_to_flush:
+        # Check only that a minimum amount of data was flushed: ALRU has no
+        # configurable sleep time between active flushing iterations, so the
+        # exact amount of flushed data cannot be estimated precisely.
+        elif core_dirty_before < core_dirty_after + data_to_flush:
             TestRun.LOGGER.error(
                 f"Number of dirty blocks flushed differs from configured in policy.\n"
-                f"Expected dirty data flushed: {data_to_flush}\n"
+                f"Expected minimum dirty data flushed: {data_to_flush}\n"
                 f"Actual dirty data flushed: "
                 f"{core_dirty_before - core_dirty_after}"
            )
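Reviewer note on the relaxed test assertion, as a minimal sketch (not part of the patch). The helper min_dirty_flushed and its parameters are hypothetical names chosen to mirror the test's variables; the only grounded facts are that --wake-up now sets the sleep after an idle wake up and that flush-max-buffers is the number of dirty blocks flushed per cleaning cycle.

# Sketch only: with no configurable sleep between active flushing
# iterations, a pause yields an unknown number of completed cleaning
# cycles, so the test can only lower-bound the flushed data at
# flush-max-buffers blocks per completed cycle.

def min_dirty_flushed(flush_max_buffers: int, completed_cycles: int) -> int:
    """Lower bound on dirty cache blocks flushed during a pause."""
    return flush_max_buffers * completed_cycles

# The patched check errors only when fewer blocks than this minimum were
# flushed, i.e. when core_dirty_before - core_dirty_after < data_to_flush.
core_dirty_before, core_dirty_after = 1000, 850  # example block counts
data_to_flush = min_dirty_flushed(flush_max_buffers=100, completed_cycles=1)
assert not (core_dirty_before < core_dirty_after + data_to_flush)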