Merge pull request #34 from arutk/engine_wo

Extend CAS interface with Write-only cache mode
Michal Rakowski 2019-06-14 12:41:43 +02:00 committed by GitHub
commit 815c1164ee
10 changed files with 124 additions and 24 deletions


@@ -254,6 +254,7 @@ static struct name_to_val_mapping cache_mode_names[] = {
 #ifdef WI_AVAILABLE
 	{ .short_name = "wi", .long_name = "Write-Invalidate", .value = ocf_cache_mode_wi },
 #endif
+	{ .short_name = "wo", .long_name = "Write-Only", .value = ocf_cache_mode_wo },
 	{ NULL }
 };
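The hunk above adds "wo" to cache_mode_names, a NULL-terminated lookup table mapping short and long mode names to OCF mode values. As a rough illustration of how such a table is typically consulted, here is a minimal self-contained sketch; the struct fields mirror the diff, but the enum ordering and the lookup helper are assumptions made for illustration, not code from this repository.

/* Sketch only: field names mirror the diff above; the enum values and the
 * lookup helper are hypothetical illustrations, not CAS source code. */
#include <stdio.h>
#include <string.h>

typedef enum {
	ocf_cache_mode_wt,
	ocf_cache_mode_wb,
	ocf_cache_mode_wa,
	ocf_cache_mode_pt,
	ocf_cache_mode_wi,
	ocf_cache_mode_wo,		/* the mode added by this commit */
	ocf_cache_mode_none = -1,
} ocf_cache_mode_t;

struct name_to_val_mapping {
	const char *short_name;
	const char *long_name;
	int value;
};

static struct name_to_val_mapping cache_mode_names[] = {
	{ .short_name = "wt", .long_name = "Write-Through", .value = ocf_cache_mode_wt },
	{ .short_name = "wb", .long_name = "Write-Back", .value = ocf_cache_mode_wb },
	{ .short_name = "wo", .long_name = "Write-Only", .value = ocf_cache_mode_wo },
	{ NULL }
};

/* Hypothetical helper: scan the NULL-terminated table for a short name. */
static int cache_mode_from_name(const char *short_name)
{
	for (int i = 0; cache_mode_names[i].short_name; i++) {
		if (!strcmp(cache_mode_names[i].short_name, short_name))
			return cache_mode_names[i].value;
	}
	return ocf_cache_mode_none;
}

int main(void)
{
	printf("wo -> %d\n", cache_mode_from_name("wo"));
	return 0;
}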
@@ -1062,6 +1063,7 @@ int set_cache_mode(unsigned int cache_mode, unsigned int cache_id, int flush)
 	int fd = 0;
 	int orig_mode;
 	struct kcas_set_cache_state cmd;
+	bool flush_param_required;
 
 	fd = open_ctrl_device();
 	if (fd == -1)
@@ -1073,9 +1075,13 @@ int set_cache_mode(unsigned int cache_mode, unsigned int cache_id, int flush)
 		return FAILURE;
 	}
 
-	/* if flushing mode is undefined, set it to default (but only if original mode is write back mode) */
+	/* If flushing mode is undefined, set it to default unless we're transitioning
+	 * out of lazy write cache mode (like WB or WO), in which case user must explicitly
+	 * state his preference */
+	flush_param_required = ocf_mngt_cache_mode_has_lazy_write(orig_mode) &&
+			!ocf_mngt_cache_mode_has_lazy_write(cache_mode);
+
 	if (-1 == flush) {
-		if (ocf_cache_mode_wb == orig_mode) {
+		if (flush_param_required) {
 			cas_printf(LOG_ERR, "Error: Required parameter (--flush-cache) was not specified.\n");
 			close(fd);
 			return FAILURE;
@@ -1084,14 +1090,15 @@ int set_cache_mode(unsigned int cache_mode, unsigned int cache_id, int flush)
 		}
 	}
 
-	if (ocf_cache_mode_wb == orig_mode) {
+	if (flush_param_required) {
 		if (1 == flush) {
 			cas_printf(LOG_INFO, "CAS is currently flushing dirty data to primary storage devices.\n");
 		} else {
-			cas_printf(LOG_INFO, "CAS is currently migrating from Write-Back to %s mode.\n"
+			cas_printf(LOG_INFO, "CAS is currently migrating from %s to %s mode.\n"
 				"Dirty data are being flushed to primary storage device in background.\n"
 				"Please find flushing progress via list caches command (casadm -L) or\n"
 				"via statistics command (casadm -P).\n",
+				cache_mode_to_name_long(orig_mode),
 				cache_mode_to_name_long(cache_mode));
 		}
 	}
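Taken together, the hunks above reduce to one rule: the --flush-cache decision is only mandatory when leaving a lazy-write mode (Write-Back or Write-Only) for a mode without lazy writes. Below is a minimal standalone sketch of that check; mode_has_lazy_write() is a hypothetical stand-in for ocf_mngt_cache_mode_has_lazy_write(), and treating WB and WO as the lazy-write modes is an assumption based on this commit's comments.

/* Sketch of the --flush-cache requirement introduced above; names other than
 * the message text are illustrative, not CAS source code. */
#include <stdbool.h>
#include <stdio.h>

enum cache_mode { MODE_WT, MODE_WB, MODE_WA, MODE_PT, MODE_WI, MODE_WO };

/* Assumed stand-in for ocf_mngt_cache_mode_has_lazy_write(). */
static bool mode_has_lazy_write(enum cache_mode mode)
{
	return mode == MODE_WB || mode == MODE_WO;
}

/* flush == -1 means the user did not pass --flush-cache at all. */
static int validate_flush_param(enum cache_mode orig, enum cache_mode target, int flush)
{
	bool flush_param_required = mode_has_lazy_write(orig) &&
			!mode_has_lazy_write(target);

	if (flush == -1 && flush_param_required) {
		fprintf(stderr, "Error: Required parameter (--flush-cache) was not specified.\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	/* WO -> WT without --flush-cache: rejected, dirty data needs a decision. */
	printf("%d\n", validate_flush_param(MODE_WO, MODE_WT, -1));
	/* WB -> WO: both are lazy-write modes, so no flush decision is forced. */
	printf("%d\n", validate_flush_param(MODE_WB, MODE_WO, -1));
	return 0;
}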


@@ -125,15 +125,15 @@ int start_cache(ocf_cache_id_t cache_id, unsigned int cache_init,
 int stop_cache(ocf_cache_id_t cache_id, int flush);
 
 #ifdef WI_AVAILABLE
-#define CAS_CLI_HELP_START_CACHE_MODES "wt|wb|wa|pt|wi"
-#define CAS_CLI_HELP_SET_CACHE_MODES "wt|wb|wa|pt|wi"
-#define CAS_CLI_HELP_SET_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through, Write-Invalidate"
-#define CAS_CLI_HELP_START_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through, Write-Invalidate"
+#define CAS_CLI_HELP_START_CACHE_MODES "wt|wb|wa|pt|wi|wo"
+#define CAS_CLI_HELP_SET_CACHE_MODES "wt|wb|wa|pt|wi|wo"
+#define CAS_CLI_HELP_SET_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through, Write-Invalidate, Write-Only"
+#define CAS_CLI_HELP_START_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through, Write-Invalidate, Write-Only"
 #else
-#define CAS_CLI_HELP_START_CACHE_MODES "wt|wb|wa|pt"
-#define CAS_CLI_HELP_SET_CACHE_MODES "wt|wb|wa|pt"
-#define CAS_CLI_HELP_START_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through"
-#define CAS_CLI_HELP_SET_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through"
+#define CAS_CLI_HELP_START_CACHE_MODES "wt|wb|wa|pt|wo"
+#define CAS_CLI_HELP_SET_CACHE_MODES "wt|wb|wa|pt|wo"
+#define CAS_CLI_HELP_START_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through, Write-Only"
+#define CAS_CLI_HELP_SET_CACHE_MODES_FULL "Write-Through, Write-Back, Write-Around, Pass-Through, Write-Only"
 #endif
 
 /**


@@ -900,7 +900,7 @@ int handle_get_param()
 
 static cli_option set_state_cache_mode_options[] = {
 	{'c', "cache-mode", "Cache mode. Available cache modes: {"CAS_CLI_HELP_SET_CACHE_MODES"}", 1, "NAME", CLI_OPTION_REQUIRED},
 	{'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED},
-	{'f', "flush-cache", "Flush all dirty data from cache before switching to new mode. Option is required when switching from Write-Back mode", 1, "yes|no",0},
+	{'f', "flush-cache", "Flush all dirty data from cache before switching to new mode. Option is required when switching from Write-Back or Write-Only mode", 1, "yes|no",0},
 	{0},
 };


@@ -911,9 +911,8 @@ int cache_stats_conf(int ctrl_fd, const struct kcas_cache_info *cache_info,
 		return FAILURE;
 
 	print_kv_pair(outfile, "Inactive Core Devices", "%d", inactive_cores);
-	print_kv_pair(outfile, "Write Policy", "%s%s",
-		(flush_progress && cache_info->info.cache_mode != ocf_cache_mode_wb)
-		? "wb->" : "", cache_mode_to_name(cache_info->info.cache_mode));
+	print_kv_pair(outfile, "Write Policy", "%s",
+		cache_mode_to_name(cache_info->info.cache_mode));
 	print_kv_pair(outfile, "Eviction Policy", "%s",
 		eviction_policy_to_name(cache_info->info.eviction_policy));
 	print_kv_pair(outfile, "Cleaning Policy", "%s",

ocf

@@ -1 +1 @@
-Subproject commit 75ec3c7db424e11b6e5fbde5f5afba12b824f849
+Subproject commit aeaeafb639ee20f0320ed859efd4a002810f141d


@@ -6,8 +6,7 @@
 # The line below specified that line under it should be used as the test's short description when launching test via run_tests script.
 # The text should not be longer than 80 chars - if it is, the script will strip addititonal characters
-# DESCRIPTION WriteBack mode test: Test files on core devices with different filesystems after flushing and stopping cache
-# with 'don't flush dirty data on exit' option.
+# DESCRIPTION WB data integrity ext3/ext4/xfs after clean shutdown, no flush
 # USE_IN_NIGHTLY
 # USE_IN_BVT
@@ -18,6 +17,8 @@ TESTS_DIR="$(dirname $0)/../"
 . $TESTS_DIR/cas_lib
 start_test $*
 
+CACHE_MODE="wb"
+
 # This is where the real test starts
 # Use CACHE_DEVICE and CORE_DEVICE provided by configuration file and remove partitions from those devices
@@ -30,15 +31,15 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
 TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
 
 # Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
-CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION="wb" start_cache
+CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
 CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
 
 # Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
-CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION="wb" start_cache
+CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
 CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
 
 # Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
-CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION="wb" start_cache
+CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
 CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
 
 # Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).

test/smoke_test/basic/13 (new executable file)

@@ -0,0 +1,86 @@
#!/bin/bash
#
# Copyright(c) 2012-2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
# The line below specified that line under it should be used as the test's short description when launching test via run_tests script.
# The text should not be longer than 80 chars - if it is, the script will strip addititonal characters
# DESCRIPTION WO data integrity ext3/ext4/xfs after clean shutdown, no flush
# USE_IN_NIGHTLY
# USE_IN_BVT
# Standard beginning for every test - get the main tests directory and
# link the cas_lib file for CAS API, then use "start_test $*" to pass params
# and do other necessary checks and setup
TESTS_DIR="$(dirname $0)/../"
. $TESTS_DIR/cas_lib
start_test $*
CACHE_MODE="wo"
# This is where the real test starts
# Use CACHE_DEVICE and CORE_DEVICE provided by configuration file and remove partitions from those devices
TARGET_DEVICE_OPTION="$CACHE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CORE_DEVICE" remove_partitions
# Create 3 primary partitions on CACHE_DEVICE, each of 2000M size
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Create 3 primary partitions on CORE_DEVICE, each of 4000M size
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
# results have to be checked manually by the test.
TARGET_DEVICE_OPTION="${DEVICE_NAME}1-1" FILESYSTEM_TYPE="ext3" make_filesystem
TARGET_DEVICE_OPTION="${DEVICE_NAME}2-1" FILESYSTEM_TYPE="ext4" make_filesystem
TARGET_DEVICE_OPTION="${DEVICE_NAME}3-1" FILESYSTEM_TYPE="xfs" make_filesystem
# Specify a temporary file used for md5sums - note that it resides in $TMP_DIR, which is a special directory defined in cas_config.
# Everytime we use temporary files, they should be placed in that special directory. Its contents are cleared after every test.
MD5_FILE="$TMP_DIR/cas_md5_sum_"
# Mount the filesystems, then create the example files and save their MD5 sums locally in the temporary place
# Note the usage of ${MOUNTPOINT} - mount_cache always mounts using following formula: ${MOUNTPOINT}-${CACHE_ID_OPTION}-${CORE_ID_OPTION}.
for ID in 1 2 3 ; do
CACHE_ID_OPTION="$ID" CORE_ID_OPTION="1" mount_cache
run_cmd "dd if=/dev/urandom of=${MOUNTPOINT}-${ID}-1/test bs=50M count=1"
run_cmd "md5sum ${MOUNTPOINT}-${ID}-1/test > ${MD5_FILE}_${ID}"
done
# Umount & stop the caches, then mount the core devices
# Note the usage of ${MOUNTPOINT} - mount_cache always mounts using following formula: ${MOUNTPOINT}-${CACHE_ID_OPTION}-${CORE_ID_OPTION}.
for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
CACHE_ID_OPTION="$ID" flush_cache
CACHE_ID_OPTION="$ID" CACHE_DONT_FLUSH_DATA_OPTION="1" stop_cache
run_cmd dd if=/dev/zero of="${CACHE_DEVICE}${ID}" bs=1M count=1 oflag=direct
run_cmd "mount ${CORE_DEVICE}${ID} ${MOUNTPOINT}-${ID}-1"
done
# Now check for files' presence and umount core devices
# Note the usage of ${MOUNTPOINT} - mount_cache always mounts using following formula: ${MOUNTPOINT}-${CACHE_ID_OPTION}-${CORE_ID_OPTION}.
for ID in 1 2 3 ; do
run_cmd "test -f ${MOUNTPOINT}-${ID}-1/test"
run_cmd "md5sum -c ${MD5_FILE}_${ID}"
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
done
# Always return 0 at the end of the test - if at any point something has failed
# in the API functions, test will end and return a proper result.
# If you need to check other things during the test and end the test earlier, you
# should end the test using "end_test $retval" function
end_test 0


@@ -359,7 +359,7 @@ iteration() {
 	fi
 	if [ -n "$CACHE_MODE_OPTION" ] ; then
 		if [ "$CACHE_MODE_OPTION" == "all" ] ; then
-			L_CACHE_MODE_OPTION="wa wb wt pt"
+			L_CACHE_MODE_OPTION="wa wb wt pt wo"
 		else
 			L_CACHE_MODE_OPTION="$CACHE_MODE_OPTION"
 		fi


@@ -62,6 +62,13 @@ to the core device. Pass-Through mode may be used in case if user doesn't want t
 cache any workload, for example in case if there are some maintenance operations
 causing cache pollution.
 
+.TP
+.B Write-Only (wo)
+In Write-Only mode write operations are handled exactly like in Write-Back mode. Read
+operations do not promote data to cache. Reads are typically serviced by the core
+device, unless corresponding cache lines are dirty.
+
 .SH COMMANDS
 .TP
 .B -S, --start-cache
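To make the Write-Only semantics described above concrete, here is a hedged sketch of the read-path decision they imply: reads are served from the core device unless the mapped cache line holds dirty data, and a read never promotes data into the cache. The type and function names are illustrative assumptions, not OCF source.

/* Illustrative sketch of the Write-Only read path from the man page entry
 * above; names and structure are assumptions, not OCF code. */
#include <stdbool.h>
#include <stdio.h>

struct cache_line {
	bool valid;
	bool dirty;
};

enum read_source { READ_FROM_CORE, READ_FROM_CACHE };

/* In WO mode a read only touches the cache when the mapped line holds dirty
 * (not yet flushed) data; clean hits and misses go straight to the core
 * device, and nothing is promoted into the cache on a miss. */
static enum read_source wo_read_source(const struct cache_line *line)
{
	if (line && line->valid && line->dirty)
		return READ_FROM_CACHE;
	return READ_FROM_CORE;
}

int main(void)
{
	struct cache_line clean = { .valid = true, .dirty = false };
	struct cache_line dirty = { .valid = true, .dirty = true };

	printf("clean line -> %s\n", wo_read_source(&clean) == READ_FROM_CACHE ? "cache" : "core");
	printf("dirty line -> %s\n", wo_read_source(&dirty) == READ_FROM_CACHE ? "cache" : "core");
	return 0;
}

Writes, by contrast, follow the Write-Back path unchanged, which is why switching out of Write-Only requires the same --flush-cache decision as switching out of Write-Back.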


@@ -261,7 +261,7 @@ class cas_config(object):
 				format(self.device))
 
 	def check_cache_mode_valid(self, cache_mode):
-		if cache_mode.lower() not in ['wt', 'pt', 'wa', 'wb']:
+		if cache_mode.lower() not in ['wt', 'pt', 'wa', 'wb', 'wo']:
 			raise ValueError('Invalid cache mode {0}'.format(cache_mode))
 
 	def check_cleaning_policy_valid(self, cleaning_policy):