Merge pull request #535 from Ostrokrzew/by-id

Disallow using any path other than a by-id link for core or cache devices
Robert Baldyga 2020-12-23 14:38:07 +01:00 committed by GitHub
commit 309b674fd8
99 changed files with 740 additions and 724 deletions

View File

@ -36,6 +36,9 @@
#include "safeclib/safe_lib.h"
#include <cas_ioctl_codes.h>
#include "psort.h"
#include <libgen.h>
#include <regex.h>
#define PRINT_STAT(x) header->cmd_input.cache_stats.x
#define CORE_ADD_MAX_TIMEOUT 30
@ -547,6 +550,42 @@ void print_slow_atomic_cache_start_info(const char *device_path)
}
}
/**
* Save to dest an absolute device file path of src.
* Returns the number of characters copied to dest on success, or a negative value on failure.
*/
static int get_abs_path(char* dest, size_t dest_len, const char* src, size_t src_len)
{
int path_len = -FAILURE;
char *dir_name, *dev_name;
/* strndup() allocates memory; basename()/dirname() may modify their
argument and can segfault when called on a static string, so work on
duplicates of src. */
char *dev = strndup(src, src_len);
if (!dev)
goto dev_err;
char *dir = strndup(src, src_len);
if (!dir)
goto dir_err;
dir_name = realpath(dirname(dir), NULL); /* realpath() allocates the returned buffer */
if (!dir_name)
goto dir_name_err;
dev_name = basename(dev);
if (!dev_name)
goto dev_name_err;
path_len = snprintf(dest, dest_len, "%s/%s", dir_name, dev_name);
dev_name_err:
free(dir_name);
dir_name_err:
free(dir);
dir_err:
free(dev);
dev_err:
return path_len;
}
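
For illustration only (not part of the patch), a minimal sketch of how a caller might use get_abs_path(); MAX_STR_LEN and the headers included above are assumed, and the link name is made up:

/* Hypothetical caller: resolve a possibly relative link to its
absolute form before any further validation. */
char abs_path[MAX_STR_LEN];
const char *link = "by-id/ata-Example_Serial-part1"; /* relative to cwd */
int n = get_abs_path(abs_path, sizeof(abs_path), link, strlen(link));
if (n < 0)
	fprintf(stderr, "cannot resolve %s\n", link);
else
	printf("%s -> %s (%d chars)\n", link, abs_path, n);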
/**
* @brief get special device file path (/dev/sdX) for disk.
*/
@ -565,7 +604,66 @@ int get_dev_path(const char* disk, char* buf, size_t num)
return err;
}
int get_core_info(int fd, int cache_id, int core_id, struct kcas_core_info *info)
/* Indicate whether the given path should be accepted without further checks */
static bool is_dev_link_whitelisted(const char* path)
{
regex_t regex;
int result;
static const char* const whitelist[] = {"/dev/cas[0-9]\\+-[0-9]\\+$"};
static const unsigned count = ARRAY_SIZE(whitelist);
size_t i;
for (i = 0; i < count; i++) {
result = regcomp(&regex, whitelist[i], REG_NOSUB);
if (result)
return false; /* a failed regcomp() must not whitelist the path */
result = regexec(&regex, path, 0, NULL, 0);
regfree(&regex);
if (!result) {
return true;
}
}
return false;
}
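
Two notes on the matching: regexec() without a leading '^' matches anywhere in the string, and '\+' relies on the common glibc BRE extension meaning "one or more". A hedged illustration of what the single whitelist entry accepts (hypothetical assertions, not in the patch):

#include <assert.h>

/* Expected behaviour of the "/dev/cas[0-9]\+-[0-9]\+$" entry. */
assert(is_dev_link_whitelisted("/dev/cas1-1"));    /* CAS exported object */
assert(is_dev_link_whitelisted("/dev/cas12-345"));
assert(!is_dev_link_whitelisted("/dev/sda"));      /* raw disk */
assert(!is_dev_link_whitelisted("/dev/cas1-1p1")); /* '$' rejects trailing text */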
static int _is_by_id_path(const char* dev_path)
{
static const char dev_by_id_dir[] = "/dev/disk/by-id";
return (!strncmp(dev_path, dev_by_id_dir,
strnlen_s(dev_by_id_dir, sizeof(dev_by_id_dir))));
}
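
Note that this is a plain prefix comparison, so anything beginning with the by-id directory passes; a short illustration (hypothetical calls, not in the patch):

_is_by_id_path("/dev/disk/by-id/ata-Example_Serial"); /* nonzero: accepted */
_is_by_id_path("/dev/sda");                           /* zero: rejected */
/* Caveat of a pure prefix match: a path such as "/dev/disk/by-idX"
would also pass, since only the directory prefix is compared. */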
int set_device_path(char *dest_path, size_t dest_len, const char *src_path, size_t src_len)
{
char abs_dev_path[MAX_STR_LEN];
int result;
/* resolve the given path to an absolute path in a temporary buffer */
if (get_abs_path(abs_dev_path, sizeof(abs_dev_path), src_path, src_len) < 0)
return FAILURE;
/* pass the path through unchanged if it is whitelisted */
if (is_dev_link_whitelisted(abs_dev_path)) {
result = strncpy_s(dest_path, dest_len, abs_dev_path,
strnlen_s(abs_dev_path, sizeof(abs_dev_path)));
return result ?: SUCCESS;
}
if (_is_by_id_path(abs_dev_path)) {
result = strncpy_s(dest_path, dest_len, abs_dev_path,
strnlen_s(abs_dev_path, sizeof(abs_dev_path)));
if (!result)
return SUCCESS;
}
return FAILURE;
}
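
Taken together the flow is: resolve the input to an absolute path, accept it immediately if whitelisted, and otherwise require the /dev/disk/by-id prefix. A usage sketch mirroring the callers further below (illustration only; the device name is made up):

/* Not part of the patch: sanitize a user-supplied cache path
before handing it to the kernel module. */
char sanitized[MAX_STR_LEN];
const char *user_path = "/dev/disk/by-id/ata-Example_Serial-part1";

if (set_device_path(sanitized, sizeof(sanitized), user_path,
		strnlen_s(user_path, MAX_STR_LEN)) != SUCCESS) {
	cas_printf(LOG_ERR, "Please use correct by-id path to the device %s.\n",
			user_path);
	return FAILURE;
}
/* sanitized now holds the absolute path accepted by the checks above */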
int get_core_info(int fd, int cache_id, int core_id,
struct kcas_core_info *info, bool by_id_path)
{
memset(info, 0, sizeof(*info));
info->cache_id = cache_id;
@ -575,19 +673,20 @@ int get_core_info(int fd, int cache_id, int core_id, struct kcas_core_info *info
return FAILURE;
}
/* internally use device special file path to describe core */
if (get_dev_path(info->core_path_name,
info->core_path_name,
if (!by_id_path) {
if (get_dev_path(info->core_path_name, info->core_path_name,
sizeof(info->core_path_name))) {
cas_printf(LOG_WARNING, "WARNING: Can not resolve path to core "
"%d from cache %d. By-id path will be shown for that core.\n",
cas_printf(LOG_WARNING, "WARNING: Can not resolve path to core %d "
"from cache %d. By-id path will be shown for that core.\n",
core_id, cache_id);
}
}
return SUCCESS;
}
static int get_core_device(int cache_id, int core_id, struct core_device *core)
static int get_core_device(int cache_id, int core_id,
struct core_device *core, bool by_id_path)
{
int fd;
struct kcas_core_info cmd_info;
@ -599,7 +698,7 @@ static int get_core_device(int cache_id, int core_id, struct core_device *core)
if (fd == -1)
return FAILURE;
if (get_core_info(fd, cache_id, core_id, &cmd_info)) {
if (get_core_info(fd, cache_id, core_id, &cmd_info, by_id_path)) {
cas_printf(LOG_ERR, "Error while retrieving stats\n");
print_err(cmd_info.ext_err_code);
close(fd);
@ -693,7 +792,7 @@ error_out:
*
* @return valid pointer to a structure or NULL if error happened
*/
struct cache_device *get_cache_device(const struct kcas_cache_info *info)
struct cache_device *get_cache_device(const struct kcas_cache_info *info, bool by_id_path)
{
int core_id, cache_id, ret;
struct cache_device *cache;
@ -713,8 +812,11 @@ struct cache_device *get_cache_device(const struct kcas_cache_info *info)
cache->expected_core_count = info->info.core_count;
cache->id = cache_id;
cache->state = info->info.state;
strncpy_s(cache->device, sizeof(cache->device), info->cache_path_name,
strnlen_s(info->cache_path_name, sizeof(info->cache_path_name)));
if (set_device_path(cache->device, sizeof(cache->device), info->cache_path_name,
sizeof(info->cache_path_name)) != SUCCESS) {
free(cache);
return NULL;
}
cache->mode = info->info.cache_mode;
cache->dirty = info->info.dirty;
cache->flushed = info->info.flushed;
@ -730,7 +832,7 @@ struct cache_device *get_cache_device(const struct kcas_cache_info *info)
for (cache->core_count = 0; cache->core_count < info->info.core_count; ++cache->core_count) {
core_id = info->core_id[cache->core_count];
ret = get_core_device(cache_id, core_id, &core);
ret = get_core_device(cache_id, core_id, &core, by_id_path);
if (0 != ret) {
break;
} else {
@ -752,7 +854,7 @@ struct cache_device *get_cache_device(const struct kcas_cache_info *info)
* @param cache_id cache id (1...)
* @return valid pointer to a structure or NULL if error happened
*/
struct cache_device *get_cache_device_by_id_fd(int cache_id, int fd)
struct cache_device *get_cache_device_by_id_fd(int cache_id, int fd, bool by_id_path)
{
struct kcas_cache_info cmd_info;
@ -764,7 +866,7 @@ struct cache_device *get_cache_device_by_id_fd(int cache_id, int fd)
return NULL;
}
return get_cache_device(&cmd_info);
return get_cache_device(&cmd_info, by_id_path);
}
void free_cache_devices_list(struct cache_device **caches, int caches_count)
@ -777,7 +879,7 @@ void free_cache_devices_list(struct cache_device **caches, int caches_count)
free(caches);
}
struct cache_device **get_cache_devices(int *caches_count)
struct cache_device **get_cache_devices(int *caches_count, bool by_id_path)
{
int i, fd, status, chunk_size, count;
struct kcas_cache_list cache_list;
@ -823,7 +925,8 @@ struct cache_device **get_cache_devices(int *caches_count)
/* iterate through id table and get status */
for (i = 0; i < cache_list.in_out_num; i++) {
if ((tmp_cache = get_cache_device_by_id_fd(cache_list.cache_id_tab[i], fd)) == NULL) {
if ((tmp_cache = get_cache_device_by_id_fd(cache_list.cache_id_tab[i],
fd, by_id_path)) == NULL) {
cas_printf(LOG_ERR, "Failed to retrieve cache information!\n");
continue;
}
@ -852,7 +955,7 @@ int check_cache_already_added(const char *cache_device) {
struct cache_device **caches, *curr_cache;
int caches_count, i;
caches = get_cache_devices(&caches_count);
caches = get_cache_devices(&caches_count, false);
if (NULL == caches) {
return SUCCESS;
@ -911,7 +1014,7 @@ int start_cache(uint16_t cache_id, unsigned int cache_init,
if (cache_id == 0) {
cache_id = 1;
caches = get_cache_devices(&caches_count);
caches = get_cache_devices(&caches_count, false);
if (caches != NULL) {
psort(caches, caches_count, sizeof(struct cache_device*), caches_compare);
for (i = 0; i < caches_count; ++i) {
@ -928,11 +1031,13 @@ int start_cache(uint16_t cache_id, unsigned int cache_init,
cmd.cache_id = cache_id;
cmd.init_cache = cache_init;
strncpy_s(cmd.cache_path_name,
sizeof(cmd.cache_path_name),
cache_device,
strnlen_s(cache_device,
sizeof(cmd.cache_path_name)));
if (set_device_path(cmd.cache_path_name, sizeof(cmd.cache_path_name),
cache_device, MAX_STR_LEN) != SUCCESS) {
cas_printf(LOG_ERR, "Please use correct by-id path to the device "
"%s.\n", cache_device);
close(fd);
return FAILURE;
}
cmd.caching_mode = cache_mode;
cmd.eviction_policy = eviction_policy_type;
cmd.line_size = line_size;
@ -978,7 +1083,7 @@ int start_cache(uint16_t cache_id, unsigned int cache_init,
status = SUCCESS;
for (i = 0; i < CORE_ADD_MAX_TIMEOUT; ++i) {
cache = get_cache_device_by_id_fd(cache_id, fd);
cache = get_cache_device_by_id_fd(cache_id, fd, false);
status = FAILURE;
if (cache == NULL) {
@ -1394,7 +1499,7 @@ int check_core_already_cached(const char *core_device) {
if (get_dev_path(core_device, core_device_path, sizeof(core_device_path)))
return SUCCESS;
caches = get_cache_devices(&caches_count);
caches = get_cache_devices(&caches_count, false);
if (NULL == caches) {
return SUCCESS;
@ -1483,7 +1588,7 @@ int get_inactive_core_count(const struct kcas_cache_info *cache_info)
int inactive_cores = 0;
int i;
cache = get_cache_device(cache_info);
cache = get_cache_device(cache_info, false);
if (!cache)
return -1;
@ -1586,7 +1691,7 @@ int illegal_recursive_core(unsigned int cache_id, const char *core_device, int c
* iteration of this loop*/
/* get underlying core device of dev_cache_id-dev_core_id */
cache = get_cache_device_by_id_fd(dev_cache_id, fd);
cache = get_cache_device_by_id_fd(dev_cache_id, fd, false);
if (!cache) {
cas_printf(LOG_ERR, "Failed to extract statistics for "
@ -1619,103 +1724,6 @@ int illegal_recursive_core(unsigned int cache_id, const char *core_device, int c
}
}
/* Indicate whether given entry in /dev/disk/by-id should be ignored -
we ignore software created links like 'lvm-' since these can point to
both CAS exported object and core device depending on initialization order.
*/
static bool dev_link_blacklisted(const char* entry)
{
static const char* const prefix_blacklist[] = {"lvm", "md-name"};
static const unsigned count = ARRAY_SIZE(prefix_blacklist);
const char* curr;
unsigned i;
for (i = 0; i < count; i++) {
curr = prefix_blacklist[i];
if (!strncmp(entry, curr, strnlen_s(curr, MAX_STR_LEN)))
return true;
}
return false;
}
/* get device link starting with /dev/disk/by-id */
static int get_dev_link(const char* disk, char* buf, size_t num)
{
static const char dev_by_id_dir[] = "/dev/disk/by-id";
int err;
struct dirent *entry;
DIR* dir;
char disk_dev[MAX_STR_LEN]; /* input disk device file */
char dev_by_id[MAX_STR_LEN]; /* current device path by id */
char curr_dev[MAX_STR_LEN]; /* current device file - compared against disk_dev[] */
int n;
dir = opendir(dev_by_id_dir);
if (!dir) {
/* no disk available by id? */
cas_printf(LOG_WARNING, "Unable to open disk alias directory.\n");
return FAILURE;
}
if (get_dev_path(disk, disk_dev, sizeof(disk_dev))) {
err = FAILURE;
goto close_dir;
}
err = FAILURE;
while (err != SUCCESS && (entry = readdir(dir))) {
/* check if link is blacklisted */
if (dev_link_blacklisted(entry->d_name))
continue;
/* construct device-by-id path for current device */
n = snprintf(dev_by_id, sizeof(dev_by_id), "%s/%s",
dev_by_id_dir, entry->d_name);
if (n < 0 || n >= sizeof(dev_by_id)) {
cas_printf(LOG_WARNING,
"Error constructing disk device by-link path.\n");
continue;
}
/* get device path for current device */
if (get_dev_path(dev_by_id, curr_dev, sizeof(curr_dev))) {
/* it's normal to have stale links in /dev/ - no log */
continue;
}
/* compare current device path against disk device path */
if (!strncmp(disk_dev, curr_dev, sizeof(curr_dev))) {
if (n >= num) {
cas_printf(LOG_WARNING, "Buffer to short to store device link.\n");
} else {
strncpy_s(buf, num, dev_by_id, sizeof(dev_by_id));
err = SUCCESS;
}
}
}
close_dir:
closedir(dir);
return err;
}
static int set_core_path(char *path, const char *core_device, size_t len)
{
/* attempt to get disk device path by id */
if (get_dev_link(core_device, path, len) == SUCCESS)
return SUCCESS;
/* .. if this failed, try to get standard /dev/sd* path */
if (get_dev_path(core_device, path, len) == SUCCESS)
return SUCCESS;
/* if everything else failed - fall back to user-provided path */
if (!strncpy_s(path, len, core_device, strnlen_s(core_device, MAX_STR_LEN)))
return SUCCESS;
return FAILURE;
}
int add_core(unsigned int cache_id, unsigned int core_id, const char *core_device,
int try_add, int update_path)
{
@ -1745,7 +1753,8 @@ int add_core(unsigned int cache_id, unsigned int core_id, const char *core_devic
}
memset(&cmd, 0, sizeof(cmd));
if (set_core_path(cmd.core_path_name, core_device, MAX_STR_LEN) != SUCCESS) {
if (set_device_path(cmd.core_path_name, sizeof(cmd.core_path_name),
core_device, MAX_STR_LEN) != SUCCESS) {
cas_printf(LOG_ERR, "Failed to copy core path\n");
return FAILURE;
}
@ -1758,7 +1767,7 @@ int add_core(unsigned int cache_id, unsigned int core_id, const char *core_devic
if (fd == -1)
return FAILURE;
/* check for illegal rec ursive caching config. */
/* check for illegal recursive caching config. */
if (illegal_recursive_core(cache_id, user_core_path,
user_core_path_size, fd)) {
close(fd);
@ -1894,7 +1903,8 @@ int core_pool_remove(const char *core_device)
if (fd == -1)
return FAILURE;
if (set_core_path(cmd.core_path_name, core_device, MAX_STR_LEN) != SUCCESS) {
if (set_device_path(cmd.core_path_name, sizeof(cmd.core_path_name),
core_device, MAX_STR_LEN) != SUCCESS) {
cas_printf(LOG_ERR, "Failed to copy core path\n");
close(fd);
return FAILURE;
@ -2638,7 +2648,7 @@ error_out:
return result;
}
int list_caches(unsigned int list_format)
int list_caches(unsigned int list_format, bool by_id_path)
{
struct cache_device **caches, *curr_cache;
struct kcas_core_pool_path core_pool_path_cmd = {0};
@ -2650,7 +2660,7 @@ int list_caches(unsigned int list_format)
pthread_t thread;
struct list_printout_ctx printout_ctx;
caches = get_cache_devices(&caches_count);
caches = get_cache_devices(&caches_count, by_id_path);
if (caches_count < 0) {
cas_printf(LOG_INFO, "Error getting caches list\n");
return FAILURE;
@ -2705,9 +2715,12 @@ int list_caches(unsigned int list_format)
"-" /* device */);
for (i = 0; i < core_pool_path_cmd.core_pool_count; i++) {
char *core_path = core_pool_path_cmd.core_path_tab + (MAX_STR_LEN * i);
if (!by_id_path) {
if (get_dev_path(core_path, core_path, MAX_STR_LEN)) {
cas_printf(LOG_WARNING, "WARNING: Can not resolve path to core. "
"By-id path will be shown for that core.\n");
cas_printf(LOG_WARNING, "WARNING: Can not resolve path "
"to core. By-id path will be shown for that "
"core.\n");
}
}
fprintf(intermediate_file[1], TAG(TREE_LEAF)
"%s,%s,%s,%s,%s,%s\n",
@ -2729,7 +2742,10 @@ int list_caches(unsigned int list_format)
float cache_flush_prog;
float core_flush_prog;
get_dev_path(curr_cache->device, curr_cache->device, sizeof(curr_cache->device));
if (!by_id_path) {
get_dev_path(curr_cache->device, curr_cache->device,
sizeof(curr_cache->device));
}
cache_flush_prog = calculate_flush_progress(curr_cache->dirty, curr_cache->flushed);
if (cache_flush_prog) {
@ -2805,8 +2821,10 @@ int _check_cache_device(const char *device_path,
{
int result, fd;
strncpy_s(cmd_info->path_name, sizeof(cmd_info->path_name), device_path,
strnlen_s(device_path, sizeof(cmd_info->path_name)));
if (set_device_path(cmd_info->path_name, sizeof(cmd_info->path_name),
device_path, MAX_STR_LEN) != SUCCESS) {
return FAILURE;
}
fd = open_ctrl_device();
if (fd == -1)

View File

@ -26,7 +26,7 @@
#define FAILURE 1 /**< default non-zero exit code. */
#define INTERRUPTED 2 /**< if command is interrupted */
#define SUCCESS 0 /**< 0 exit code from majority of our functions
#define SUCCESS 0 /**< 0 exit code from majority of our functions \
stands for success */
struct core_device {
@ -210,7 +210,7 @@ int set_cache_mode(unsigned int cache_state, unsigned int cache_id, int flush);
*/
int add_core(unsigned int cache_id, unsigned int core_id, const char *core_device, int try_add, int update_path);
int get_core_info(int fd, int cache_id, int core_id, struct kcas_core_info *info);
int get_core_info(int fd, int cache_id, int core_id, struct kcas_core_info *info, bool by_id_path);
int remove_core(unsigned int cache_id, unsigned int core_id,
bool detach, bool force_no_flush);
@ -234,16 +234,16 @@ int partition_is_name_valid(const char *name);
int cas_module_version(char *buff, int size);
int disk_module_version(char *buff, int size);
int list_caches(unsigned int list_format);
int list_caches(unsigned int list_format, bool by_id_path);
int cache_status(unsigned int cache_id, unsigned int core_id, int io_class_id,
unsigned int stats_filters, unsigned int stats_format);
unsigned int stats_filters, unsigned int stats_format, bool by_id_path);
int get_inactive_core_count(const struct kcas_cache_info *cache_info);
int open_ctrl_device_quiet();
int open_ctrl_device();
int *get_cache_ids(int *cache_count);
struct cache_device *get_cache_device_by_id_fd(int cache_id, int fd);
struct cache_device **get_cache_devices(int *caches_count);
struct cache_device *get_cache_device_by_id_fd(int cache_id, int fd, bool by_id_path);
struct cache_device **get_cache_devices(int *caches_count, bool by_id_path);
void free_cache_devices_list(struct cache_device **caches, int caches_count);
int validate_dev(const char *dev_path);
@ -298,6 +298,17 @@ void print_err(int error_code);
*/
int get_dev_path(const char* disk, char* buf, size_t num);
/**
* @brief verify that \a src_path refers to an accepted (by-id or whitelisted) device link and write the sanitized absolute path to \a dest_path
*
* @param[in] src_path link to device
* @param[in] src_len length of \a src_path
* @param[in] dest_len max length of \a dest_path
* @param[out] dest_path sanitized absolute path
* @return 0 on success, nonzero on failure
*/
int set_device_path(char *dest_path, size_t dest_len, const char *src_path, size_t src_len);
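
A short sketch of the documented contract from the caller's side (illustration only; /dev/sda stands in for any path the function now rejects):

char dst[MAX_STR_LEN];

/* A bare /dev/ path is neither whitelisted nor a by-id link, so the
call fails and dst must not be used. */
if (set_device_path(dst, sizeof(dst), "/dev/sda", MAX_STR_LEN) != SUCCESS)
	cas_printf(LOG_ERR, "Please use a /dev/disk/by-id/ path.\n");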
/**
* @brief convert string to int
*/

View File

@ -60,6 +60,7 @@ struct command_args{
uint32_t params_type;
uint32_t params_count;
bool verbose;
bool by_id_path;
};
static struct command_args command_args_values = {
@ -83,6 +84,7 @@ static struct command_args command_args_values = {
.no_flush = false,
.cache_device = NULL,
.core_device = NULL,
.by_id_path = false,
.params_type = 0,
.params_count = 0,
@ -153,6 +155,8 @@ int command_handle_option(char *opt, const char **arg)
command_args_values.detach = true;
} else if (!strcmp(opt, "no-flush")) {
command_args_values.no_flush = true;
} else if (!strcmp(opt, "by-id-path")) {
command_args_values.by_id_path = true;
} else {
return FAILURE;
}
@ -344,12 +348,13 @@ int handle_start()
static cli_option list_options[] = {
{'o', "output-format", "Output format: {table|csv}", 1, "FORMAT", 0},
{'b', "by-id-path", "Display by-id path to disks instead of short form /dev/sdx"},
{0}
};
int handle_list()
{
return list_caches(command_args_values.output_format);
return list_caches(command_args_values.output_format, command_args_values.by_id_path);
}
static cli_option stats_options[] = {
@ -358,6 +363,7 @@ static cli_option stats_options[] = {
{'d', "io-class-id", "Display per IO class statistics", 1, "ID", CLI_OPTION_OPTIONAL_ARG},
{'f', "filter", "Apply filters from the following set: {all, conf, usage, req, blk, err}", 1, "FILTER-SPEC"},
{'o', "output-format", "Output format: {table|csv}", 1, "FORMAT"},
{'b', "by-id-path", "Display by-id path to disks instead of short form /dev/sdx"},
{0}
};
@ -396,6 +402,10 @@ int stats_command_handle_option(char *opt, const char **arg)
command_args_values.output_format = validate_str_output_format(arg[0]);
if (OUTPUT_FORMAT_INVALID == command_args_values.output_format)
return FAILURE;
} else if (!strcmp(opt, "by-id-path")) {
command_args_values.by_id_path = true;
} else {
return FAILURE;
}
@ -409,7 +419,8 @@ int handle_stats()
command_args_values.core_id,
command_args_values.io_class_id,
command_args_values.stats_filters,
command_args_values.output_format);
command_args_values.output_format,
command_args_values.by_id_path);
}
static cli_option stop_options[] = {

View File

@ -516,7 +516,7 @@ int cache_stats_ioclasses(int ctrl_fd, const struct kcas_cache_info *cache_info,
}
int cache_stats_conf(int ctrl_fd, const struct kcas_cache_info *cache_info,
unsigned int cache_id, FILE *outfile)
unsigned int cache_id, FILE *outfile, bool by_id_path)
{
float flush_progress = 0;
float value;
@ -526,10 +526,10 @@ int cache_stats_conf(int ctrl_fd, const struct kcas_cache_info *cache_info,
char dev_path[MAX_STR_LEN];
int inactive_cores;
if (get_dev_path(cache_info->cache_path_name, dev_path, sizeof(dev_path)) != SUCCESS)
cache_path = cache_info->cache_path_name;
else
if (!by_id_path && get_dev_path(cache_info->cache_path_name, dev_path, sizeof(dev_path)) == SUCCESS)
cache_path = dev_path;
else
cache_path = cache_info->cache_path_name;
flush_progress = calculate_flush_progress(cache_info->info.dirty,
cache_info->info.flushed);
@ -602,7 +602,8 @@ void cache_stats_counters(struct kcas_get_stats *cache_stats, FILE *outfile,
}
static int cache_stats(int ctrl_fd, const struct kcas_cache_info *cache_info,
unsigned int cache_id, FILE *outfile, unsigned int stats_filters)
unsigned int cache_id, FILE *outfile, unsigned int stats_filters,
bool by_id_path)
{
struct kcas_get_stats cache_stats = {};
cache_stats.cache_id = cache_id;
@ -615,7 +616,7 @@ static int cache_stats(int ctrl_fd, const struct kcas_cache_info *cache_info,
begin_record(outfile);
if (stats_filters & STATS_FILTER_CONF)
cache_stats_conf(ctrl_fd, cache_info, cache_id, outfile);
cache_stats_conf(ctrl_fd, cache_info, cache_id, outfile, by_id_path);
if (stats_filters & STATS_FILTER_USAGE)
print_usage_stats(&cache_stats.usage, outfile);
@ -633,12 +634,12 @@ static int cache_stats(int ctrl_fd, const struct kcas_cache_info *cache_info,
int cache_stats_cores(int ctrl_fd, const struct kcas_cache_info *cache_info,
unsigned int cache_id, unsigned int core_id, int io_class_id,
FILE *outfile, unsigned int stats_filters)
FILE *outfile, unsigned int stats_filters, bool by_id_path)
{
struct kcas_core_info core_info;
struct kcas_get_stats stats;
if (get_core_info(ctrl_fd, cache_id, core_id, &core_info)) {
if (get_core_info(ctrl_fd, cache_id, core_id, &core_info, by_id_path)) {
cas_printf(LOG_ERR, "Error while retrieving stats for core %d\n", core_id);
print_err(core_info.ext_err_code);
return FAILURE;
@ -695,7 +696,7 @@ void *stats_printout(void *ctx)
* @return SUCCESS upon successful printing of statistic. FAILURE if any error happens
*/
int cache_status(unsigned int cache_id, unsigned int core_id, int io_class_id,
unsigned int stats_filters, unsigned int output_format)
unsigned int stats_filters, unsigned int output_format, bool by_id_path)
{
int ctrl_fd, i;
int ret = SUCCESS;
@ -763,13 +764,14 @@ int cache_status(unsigned int cache_id, unsigned int core_id, int io_class_id,
}
} else if (core_id == OCF_CORE_ID_INVALID) {
if (cache_stats(ctrl_fd, &cache_info, cache_id, intermediate_file[1],
stats_filters)) {
stats_filters, by_id_path)) {
ret = FAILURE;
goto cleanup;
}
} else {
if (cache_stats_cores(ctrl_fd, &cache_info, cache_id, core_id,
io_class_id, intermediate_file[1], stats_filters)) {
io_class_id, intermediate_file[1],
stats_filters, by_id_path)) {
ret = FAILURE;
goto cleanup;
}

View File

@ -19,12 +19,12 @@ class Cache:
self.__metadata_size = None
def __get_cache_id(self):
cmd = f"{list_cmd()} | grep {self.cache_device.system_path}"
cmd = f"{list_cmd(by_id_path=False)} | grep {self.cache_device.short_path}"
output = TestRun.executor.run(cmd)
if output.exit_code == 0 and output.stdout.strip():
return output.stdout.split()[1]
else:
raise Exception(f"There is no cache started on {self.cache_device.system_path}.")
raise Exception(f"There is no cache started on {self.cache_device.path}.")
def get_core_devices(self):
return get_cores(self.cache_id)

View File

@ -35,7 +35,7 @@ def start_cache(cache_dev: Device, cache_mode: CacheMode = None,
_cache_id = None if cache_id is None else str(cache_id)
_cache_mode = None if cache_mode is None else cache_mode.name.lower()
output = TestRun.executor.run(start_cmd(
cache_dev=cache_dev.system_path, cache_mode=_cache_mode, cache_line_size=_cache_line_size,
cache_dev=cache_dev.path, cache_mode=_cache_mode, cache_line_size=_cache_line_size,
cache_id=_cache_id, force=force, load=load, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to start cache.", output)
@ -53,11 +53,11 @@ def stop_cache(cache_id: int, no_data_flush: bool = False, shortcut: bool = Fals
def add_core(cache: Cache, core_dev: Device, core_id: int = None, shortcut: bool = False):
_core_id = None if core_id is None else str(core_id)
output = TestRun.executor.run(
add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.system_path,
add_core_cmd(cache_id=str(cache.cache_id), core_dev=core_dev.path,
core_id=_core_id, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to add core.", output)
core = Core(core_dev.system_path, cache.cache_id)
core = Core(core_dev.path, cache.cache_id)
return core
@ -71,18 +71,18 @@ def remove_core(cache_id: int, core_id: int, force: bool = False, shortcut: bool
def remove_detached(core_device: Device, shortcut: bool = False):
output = TestRun.executor.run(
remove_detached_cmd(core_device=core_device.system_path, shortcut=shortcut))
remove_detached_cmd(core_device=core_device.path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to remove detached core.", output)
return output
def try_add(core_device: Device, cache_id: int, core_id: int = None):
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.system_path,
output = TestRun.executor.run(script_try_add_cmd(str(cache_id), core_device.path,
str(core_id) if core_id is not None else None))
if output.exit_code != 0:
raise CmdException("Failed to execute try add script command.", output)
return Core(core_device.system_path, cache_id)
return Core(core_device.path, cache_id)
def purge_cache(cache_id: int):
@ -128,16 +128,17 @@ def flush(cache_id: int, core_id: int = None, shortcut: bool = False):
def load_cache(device: Device, shortcut: bool = False):
output = TestRun.executor.run(
load_cmd(cache_dev=device.system_path, shortcut=shortcut))
load_cmd(cache_dev=device.path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to load cache.", output)
return Cache(device)
def list_caches(output_format: OutputFormat = None, shortcut: bool = False):
def list_caches(output_format: OutputFormat = None, by_id_path: bool = True,
shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
output = TestRun.executor.run(
list_cmd(output_format=_output_format, shortcut=shortcut))
list_cmd(output_format=_output_format, by_id_path=by_id_path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to list caches.", output)
return output
@ -154,7 +155,7 @@ def print_version(output_format: OutputFormat = None, shortcut: bool = False):
def zero_metadata(cache_dev: Device, shortcut: bool = False):
output = TestRun.executor.run(
zero_metadata_cmd(cache_dev=cache_dev.system_path, shortcut=shortcut))
zero_metadata_cmd(cache_dev=cache_dev.path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Failed to wipe metadata.", output)
return output
@ -179,7 +180,8 @@ def remove_all_detached_cores():
def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = False,
io_class_id: int = None, filter: List[StatsFilter] = None,
output_format: OutputFormat = None, shortcut: bool = False):
output_format: OutputFormat = None, by_id_path: bool = True,
shortcut: bool = False):
_output_format = None if output_format is None else output_format.name
_core_id = None if core_id is None else str(core_id)
_io_class_id = None if io_class_id is None else str(io_class_id)
@ -192,7 +194,8 @@ def print_statistics(cache_id: int, core_id: int = None, per_io_class: bool = Fa
print_statistics_cmd(
cache_id=str(cache_id), core_id=_core_id,
per_io_class=per_io_class, io_class_id=_io_class_id,
filter=_filter, output_format=_output_format, shortcut=shortcut))
filter=_filter, output_format=_output_format,
by_id_path=by_id_path, shortcut=shortcut))
if output.exit_code != 0:
raise CmdException("Printing statistics failed.", output)
return output

View File

@ -2,24 +2,20 @@
# Copyright(c) 2019-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import csv
import csv
import json
import re
from api.cas import casadm
from test_utils.output import CmdException
from test_utils.size import parse_unit
from storage_devices.device import Device
from api.cas.cache_config import *
from api.cas.casadm_params import *
from api.cas.version import CasVersion
from datetime import timedelta
from typing import List
from api.cas import casadm
from api.cas.cache_config import *
from api.cas.casadm_params import *
from api.cas.version import CasVersion
from storage_devices.device import Device
from test_utils.size import parse_unit
from test_utils.output import CmdException
class Stats(dict):

View File

@ -95,7 +95,8 @@ def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = Non
def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool = False,
io_class_id: str = None, filter: str = None,
output_format: str = None, shortcut: bool = False):
output_format: str = None, by_id_path: bool = True,
shortcut: bool = False):
command = (" -P -i " if shortcut else " --stats --cache-id ") + cache_id
if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id
@ -109,6 +110,8 @@ def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool
command += (" -f " if shortcut else " --filter ") + filter
if output_format is not None:
command += (" -o " if shortcut else " --output-format ") + output_format
if by_id_path:
command += (" -b " if shortcut else " --by-id-path ")
return casadm_bin + command
@ -126,10 +129,12 @@ def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False)
return casadm_bin + command
def list_cmd(output_format: str = None, shortcut: bool = False):
def list_cmd(output_format: str = None, by_id_path: bool = True, shortcut: bool = False):
command = " -L" if shortcut else " --list-caches"
if output_format == "table" or output_format == "csv":
command += (" -o " if shortcut else " --output-format ") + output_format
if by_id_path:
command += (" -b " if shortcut else " --by-id-path ")
return casadm_bin + command
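
For illustration, assuming casadm_bin resolves to plain "casadm": list_cmd(output_format="csv", by_id_path=True) builds "casadm --list-caches --output-format csv --by-id-path", and the shortcut form builds "casadm -L -o csv -b".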

View File

@ -26,7 +26,7 @@ SEQ_CUT_OFF_THRESHOLD_DEFAULT = Size(1, Unit.MebiByte)
class Core(Device):
def __init__(self, core_device: str, cache_id: int):
self.core_device = Device(core_device)
self.system_path = None
self.path = None
core_info = self.__get_core_info()
if core_info["core_id"] != "-":
self.core_id = int(core_info["core_id"])
@ -38,14 +38,14 @@ class Core(Device):
def __get_core_info(self):
output = TestRun.executor.run(
list_cmd(OutputFormat.csv.name))
list_cmd(OutputFormat.csv.name, by_id_path=False))
if output.exit_code != 0:
raise Exception("Failed to execute list caches command.")
output_lines = output.stdout.splitlines()
for line in output_lines:
split_line = line.split(',')
if split_line[0] == "core" and (split_line[2] == self.core_device.system_path
or split_line[5] == self.system_path):
if split_line[0] == "core" and (split_line[2] == self.core_device.short_path
or split_line[5] == self.path):
return {"core_id": split_line[1],
"core_device": split_line[2],
"status": split_line[3],
@ -132,7 +132,7 @@ class Core(Device):
def check_if_is_present_in_os(self, should_be_visible=True):
device_in_system_message = "CAS device exists in OS."
device_not_in_system_message = "CAS device does not exist in OS."
item = fs_utils.ls_item(f"{self.system_path}")
item = fs_utils.ls_item(f"{self.path}")
if item is not None:
if should_be_visible:
TestRun.LOGGER.info(device_in_system_message)

View File

@ -70,11 +70,8 @@ class CacheConfigLine:
self.extra_flags = extra_flags
def __str__(self):
cache_symlink = self.cache_device.get_device_link("/dev/disk/by-id")
cache_device_path = (
cache_symlink.full_path if cache_symlink is not None else self.cache_device.system_path
)
params = [str(self.cache_id), cache_device_path, self.cache_mode.name, self.extra_flags]
params = [str(self.cache_id), self.cache_device.path,
self.cache_mode.name, self.extra_flags]
return '\t'.join(params)
@ -88,9 +85,6 @@ class CoreConfigLine:
self.extra_flags = extra_flags
def __str__(self):
core_symlink = self.core_device.get_device_link("/dev/disk/by-id")
core_device_path = (
core_symlink.full_path if core_symlink is not None else self.core_device.system_path
)
params = [str(self.cache_id), str(self.core_id), core_device_path, self.extra_flags]
params = [str(self.cache_id), str(self.core_id),
self.core_device.path, self.extra_flags]
return '\t'.join(params)

@ -1 +1 @@
Subproject commit cd1c19c5636f47b832a44040448d4a9f3e690aec
Subproject commit 13b7361c25de004fa426c12007d58731270d3cb3

View File

@ -73,7 +73,7 @@ def test_cleaning_policies_in_write_back(cleaning_policy):
with TestRun.step("Run 'fio'"):
fio = fio_prepare()
for i in range(cores_count):
fio.add_job().target(core[i].system_path)
fio.add_job().target(core[i].path)
fio.run()
time.sleep(3)
core_writes_before_wait_for_cleaning = (
@ -138,7 +138,7 @@ def test_cleaning_policies_in_write_through(cleaning_policy):
with TestRun.step("Run 'fio'"):
fio = fio_prepare()
for i in range(cores_count):
fio.add_job().target(core[i].system_path)
fio.add_job().target(core[i].path)
fio.run()
time.sleep(3)

View File

@ -57,13 +57,13 @@ def test_concurrent_cores_flush(cache_mode):
block_size = Size(4, Unit.MebiByte)
count = int(cache_size.value / 2 / block_size.value)
dd_pid = Dd().output(core1.system_path) \
dd_pid = Dd().output(core1.path) \
.input("/dev/urandom") \
.block_size(block_size) \
.count(count) \
.run_in_background()
Dd().output(core2.system_path) \
Dd().output(core2.path) \
.input("/dev/urandom") \
.block_size(block_size) \
.count(count) \
@ -160,7 +160,7 @@ def test_concurrent_caches_flush(cache_mode):
count = int(cache_size.value / block_size.value)
total_saturation = block_size * count
for core in cores:
Dd().output(core.system_path) \
Dd().output(core.path) \
.input("/dev/urandom") \
.block_size(block_size) \
.count(count) \

View File

@ -363,7 +363,7 @@ def fio_prepare(core, io_mode, io_size=io_size):
.io_engine(IoEngine.libaio)
.size(io_size)
.read_write(io_mode)
.target(core.system_path)
.target(core.path)
.direct(1)
)
return fio

View File

@ -84,14 +84,14 @@ def test_seq_cutoff_multi_core(thresholds_list, cache_mode, io_type, io_type_las
fio_job = fio.add_job(job_name=f"core_{core.core_id}")
fio_job.size(io_sizes[i])
fio_job.read_write(io_type)
fio_job.target(core.system_path)
fio_job.target(core.path)
writes_before.append(core.get_statistics().block_stats.cache.writes)
# Run random IO against the last core
fio_job = fio.add_job(job_name=f"core_{cores[-1].core_id}")
fio_job.size(io_sizes[-1])
fio_job.read_write(io_type_last)
fio_job.target(cores[-1].system_path)
fio_job.target(cores[-1].path)
writes_before.append(cores[-1].get_statistics().block_stats.cache.writes)
with TestRun.step("Running IO against all cores"):
@ -150,7 +150,7 @@ def test_seq_cutoff_thresh(threshold_param, cls, io_dir, policy, verify_type):
.io_engine(IoEngine.libaio)
.size(io_size)
.read_write(io_dir)
.target(f"{cores[0].system_path}")
.target(f"{cores[0].path}")
.direct()
).run()
@ -194,7 +194,7 @@ def test_seq_cutoff_thresh_fill(threshold_param, cls, io_dir):
.io_engine(IoEngine.libaio)
.size(cache.cache_device.size)
.read_write(io_dir)
.target(f"{cores[0].system_path}")
.target(f"{cores[0].path}")
.direct()
).run()
@ -218,7 +218,7 @@ def test_seq_cutoff_thresh_fill(threshold_param, cls, io_dir):
.io_engine(IoEngine.libaio)
.size(io_size)
.read_write(io_dir)
.target(f"{cores[0].system_path}")
.target(f"{cores[0].path}")
.direct()
).run()

View File

@ -44,7 +44,7 @@ def test_purge(purge_target):
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(100)
.block_size(Size(1, Unit.Blocks512))
.oflag("direct")

View File

@ -40,10 +40,10 @@ def test_cli_start_stop_default_id(shortcut):
if len(caches) != 1:
TestRun.fail(f"There is a wrong number of caches found in the OS: {len(caches)}. "
f"Should be only 1.")
if cache.cache_device.system_path != cache_device.system_path:
if cache.cache_device.path != cache_device.path:
TestRun.fail(f"The cache has started using a wrong device:"
f" {cache.cache_device.system_path}."
f"\nShould use {cache_device.system_path}.")
f" {cache.cache_device.path}."
f"\nShould use {cache_device.path}.")
with TestRun.step("Stop the cache."):
casadm.stop_cache(cache.cache_id, shortcut=shortcut)
@ -83,10 +83,10 @@ def test_cli_start_stop_custom_id(shortcut):
if len(caches) != 1:
TestRun.fail(f"There is a wrong number of caches found in the OS: {len(caches)}. "
f"Should be only 1.")
if cache.cache_device.system_path != cache_device.system_path:
if cache.cache_device.path != cache_device.path:
TestRun.fail(f"The cache has started using a wrong device:"
f" {cache.cache_device.system_path}."
f"\nShould use {cache_device.system_path}.")
f" {cache.cache_device.path}."
f"\nShould use {cache_device.path}.")
with TestRun.step("Stop the cache."):
casadm.stop_cache(cache.cache_id, shortcut=shortcut)
@ -127,7 +127,7 @@ def test_cli_add_remove_default_id(shortcut):
caches = casadm_parser.get_caches()
if len(caches[0].get_core_devices()) != 1:
TestRun.fail("One core should be present in the cache.")
if caches[0].get_core_devices()[0].system_path != core.system_path:
if caches[0].get_core_devices()[0].path != core.path:
TestRun.fail("The core path should be equal to the path of the core added.")
with TestRun.step("Remove the core from the cache."):
@ -180,7 +180,7 @@ def test_cli_add_remove_custom_id(shortcut):
caches = casadm_parser.get_caches()
if len(caches[0].get_core_devices()) != 1:
TestRun.fail("One core should be present in the cache.")
if caches[0].get_core_devices()[0].system_path != core.system_path:
if caches[0].get_core_devices()[0].path != core.path:
TestRun.fail("The core path should be equal to the path of the core added.")
with TestRun.step("Remove the core from the cache."):
@ -227,7 +227,7 @@ def test_cli_load_and_force(shortcut):
with TestRun.step("Try to load cache with 'force'."):
output = TestRun.executor.run(
start_cmd(cache_dev=cache_device.system_path, force=True, load=True, shortcut=shortcut)
start_cmd(cache_dev=cache_device.path, force=True, load=True, shortcut=shortcut)
)
if output.exit_code == 0:
TestRun.fail("Loading cache with 'force' option should fail.")

View File

@ -292,7 +292,7 @@ def check_seqcutoff_parameters(core, seqcutoff_params):
if failed_params:
TestRun.LOGGER.error(
f"Sequential cut-off parameters are not correct "
f"for {core.system_path}:\n{failed_params}"
f"for {core.path}:\n{failed_params}"
)

View File

@ -188,18 +188,18 @@ def base_prepare(item):
# stop only those RAIDs, which are comprised of test disks
if all(map(lambda device:
any(map(lambda disk_path:
disk_path in device.system_path,
[bd.system_path for bd in TestRun.dut.disks])),
disk_path in device.path,
[bd.path for bd in TestRun.dut.disks])),
raid.array_devices)):
raid.umount_all_partitions()
raid.remove_partitions()
raid.stop()
for device in raid.array_devices:
Mdadm.zero_superblock(device.system_path)
Mdadm.zero_superblock(device.path)
for disk in TestRun.dut.disks:
disk.umount_all_partitions()
Mdadm.zero_superblock(disk.system_path)
Mdadm.zero_superblock(disk.path)
TestRun.executor.run_expect_success("udevadm settle")
disk.remove_partitions()
create_partition_table(disk, PartitionTable.gpt)

View File

@ -40,10 +40,10 @@ def test_data_integrity_12h(cache_mode):
cache, core = prepare(cache_mode)
with TestRun.step("Fill cache"):
fill_cache(core.system_path)
fill_cache(core.path)
with TestRun.step("Run test workloads with verification"):
run_workload(core.system_path)
run_workload(core.path)
def prepare(cache_mode):

View File

@ -71,12 +71,12 @@ def test_data_integrity_5d_dss(filesystems):
with TestRun.step("Create filesystems and mount cores"):
for i, core in enumerate(cores):
mount_point = core.system_path.replace('/dev/', '/mnt/')
mount_point = core.path.replace('/dev/', '/mnt/')
if not fs_utils.check_if_directory_exists(mount_point):
fs_utils.create_directory(mount_point)
TestRun.LOGGER.info(f"Create filesystem {filesystems[i].name} on {core.system_path}")
TestRun.LOGGER.info(f"Create filesystem {filesystems[i].name} on {core.path}")
core.create_filesystem(filesystems[i])
TestRun.LOGGER.info(f"Mount filesystem {filesystems[i].name} on {core.system_path} to "
TestRun.LOGGER.info(f"Mount filesystem {filesystems[i].name} on {core.path} to "
f"{mount_point}")
core.mount(mount_point)
sync()

View File

@ -29,7 +29,7 @@ def test_another_cache_with_same_id():
cache_dev_1.create_partitions([Size(2, Unit.GibiByte)])
TestRun.executor.run_expect_success(
cli.start_cmd(
cache_dev_1.partitions[0].system_path, cache_id="1", force=True
cache_dev_1.partitions[0].path, cache_id="1", force=True
)
)
@ -38,7 +38,7 @@ def test_another_cache_with_same_id():
cache_dev_2.create_partitions([Size(2, Unit.GibiByte)])
TestRun.executor.run_expect_fail(
cli.start_cmd(
cache_dev_2.partitions[0].system_path, cache_id="1", force=True
cache_dev_2.partitions[0].path, cache_id="1", force=True
)
)
@ -69,7 +69,7 @@ def test_another_core_with_same_id():
TestRun.executor.run_expect_success(
cli.add_core_cmd(
cache_id=f"{cache.cache_id}",
core_dev=f"{core_dev_1.partitions[0].system_path}",
core_dev=f"{core_dev_1.partitions[0].path}",
core_id="1",
)
)
@ -80,7 +80,7 @@ def test_another_core_with_same_id():
TestRun.executor.run_expect_fail(
cli.add_core_cmd(
cache_id=f"{cache.cache_id}",
core_dev=f"{core_dev_2.partitions[0].system_path}",
core_dev=f"{core_dev_2.partitions[0].path}",
core_id="1",
)
)

View File

@ -202,7 +202,7 @@ def test_one_core_fail(cache_mode):
with TestRun.step("Check if core device is really out of cache."):
output = str(casadm.list_caches().stdout.splitlines())
if core_part1.system_path in output:
if core_part1.path in output:
TestRun.fail("The first core device should be unplugged!")
with TestRun.step("Check if the remaining core is able to use cache."):
@ -232,7 +232,7 @@ def dd_builder(cache_mode: CacheMode, dev: Core, size: Size):
.block_size(block_size)
.count(blocks))
if CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode):
dd.input(dev.system_path).output("/dev/null")
dd.input(dev.path).output("/dev/null")
else:
dd.input("/dev/urandom").output(dev.system_path)
dd.input("/dev/urandom").output(dev.path)
return dd

View File

@ -66,7 +66,7 @@ def test_stop_no_flush_load_cache(cache_mode, filesystem):
with TestRun.step("Try to start cache without loading metadata."):
output = TestRun.executor.run_expect_fail(cli.start_cmd(
cache_dev=str(cache_part.system_path), cache_mode=str(cache_mode.name.lower()),
cache_dev=str(cache_part.path), cache_mode=str(cache_mode.name.lower()),
force=False, load=False))
cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata)
@ -127,7 +127,7 @@ def test_stop_no_flush_load_cache_no_fs(cache_mode):
with TestRun.step("Fill exported object with data."):
dd = (Dd()
.input("/dev/zero")
.output(core.system_path)
.output(core.path)
.block_size(Size(1, Unit.Blocks4096))
.oflag("direct"))
dd.run()
@ -143,7 +143,7 @@ def test_stop_no_flush_load_cache_no_fs(cache_mode):
with TestRun.step("Try to start cache without loading metadata."):
output = TestRun.executor.run_expect_fail(cli.start_cmd(
cache_dev=str(cache_part.system_path), cache_mode=str(cache_mode.name.lower()),
cache_dev=str(cache_part.path), cache_mode=str(cache_mode.name.lower()),
force=False, load=False))
cli_messages.check_stderr_msg(output, cli_messages.start_cache_with_existing_metadata)

View File

@ -156,13 +156,13 @@ def test_add_cached_core(cache_mode):
with TestRun.step("Try adding the same core device to the second cache instance."):
output = TestRun.executor.run_expect_fail(
cli.add_core_cmd(cache_id=str(cache2.cache_id), core_dev=str(core_part.system_path),
cli.add_core_cmd(cache_id=str(cache2.cache_id), core_dev=str(core_part.path),
core_id=str(core.core_id)))
cli_messages.check_stderr_msg(output, cli_messages.add_cached_core)
with TestRun.step("Try adding the same core device to the same cache for the second time."):
output = TestRun.executor.run_expect_fail(
cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.system_path)))
cli.add_core_cmd(cache_id=str(cache1.cache_id), core_dev=str(core_part.path)))
cli_messages.check_stderr_msg(output, cli_messages.add_cached_core)
with TestRun.step("Stop caches."):

View File

@ -37,7 +37,7 @@ def test_remove_core_during_io():
.io_engine(IoEngine.libaio)
.block_size(Size(4, Unit.KibiByte))
.read_write(ReadWrite.randrw)
.target(f"{core.system_path}")
.target(f"{core.path}")
.direct(1)
.run_time(timedelta(minutes=4))
.time_based()
@ -78,7 +78,7 @@ def test_stop_cache_during_io():
.io_engine(IoEngine.libaio)
.block_size(Size(4, Unit.KibiByte))
.read_write(ReadWrite.randrw)
.target(f"{core.system_path}")
.target(f"{core.path}")
.direct(1)
.run_time(timedelta(minutes=4))
.time_based()

View File

@ -205,7 +205,7 @@ def fio_prepare(core):
.create_command()
.io_engine(IoEngine.libaio)
.read_write(ReadWrite.randrw)
.target(core.system_path)
.target(core.path)
.continue_on_error(ErrorFilter.io)
.direct(1)
.run_time(timedelta(seconds=30))

View File

@ -72,7 +72,7 @@ def test_core_inactive_stats():
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(1000)
.block_size(Size(4, Unit.KibiByte))
).run()

View File

@ -126,7 +126,7 @@ def test_flush_inactive_devices():
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to CAS device."):
run_fio([first_core.system_path, second_core.system_path])
run_fio([first_core.path, second_core.path])
with TestRun.step("Stop cache without flushing dirty data."):
cache.stop(no_data_flush=True)
@ -273,7 +273,7 @@ def test_load_cache_with_inactive_core():
plug_device.unplug()
with TestRun.step("Load cache."):
output = TestRun.executor.run(cli.load_cmd(cache_dev.system_path))
output = TestRun.executor.run(cli.load_cmd(cache_dev.path))
cli_messages.check_stderr_msg(output, cli_messages.load_inactive_core_missing)
with TestRun.step("Plug missing device and stop cache."):
@ -429,7 +429,7 @@ def test_print_statistics_inactive(cache_mode):
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run IO."):
run_fio([first_core.system_path, second_core.system_path])
run_fio([first_core.path, second_core.path])
with TestRun.step("Print statistics and check if there is no inactive usage section."):
active_stats = cache.get_statistics()
@ -460,7 +460,7 @@ def test_print_statistics_inactive(cache_mode):
time.sleep(1)
first_core_status = first_core.get_status()
if first_core_status != CoreStatus.active:
TestRun.fail(f"Core {first_core.system_path} should be in active state but it is not. "
TestRun.fail(f"Core {first_core.path} should be in active state but it is not. "
f"Actual state: {first_core_status}.")
with TestRun.step("Check cache statistics section of inactive devices."):
@ -543,7 +543,7 @@ def test_remove_detached_cores():
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to all CAS devices."):
run_fio([c.system_path for c in cores])
run_fio([c.path for c in cores])
with TestRun.step("Flush dirty data from two CAS devices and verify than other two contain "
"dirty data."):
@ -577,7 +577,7 @@ def test_remove_detached_cores():
with TestRun.step("Verify that cores are no longer listed."):
output = casadm.list_caches().stdout
for dev in core_devs:
if dev.system_path in output:
if dev.path in output:
TestRun.fail(f"CAS device is still listed in casadm list output:\n{output}")
@ -612,7 +612,7 @@ def test_remove_inactive_devices():
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes to all CAS devices."):
run_fio([c.system_path for c in cores])
run_fio([c.path for c in cores])
with TestRun.step("Flush dirty data from two CAS devices and verify than other two "
"contain dirty data."):
@ -657,7 +657,7 @@ def test_remove_inactive_devices():
"dirty CAS device as expected.")
cli_messages.check_stderr_msg(e.output, cli_messages.remove_inactive_core)
output = casadm.list_caches().stdout
if core.system_path not in output:
if core.path not in output:
TestRun.fail(f"CAS device is not listed in casadm list output but it should be."
f"\n{output}")
core.remove_core(force=True)
@ -695,7 +695,7 @@ def test_stop_cache_with_inactive_devices():
InitConfig.create_init_config_from_running_configuration()
with TestRun.step("Run random writes and verify that CAS device contains dirty data."):
run_fio([core.system_path])
run_fio([core.path])
if core.get_dirty_blocks() == Size.zero():
TestRun.fail("There is no dirty data on core device.")

View File

@ -203,10 +203,10 @@ def test_udev_cache_load(cache_mode):
elif len(caches) > 1:
caches_list = '\n'.join(caches)
TestRun.fail(f"There is more than 1 cache loaded:\n{caches_list}")
elif caches[0].cache_device.system_path != cache_dev.system_path:
elif caches[0].cache_device.path != cache_dev.path:
TestRun.fail(f"Cache loaded on wrong device. "
f"Actual: {caches[0].cache_device.system_path}, "
f"expected: {cache_dev.system_path}")
f"Actual: {caches[0].cache_device.path}, "
f"expected: {cache_dev.path}")
elif caches[0].get_cache_mode() != cache_mode:
TestRun.fail(f"Cache did load with different cache mode. "
f"Actual: {caches[0].get_cache_mode()}, expected: {cache_mode}")
@ -268,7 +268,7 @@ def test_neg_udev_cache_load():
if len(cas_devices["caches"]) != 1:
TestRun.LOGGER.error(f"There is wrong number of caches. Expected: 1, actual: "
f"{len(cas_devices['caches'])}")
elif cas_devices["caches"][1]["device"] != cache_disk.partitions[0].system_path or \
elif cas_devices["caches"][1]["device"] != cache_disk.partitions[0].path or \
CacheStatus[(cas_devices["caches"][1]["status"]).lower()] != CacheStatus.running:
TestRun.LOGGER.error(f"Cache did not load properly: {cas_devices['caches'][1]}")
if len(cas_devices["cores"]) != 2:
@ -277,7 +277,7 @@ def test_neg_udev_cache_load():
correct_core_devices = []
for i in first_cache_core_numbers:
correct_core_devices.append(core_disk.partitions[i].system_path)
correct_core_devices.append(core_disk.partitions[i].path)
for core in cas_devices["cores"].values():
if core["device"] not in correct_core_devices or \
CoreStatus[core["status"].lower()] != CoreStatus.active or \
@ -297,7 +297,7 @@ def test_neg_udev_cache_load():
core_pool_expected_devices = []
for i in range(0, cores_count):
if i not in first_cache_core_numbers:
core_pool_expected_devices.append(core_disk.partitions[i].system_path)
core_pool_expected_devices.append(core_disk.partitions[i].path)
for c in cas_devices["core_pool"]:
if c["device"] not in core_pool_expected_devices:
TestRun.LOGGER.error(f"Wrong core device added to core pool: {c}.")
@ -305,11 +305,11 @@ def test_neg_udev_cache_load():
def check_if_dev_in_core_pool(dev, should_be_in_core_pool=True):
cas_devices_dict = casadm_parser.get_cas_devices_dict()
is_in_core_pool = any(dev.system_path == d["device"] for d in cas_devices_dict["core_pool"])
is_in_core_pool = any(dev.path == d["device"] for d in cas_devices_dict["core_pool"])
if not (should_be_in_core_pool ^ is_in_core_pool):
TestRun.LOGGER.info(f"Core device {dev.system_path} is"
TestRun.LOGGER.info(f"Core device {dev.path} is"
f"{'' if should_be_in_core_pool else ' not'} listed in core pool "
f"as expected.")
else:
TestRun.fail(f"Core device {dev.system_path} is{' not' if should_be_in_core_pool else ''} "
TestRun.fail(f"Core device {dev.path} is{' not' if should_be_in_core_pool else ''} "
f"listed in core pool.")

View File

@ -64,7 +64,7 @@ def test_load_after_clean_shutdown(reboot_type, cache_mode, filesystem):
else:
power_control = TestRun.plugin_manager.get_plugin('power_control')
power_control.power_cycle()
cache_dev.system_path = cache_dev_link.get_target()
cache_dev.path = cache_dev_link.get_target()
with TestRun.step("Load cache."):
casadm.load_cache(cache_dev)

View File

@ -86,7 +86,7 @@ def test_load_x_to_one_without_params(cache_mode, cleaning_policy, cache_line_si
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Stop cache."):
@ -134,7 +134,7 @@ def test_load_x_to_one_without_params(cache_mode, cleaning_policy, cache_line_si
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Check if there are no error statistics."):
@ -213,7 +213,7 @@ def test_load_x_to_one_with_params(cache_mode, cleaning_policy, cache_line_size,
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Stop cache."):
@ -261,7 +261,7 @@ def test_load_x_to_one_with_params(cache_mode, cleaning_policy, cache_line_size,
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Check if there are no error statistics."):
@ -347,7 +347,7 @@ def test_load_x_to_one_diff_params(cache_mode, cleaning_policy, cache_line_size,
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Stop cache."):
@ -403,7 +403,7 @@ def test_load_x_to_one_diff_params(cache_mode, cleaning_policy, cache_line_size,
.num_jobs(cores_amount)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
fio.run()
with TestRun.step("Check if there are no error statistics."):

View File

@ -51,7 +51,7 @@ def test_load_occupied_id():
caches = casadm_parser.get_caches()
if len(caches) != 1:
TestRun.LOGGER.error("Inappropriate number of caches after load!")
if caches[0].cache_device.system_path != cache_device_2.system_path:
if caches[0].cache_device.path != cache_device_2.path:
TestRun.LOGGER.error("Wrong cache device system path!")
if caches[0].cache_id != 1:
TestRun.LOGGER.error("Wrong cache id.")

View File

@ -42,7 +42,7 @@ def test_write_fetch_full_misses(cache_mode, cache_line_size):
io_stats_before_io = cache_disk.get_io_stats()
blocksize = cache_line_size.value / 2
skip_size = cache_line_size.value / 2
run_fio(target=core.system_path,
run_fio(target=core.path,
operation_type=ReadWrite.write,
skip=skip_size,
blocksize=blocksize,
@ -87,7 +87,7 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
with TestRun.step("Fill core partition with pattern."):
cache_mode_traits = CacheMode.get_traits(cache_mode)
if CacheModeTrait.InsertRead in cache_mode_traits:
run_fio(target=core_part.system_path,
run_fio(target=core_part.path,
operation_type=ReadWrite.write,
blocksize=Size(4, Unit.KibiByte),
io_size=io_size,
@ -103,7 +103,7 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
with TestRun.step("Cache half of file."):
operation_type = ReadWrite.read if CacheModeTrait.InsertRead in cache_mode_traits \
else ReadWrite.write
run_fio(target=core.system_path,
run_fio(target=core.path,
operation_type=operation_type,
skip=cache_line_size.value,
blocksize=cache_line_size.value,
@ -117,7 +117,7 @@ def test_write_fetch_partial_misses(cache_mode, cache_line_size):
io_stats_before_io = cache_disk.get_io_stats()
blocksize = cache_line_size.value / 2 * 3
skip_size = cache_line_size.value / 2
run_fio(target=core.system_path,
run_fio(target=core.path,
operation_type=ReadWrite.write,
skip=skip_size,
blocksize=blocksize,

View File

@ -41,8 +41,8 @@ def test_trim_start_discard():
non_cas_part = dev.partitions[1]
with TestRun.step("Writing different pattern on partitions"):
cas_fio = write_pattern(cas_part.system_path)
non_cas_fio = write_pattern(non_cas_part.system_path)
cas_fio = write_pattern(cas_part.path)
non_cas_fio = write_pattern(non_cas_part.path)
cas_fio.run()
non_cas_fio.run()
@ -206,15 +206,15 @@ def check_discards(discards_count, device, discards_expected):
if discards_expected:
if discards_count > 0:
TestRun.LOGGER.info(
f"{discards_count} TRIM instructions generated for {device.system_path}")
f"{discards_count} TRIM instructions generated for {device.path}")
else:
TestRun.LOGGER.error(f"No TRIM instructions found in requests to {device.system_path}")
TestRun.LOGGER.error(f"No TRIM instructions found in requests to {device.path}")
else:
if discards_count > 0:
TestRun.LOGGER.error(
f"{discards_count} TRIM instructions generated for {device.system_path}")
f"{discards_count} TRIM instructions generated for {device.path}")
else:
TestRun.LOGGER.info(f"No TRIM instructions found in requests to {device.system_path}")
TestRun.LOGGER.info(f"No TRIM instructions found in requests to {device.path}")
def start_monitoring(core_dev, cache_dev, cas_dev):

View File

@ -81,7 +81,7 @@ def test_ioclass_core_id(filesystem):
if filesystem:
dd_dst_paths = [cached_mountpoint + "/test_file", not_cached_mountpoint + "/test_file"]
else:
dd_dst_paths = [core_1.system_path, core_2.system_path]
dd_dst_paths = [core_1.path, core_2.path]
for path in dd_dst_paths:
dd = (

View File

@ -39,7 +39,7 @@ def test_ioclass_directory_depth(filesystem):
cache, core = prepare()
Udev.disable()
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
@ -157,7 +157,7 @@ def test_ioclass_directory_file_operations(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}."):
f"and mounting {core.path} at {mountpoint}."):
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()
@ -290,7 +290,7 @@ def test_ioclass_directory_dir_operations(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.system_path} at {mountpoint}."):
f"and mount {core.path} at {mountpoint}."):
core.create_filesystem(fs_type=filesystem)
core.mount(mount_point=mountpoint)
sync()

View File

@ -53,7 +53,7 @@ def test_ioclass_file_extension():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}."):
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
@ -128,7 +128,7 @@ def test_ioclass_file_name_prefix():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}"):
with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}"):
previous_occupancy = cache.get_occupancy()
core.create_filesystem(Filesystem.ext3)
@ -285,7 +285,7 @@ def test_ioclass_file_offset():
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare filesystem and mount {core.system_path} at {mountpoint}."):
with TestRun.step(f"Prepare filesystem and mount {core.path} at {mountpoint}."):
core.create_filesystem(Filesystem.ext3)
core.mount(mountpoint)
@ -374,7 +374,7 @@ def test_ioclass_file_size(filesystem):
with TestRun.step("Prepare and load IO class config."):
load_file_size_io_classes(cache, base_size)
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)

View File

@ -53,7 +53,7 @@ def test_ioclass_process_name():
dd = (
Dd()
.input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(dd_count)
.block_size(dd_size)
.seek(i)
@ -91,7 +91,7 @@ def test_ioclass_pid():
dd_command = str(
Dd()
.input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(dd_count)
.block_size(dd_size)
)

View File

@ -39,7 +39,7 @@ def test_ioclass_usage_sum():
Udev.disable()
with TestRun.step(
f"Prepare filesystem and mount {core.system_path} at {mountpoint}"
f"Prepare filesystem and mount {core.path} at {mountpoint}"
):
filesystem = Filesystem.xfs
core.create_filesystem(filesystem)

View File

@ -66,7 +66,7 @@ def test_ioclass_lba():
for lba in range(min_cached_lba, max_cached_lba, 8):
dd = (
Dd().input("/dev/zero")
.output(f"{core.system_path}")
.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.seek(lba)
@ -90,7 +90,7 @@ def test_ioclass_lba():
continue
dd = (
Dd().input("/dev/zero")
.output(f"{core.system_path}")
.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.seek(rand_lba)
@ -140,7 +140,7 @@ def test_ioclass_request_size():
req_size = random.choice(cached_req_sizes)
dd = (
Dd().input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(1)
.block_size(req_size)
.oflag("direct")
@ -163,7 +163,7 @@ def test_ioclass_request_size():
req_size = random.choice(not_cached_req_sizes)
dd = (
Dd().input("/dev/zero")
.output(core.system_path)
.output(core.path)
.count(1)
.block_size(req_size)
.oflag("direct")
@ -212,12 +212,12 @@ def test_ioclass_direct(filesystem):
.io_engine(IoEngine.libaio) \
.size(io_size).offset(io_size) \
.read_write(ReadWrite.write) \
.target(f"{mountpoint}/tmp_file" if filesystem else core.system_path)
.target(f"{mountpoint}/tmp_file" if filesystem else core.path)
with TestRun.step("Prepare filesystem."):
if filesystem:
TestRun.LOGGER.info(
f"Preparing {filesystem.name} filesystem and mounting {core.system_path} at"
f"Preparing {filesystem.name} filesystem and mounting {core.path} at"
f" {mountpoint}"
)
core.create_filesystem(filesystem)
@ -305,7 +305,7 @@ def test_ioclass_metadata(filesystem):
)
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.system_path} "
with TestRun.step(f"Prepare {filesystem.name} filesystem and mount {core.path} "
f"at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
@ -444,7 +444,7 @@ def test_ioclass_id_as_condition(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.system_path} at {mountpoint}."):
f"and mount {core.path} at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(base_dir_path)
@ -553,7 +553,7 @@ def test_ioclass_conditions_or(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
with TestRun.step(f"Prepare {filesystem.name} filesystem "
f"and mount {core.system_path} at {mountpoint}."):
f"and mount {core.path} at {mountpoint}."):
core.create_filesystem(filesystem)
core.mount(mountpoint)
for i in range(1, 6):
@ -614,7 +614,7 @@ def test_ioclass_conditions_and(filesystem):
casadm.load_io_classes(cache_id=cache.cache_id, file=ioclass_config_path)
TestRun.LOGGER.info(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}")
f"and mounting {core.path} at {mountpoint}")
core.create_filesystem(filesystem)
core.mount(mountpoint)
sync()
@ -662,7 +662,7 @@ def test_ioclass_effective_ioclass(filesystem):
f"file_size:ge:{file_size_bytes // 2}"]
with TestRun.LOGGER.step(f"Preparing {filesystem.name} filesystem "
f"and mounting {core.system_path} at {mountpoint}"):
f"and mounting {core.path} at {mountpoint}"):
core.create_filesystem(filesystem)
core.mount(mountpoint)
fs_utils.create_directory(test_dir)

View File

@ -72,7 +72,7 @@ def test_acp_functional(cache_mode):
.direct()
.size(chunk_size)
.block_size(Size(1, Unit.Blocks4096))
.target(f"{core.system_path}"))
.target(f"{core.path}"))
for chunk in chunk_list:
fio.add_job().offset(chunk.offset).io_size(chunk.writes_size)
fio.run()

View File

@ -88,8 +88,8 @@ def test_recovery_all_options(cache_mode, cache_line_size, cleaning_policy, file
core.unmount()
TestRun.LOGGER.info(f"Number of dirty blocks in cache: {cache.get_dirty_blocks()}")
power_cycle_dut()
cache_device.system_path = cache_device_link.get_target()
core_device.system_path = core_device_link.get_target()
cache_device.path = cache_device_link.get_target()
core_device.path = core_device_link.get_target()
with TestRun.step("Try to start cache without load and force option."):
try:

View File

@ -56,12 +56,12 @@ def test_recovery_flush_reset_raw(cache_mode):
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
with TestRun.step("Copy file to CAS."):
copy_file(source=source_file.full_path, target=core.system_path, size=test_file_size,
copy_file(source=source_file.full_path, target=core.path, size=test_file_size,
direct="oflag")
with TestRun.step("Sync and flush buffers."):
os_utils.sync()
output = TestRun.executor.run(f"hdparm -f {core.system_path}")
output = TestRun.executor.run(f"hdparm -f {core.path}")
if output.exit_code != 0:
raise CmdException("Error during hdparm", output)
@ -70,8 +70,8 @@ def test_recovery_flush_reset_raw(cache_mode):
with TestRun.step("Hard reset DUT during data flushing."):
power_cycle_dut(wait_for_flush_begin=True, core_device=core_device)
cache_device.system_path = cache_device_link.get_target()
core_device.system_path = core_device_link.get_target()
cache_device.path = cache_device_link.get_target()
core_device.path = core_device_link.get_target()
with TestRun.step("Copy file from core and check if current md5sum is different than "
"before restart."):
@ -155,8 +155,8 @@ def test_recovery_flush_reset_fs(cache_mode, fs):
with TestRun.step("Hard reset DUT during data flushing."):
power_cycle_dut(True, core_device)
cache_device.system_path = cache_device_link.get_target()
core_device.system_path = core_device_link.get_target()
cache_device.path = cache_device_link.get_target()
core_device.path = core_device_link.get_target()
with TestRun.step("Load cache."):
cache = casadm.load_cache(cache_device)
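
The recovery tests re-resolve every device link after the power cycle (the get_target() calls above) because the kernel is free to enumerate disks in a different order on the next boot; only the by-id link is stable, not the /dev/sdX name behind it. A minimal sketch of that re-resolution, assuming the link object simply wraps a /dev/disk/by-id symlink:

import os

def get_target(link_path):
    # Follow the by-id symlink to whatever kernel device it points at right now.
    return os.path.realpath(link_path)

# Hypothetical flow mirroring the tests: the link is constant, the target may move.
cache_link = "/dev/disk/by-id/ata-EXAMPLE_DISK"
before_reboot = get_target(cache_link)   # e.g. /dev/sdb
# ... power cycle ...
after_reboot = get_target(cache_link)    # possibly /dev/sdc now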

View File

@ -131,7 +131,7 @@ def test_recovery_unplug_cache_raw(cache_mode, cls):
core = cache.add_core(core_device)
with TestRun.step("Copy file to CAS."):
copy_file(source=source_file.full_path, target=core.system_path,
copy_file(source=source_file.full_path, target=core.path,
size=test_file_size, direct="oflag")
TestRun.LOGGER.info(str(core.get_statistics()))
@ -156,7 +156,7 @@ def test_recovery_unplug_cache_raw(cache_mode, cls):
cache.stop()
with TestRun.step("Copy file from core device and check md5sum."):
copy_file(source=core_device.system_path, target=target_file.full_path,
copy_file(source=core_device.path, target=target_file.full_path,
size=test_file_size, direct="iflag")
compare_files(source_file, target_file)

View File

@ -170,4 +170,4 @@ def test_flush_over_640_gibibytes_raw_device(cache_mode):
def check_disk_size(device: Device):
if device.size < required_disk_size:
pytest.skip(f"Not enough space on device {device.system_path}.")
pytest.skip(f"Not enough space on device {device.path}.")

View File

@ -60,7 +60,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Write data to the exported object."):
test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
dd = Dd().output(core.system_path) \
dd = Dd().output(core.path) \
.input(test_file_main.full_path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
@ -85,7 +85,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Read data from the exported object."):
test_file_1 = File.create_file("/tmp/test_file_1")
dd = Dd().output(test_file_1.full_path) \
.input(core.system_path) \
.input(core.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@ -100,7 +100,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Read data from the core device."):
test_file_2 = File.create_file("/tmp/test_file_2")
dd = Dd().output(test_file_2.full_path) \
.input(core_part.system_path) \
.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@ -133,7 +133,7 @@ def test_clean_stop_cache(cache_mode):
with TestRun.step("Read data from the core device."):
test_file_3 = File.create_file("/tmp/test_file_2")
dd = Dd().output(test_file_3.full_path) \
.input(core_part.system_path) \
.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@ -277,7 +277,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Write data to exported object."):
test_file_main = create_random_test_file("/tmp/test_file_main", Size(64, Unit.MebiByte))
dd = Dd().output(core.system_path) \
dd = Dd().output(core.path) \
.input(test_file_main.full_path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
@ -302,7 +302,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Read data from the exported object."):
test_file_1 = File.create_file("/tmp/test_file_1")
dd = Dd().output(test_file_1.full_path) \
.input(core.system_path) \
.input(core.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@ -317,7 +317,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Read data from the core device."):
test_file_2 = File.create_file("/tmp/test_file_2")
dd = Dd().output(test_file_2.full_path) \
.input(core_part.system_path) \
.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")
@ -350,7 +350,7 @@ def test_clean_remove_core_without_fs(cache_mode):
with TestRun.step("Read data from core device again."):
test_file_3 = File.create_file("/tmp/test_file_3")
dd = Dd().output(test_file_3.full_path) \
.input(core_part.system_path) \
.input(core_part.path) \
.block_size(bs) \
.count(int(test_file_main.size / bs)) \
.oflag("direct")

View File

@ -76,7 +76,7 @@ def test_user_cli():
with TestRun.step("Try to start cache."):
try:
output = run_as_other_user(cli.start_cmd(cache_dev.system_path), user_name)
output = run_as_other_user(cli.start_cmd(cache_dev.path), user_name)
if output.exit_code == 0:
TestRun.LOGGER.error("Starting cache should fail!")
except CmdException:
@ -105,7 +105,7 @@ def test_user_cli():
with TestRun.step("Try to add core to cache."):
try:
output = run_as_other_user(cli.add_core_cmd(str(cache.cache_id),
core_part2.system_path), user_name)
core_part2.path), user_name)
if output.exit_code == 0:
TestRun.LOGGER.error("Adding core to cache should fail!")
except CmdException:
@ -244,7 +244,7 @@ def test_user_cli():
with TestRun.step("Try to start cache with 'sudo'."):
try:
run_as_other_user(cli.start_cmd(cache_dev.system_path, force=True), user_name, True)
run_as_other_user(cli.start_cmd(cache_dev.path, force=True), user_name, True)
except CmdException:
TestRun.LOGGER.error("Non-root sudoer user should be able to start cache.")
@ -259,7 +259,7 @@ def test_user_cli():
with TestRun.step("Try to add core to cache with 'sudo'."):
try:
run_as_other_user(cli.add_core_cmd(str(cache.cache_id),
core_part1.system_path), user_name, True)
core_part1.path), user_name, True)
except CmdException:
TestRun.LOGGER.error("Non-root sudoer user should be able to add core to cache.")

View File

@ -100,7 +100,7 @@ def test_block_stats_write(cache_mode, zero_stats):
dd = (
Dd()
.input("/dev/zero")
.output(f"{core.system_path}")
.output(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.oflag("direct")
@ -225,7 +225,7 @@ def test_block_stats_read(cache_mode, zero_stats):
dd = (
Dd()
.output("/dev/zero")
.input(f"{core.system_path}")
.input(f"{core.path}")
.count(dd_count)
.block_size(dd_size)
.iflag("direct")

View File

@ -271,8 +271,8 @@ def dd_builder(cache_mode, cache_line_size, count, device):
.count(count))
if CacheModeTrait.InsertRead in CacheMode.get_traits(cache_mode):
dd.input(device.system_path).output("/dev/null").iflag("direct")
dd.input(device.path).output("/dev/null").iflag("direct")
else:
dd.input("/dev/urandom").output(device.system_path).oflag("direct")
dd.input("/dev/urandom").output(device.path).oflag("direct")
return dd

View File

@ -66,7 +66,7 @@ def test_cache_config_stats():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
fio.add_job().target(cores[i][j].system_path)
fio.add_job().target(cores[i][j].path)
fio_pid = fio.run_in_background()
with TestRun.step(f"Wait {time_to_wait} seconds"):
@ -107,7 +107,7 @@ def test_core_config_stats():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
fio.add_job().target(cores[i][j].system_path)
fio.add_job().target(cores[i][j].path)
fio_pid = fio.run_in_background()
with TestRun.step(f"Wait {time_to_wait} seconds"):
@ -255,11 +255,11 @@ def validate_cache_config_statistics(caches, after_io: bool = False):
failed_stats += (
f"For cache number {caches[i].cache_id} cache ID is "
f"{caches_stats[i].config_stats.cache_id}\n")
if caches_stats[i].config_stats.cache_dev != caches[i].cache_device.system_path:
if caches_stats[i].config_stats.cache_dev != caches[i].cache_device.path:
failed_stats += (
f"For cache number {caches[i].cache_id} cache device "
f"is {caches_stats[i].config_stats.cache_dev}, "
f"should be {caches[i].cache_device.system_path}\n")
f"should be {caches[i].cache_device.path}\n")
if caches_stats[i].config_stats.cache_size.value != caches[i].size.value:
failed_stats += (
f"For cache number {caches[i].cache_id} cache size is "
@ -344,23 +344,23 @@ def validate_core_config_statistics(cores, caches=None):
for j in range(cores_per_cache)
]
for j in range(cores_per_cache):
if cores_stats[j].config_stats.exp_obj != cores[i][j].system_path:
if cores_stats[j].config_stats.exp_obj != cores[i][j].path:
failed_stats += (
f"For exported object {cores[i][j].system_path} "
f"For exported object {cores[i][j].path} "
f"value in stats is {cores_stats[j].config_stats.exp_obj}\n")
if cores_stats[j].config_stats.core_id != cores[i][j].core_id:
failed_stats += (
f"For exported object {cores[i][j].system_path} "
f"For exported object {cores[i][j].path} "
f"core ID is {cores_stats[j].config_stats.core_id}, "
f"should be {cores[i][j].core_id}\n")
if cores_stats[j].config_stats.core_dev != cores[i][j].core_device.system_path:
if cores_stats[j].config_stats.core_dev != cores[i][j].core_device.path:
failed_stats += (
f"For exported object {cores[i][j].system_path} "
f"For exported object {cores[i][j].path} "
f"core device is {cores_stats[j].config_stats.core_dev}, "
f"should be {cores[i][j].core_device.system_path}\n")
f"should be {cores[i][j].core_device.path}\n")
if cores_stats[j].config_stats.core_size.value != cores[i][j].size.value:
failed_stats += (
f"For exported object {cores[i][j].system_path} "
f"For exported object {cores[i][j].path} "
f"core size is {cores_stats[j].config_stats.core_size.value}, "
f"should be {cores[i][j].size.value}\n")
if (
@ -368,16 +368,16 @@ def validate_core_config_statistics(cores, caches=None):
!= cores[i][j].get_status()
):
failed_stats += (
f"For exported object {cores[i][j].system_path} core "
f"For exported object {cores[i][j].path} core "
f"status is {cores_stats[j].config_stats.status}, should be "
f"{str(cores[i][j].get_status()).split('.')[1].capitalize()}\n")
if cores_stats[j].config_stats.seq_cutoff_policy is None:
failed_stats += (
f"For exported object {cores[i][j].system_path} value of "
f"For exported object {cores[i][j].path} value of "
f"Sequential cut-off policy should not be empty\n")
if cores_stats[j].config_stats.seq_cutoff_threshold.value <= 0:
failed_stats += (
f"For exported object {cores[i][j].system_path} value of "
f"For exported object {cores[i][j].path} value of "
f"Sequential cut-off threshold should be greater then 0\n")
if caches:
cache_mode = CacheMode[
@ -386,21 +386,21 @@ def validate_core_config_statistics(cores, caches=None):
if CacheModeTrait.LazyWrites in CacheMode.get_traits(cache_mode):
if cores_stats[j].config_stats.dirty_for.total_seconds() <= 0:
failed_stats += (
f"For exported object {cores[i][j].system_path} in "
f"For exported object {cores[i][j].path} in "
f"{cache_mode} cache mode, value of 'Dirty for' "
f"after IO is {cores_stats[j].config_stats.dirty_for}, "
f"should be greater then 0\n")
else:
if cores_stats[j].config_stats.dirty_for.total_seconds() != 0:
failed_stats += (
f"For exported object {cores[i][j].system_path} in "
f"For exported object {cores[i][j].path} in "
f"{cache_mode} cache mode, value of 'Dirty for' "
f"after IO is {cores_stats[j].config_stats.dirty_for}, "
f"should equal 0\n")
else:
if cores_stats[j].config_stats.dirty_for.total_seconds() < 0:
failed_stats += (
f"For exported object {cores[i][j].system_path} value of "
f"For exported object {cores[i][j].path} value of "
f"'Dirty for' is {cores_stats[j].config_stats.dirty_for}, "
f"should be greater or equal 0\n")
@ -412,7 +412,7 @@ def validate_core_config_statistics(cores, caches=None):
def validate_statistics_flat(device, stats, stat_filter, per_core: bool):
device_name = (
f"core device {device.system_path}" if per_core else
f"core device {device.path}" if per_core else
f"cache number {device.cache_id}")
failed_stats = ""
if stat_filter == StatsFilter.usage:

View File

@ -310,7 +310,7 @@ def prepare(random_cls, cache_count=1, cores_per_cache=1):
Udev.disable()
caches, cores = [], []
for i, cache_device in enumerate(cache_devices):
TestRun.LOGGER.info(f"Starting cache on {cache_device.system_path}")
TestRun.LOGGER.info(f"Starting cache on {cache_device.path}")
cache = casadm.start_cache(cache_device,
force=True,
cache_mode=cache_modes[i],
@ -320,7 +320,7 @@ def prepare(random_cls, cache_count=1, cores_per_cache=1):
cache.set_cleaning_policy(CleaningPolicy.nop)
for core_device in core_devices[i * cores_per_cache:(i + 1) * cores_per_cache]:
TestRun.LOGGER.info(
f"Adding core device {core_device.system_path} to cache {cache.cache_id}")
f"Adding core device {core_device.path} to cache {cache.cache_id}")
core = cache.add_core(core_dev=core_device)
core.reset_counters()
cores.append(core)

View File

@ -78,7 +78,7 @@ def test_stat_max_cache():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
fio.add_job().target(cores[i][j].system_path)
fio.add_job().target(cores[i][j].path)
fio.run()
sleep(3)
@ -128,7 +128,7 @@ def test_stat_max_core(cache_mode):
with TestRun.step("Run 'fio'"):
fio = fio_prepare()
for j in range(cores_per_cache):
fio.add_job().target(cores[j].system_path)
fio.add_job().target(cores[j].path)
fio.run()
sleep(3)

View File

@ -61,7 +61,7 @@ def test_stats_values():
fio = fio_prepare()
for i in range(caches_count):
for j in range(cores_per_cache):
fio.add_job().target(cores[i][j].system_path)
fio.add_job().target(cores[i][j].path)
fio.run()
sleep(3)
@ -156,22 +156,22 @@ def check_stats_initial(caches, cores):
if stat_name.lower() == "free":
if stat_value != caches[i].size.value:
TestRun.LOGGER.error(
f"For core device {cores[i][j].system_path} "
f"For core device {cores[i][j].path} "
f"value for '{stat_name}' is {stat_value}, "
f"should equal cache size: {caches[i].size.value}\n")
elif stat_value != 0:
TestRun.LOGGER.error(
f"For core device {cores[i][j].system_path} value for "
f"For core device {cores[i][j].path} value for "
f"'{stat_name}' is {stat_value}, should equal 0\n")
for stat_name, stat_value in cores_stats_perc[j].items():
if stat_name.lower() == "free":
if stat_value != 100:
TestRun.LOGGER.error(
f"For core device {cores[i][j].system_path} percentage value "
f"For core device {cores[i][j].path} percentage value "
f"for '{stat_name}' is {stat_value}, should equal 100\n")
elif stat_value != 0:
TestRun.LOGGER.error(
f"For core device {cores[i][j].system_path} percentage value "
f"For core device {cores[i][j].path} percentage value "
f"for '{stat_name}' is {stat_value}, should equal 0\n")
@ -191,7 +191,7 @@ def check_stats_after_io(caches, cores, after_reload: bool = False):
)
for j in range(cores_per_cache):
fail_message = (
f"For core device {cores[i][j].system_path} in {cache_mode} cache mode ")
f"For core device {cores[i][j].path} in {cache_mode} cache mode ")
if after_reload:
validate_usage_stats(
cores_stats[j], cores_stats_perc[j], caches[i], cache_mode, fail_message)

View File

@ -194,7 +194,7 @@ def test_kedr_basic_io_raw(module, unload_modules, install_kedr):
.run_time(timedelta(minutes=4))
.time_based()
.read_write(ReadWrite.randrw)
.target(f"{core.system_path}")
.target(f"{core.path}")
.direct()
).run()

View File

@ -79,7 +79,7 @@ def test_stress_small_cas_device(cache_line_size, cores_number, cache_config):
.num_jobs(cores_number)
.cpus_allowed_policy(CpusAllowedPolicy.split))
for core in cores:
fio.add_job(f"job_{core.core_id}").target(core.system_path)
fio.add_job(f"job_{core.core_id}").target(core.path)
output = fio.run()[0]
TestRun.LOGGER.info(f"Total read I/O [KiB]: {str(output.read_io())}\n"
f"Total write I/O [KiB]: {str(output.write_io())}")
@ -88,7 +88,7 @@ def test_stress_small_cas_device(cache_line_size, cores_number, cache_config):
md5sum_core = []
for core in cores:
md5sum_core.append(TestRun.executor.run(
f"md5sum -b {core.system_path}").stdout.split(" ")[0])
f"md5sum -b {core.path}").stdout.split(" ")[0])
with TestRun.step("Stop cache."):
cache.stop()
@ -97,7 +97,7 @@ def test_stress_small_cas_device(cache_line_size, cores_number, cache_config):
md5sum_core_dev = []
for core_dev in core_dev.partitions:
md5sum_core_dev.append(TestRun.executor.run(
f"md5sum -b {core_dev.system_path}").stdout.split(" ")[0])
f"md5sum -b {core_dev.path}").stdout.split(" ")[0])
with TestRun.step("Compare md5 sum of exported objects and cores."):
if md5sum_core_dev != md5sum_core:

View File

@ -105,5 +105,5 @@ def run_io(exported_objects):
.io_depth(32) \
.run_time(timedelta(minutes=5)) \
.num_jobs(5) \
.target(exported_objects[i].system_path)
.target(exported_objects[i].path)
fio.run_in_background()

View File

@ -39,8 +39,8 @@ def test_trim_start_discard():
non_cas_part = dev.partitions[1]
with TestRun.step("Writing different pattern on partitions"):
cas_fio = write_pattern(cas_part.system_path)
non_cas_fio = write_pattern(non_cas_part.system_path)
cas_fio = write_pattern(cas_part.path)
non_cas_fio = write_pattern(non_cas_part.path)
cas_fio.run()
non_cas_fio.run()

View File

@ -44,7 +44,7 @@ def test_discard_on_huge_core():
# RCU-sched type stall sometimes appears in dmesg log after more
# than one execution of blkdiscard.
for _ in range(8):
TestRun.executor.run_expect_success(f"blkdiscard {core.system_path}")
TestRun.executor.run_expect_success(f"blkdiscard {core.path}")
with TestRun.step("Check dmesg for RCU-sched stall."):
check_for_rcu_sched_type_stall()

View File

@ -28,13 +28,13 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS_OPTION="1" make_primary_partitions
# Start cache with ID=1 on device ${CACHE_DEVICE}1 (/dev/sda1, for instance)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Use the same device but a different ID - negative test
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" NEGATIVE_TEST_OPTION="1" start_cache
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" NEGATIVE_TEST_OPTION="1" start_cache
# Use the same ID but a different device - another negative test
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" NEGATIVE_TEST_OPTION="1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" NEGATIVE_TEST_OPTION="1" start_cache
# Clear up after test
CACHE_ID_OPTION="1" stop_cache

View File

@ -31,22 +31,22 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE1 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Try to add already taken CORE device and a non-existing core to cache 1
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Try to add already taken CORE device and a non-existing core to cache 2
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Remove the core device from cache
CACHE_ID_OPTION="1" CORE_ID_OPTION="1" remove_core

View File

@ -28,21 +28,21 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Create 3 primary partitions on CORE_DEVICE, each of 4000M size
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
run_cmd dd if=/dev/zero of="${CORE_DEVICE}1" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}2" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}3" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}-part1" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}-part2" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}-part3" bs=1M count=1 oflag=direct
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
@ -68,7 +68,7 @@ done
for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
CACHE_ID_OPTION="$ID" stop_cache
run_cmd "mount ${CORE_DEVICE}${ID} ${MOUNTPOINT}-${ID}-1"
run_cmd "mount ${CORE_DEVICE}-part${ID} ${MOUNTPOINT}-${ID}-1"
done
# Now check for files' presence and umount core devices

View File

@ -31,7 +31,7 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
# Try to start positive caches in loop and later stop them - if any of those operations fails, it
# means the cache ID is invalid
for ID in $POSITIVE_IDS ; do
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="$ID" stop_cache
done
@ -41,7 +41,7 @@ done
# automatically.
for ID in $NEGATIVE_IDS ; do
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="$ID"
CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="yes" start_cache
CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="yes" start_cache
done
# Always return 0 at the end of the test - if at any point something has failed

View File

@ -4,7 +4,7 @@
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
#DESCRIPTION --set-param option fuzzing.
# DESCRIPTION --set-param option fuzzing.
# This test checks whether the CLI accepts correct data and rejects incorrect
# data for "--flush-parameters" option. It tries to invoke CLI using different
@ -22,15 +22,9 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS_OPTION="1" make_primary_partitions
# create cache in WT mode and try to change flush-parameters
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" start_cache
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="1" start_cache
# changing flush parameters should be prohibited while core is added to cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="$CORE_DEVICE" add_core
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="5" NEGATIVE_TEST_OPTION="1" set_flush_params
# remove core
sleep 1
CACHE_ID_OPTION="1" CORE_ID_OPTION="1" remove_core
printf "\n============Running negative tests============\n"
@ -45,8 +39,8 @@ CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STA
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="-1" ACTIVITY_THRESH_OPTION="5" NEGATIVE_TEST_OPTION="1" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="-1" NEGATIVE_TEST_OPTION="1" set_flush_params
# test for 0 wake_up_time and 0 flush buffers
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="0" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="5" NEGATIVE_TEST_OPTION="1" set_flush_params
# test for 0 staleness-time and 0 flush buffers
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="0" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="5" NEGATIVE_TEST_OPTION="1" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="0" ACTIVITY_THRESH_OPTION="5" NEGATIVE_TEST_OPTION="1" set_flush_params
printf "\n============Running positive tests============\n"
@ -58,9 +52,10 @@ CACHE_ID_OPTION="1" CLEAN_POL_OPTION="acp" set_cleaning_policy
CACHE_ID_OPTION="1" CLEAN_POL_OPTION="alru" set_cleaning_policy
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="1" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="500" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="500" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="0" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="0" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="500" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="100" ACTIVITY_THRESH_OPTION="500" set_flush_params
CACHE_ID_OPTION="1" CLEAN_POL_NS_OPTION="cleaning-alru" WAKE_UP_OPTION="100" STALE_TIME_OPTION="50" FLUSH_BUFFERS_OPTION="1" ACTIVITY_THRESH_OPTION="500" set_flush_params
# stop cache now
CACHE_ID_OPTION="1" stop_cache

View File

@ -42,14 +42,14 @@ do
echo "------Start CAS Linux in $mode mode"
# This is where the real test starts
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}"1 CACHE_MODE_OPTION="$mode"
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}"-part1 CACHE_MODE_OPTION="$mode"
CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}"1 add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}"-part1 add_core
sleep 2
# iostat read
TMP_CACHE_DEVICE=$(echo "${CACHE_DEVICE}" | cut -c6-)
TMP_CACHE_DEVICE=$(echo "$(realpath ${CACHE_DEVICE})" | cut -c6-)
run_cmd "dd if=/dev/cas1-1 of=$TMP_DIR/file001.bin bs=$BLOCK_SIZE count=$COUNT skip=10000 iflag=direct"
READ_CACHE_1=$(iostat "${CACHE_DEVICE}" | grep $TMP_CACHE_DEVICE | awk 'NR==1 {print $5}')
run_cmd "dd if=/dev/cas1-1 of=$TMP_DIR/file001.bin bs=$BLOCK_SIZE count=$COUNT skip=10000 iflag=direct"
@ -74,14 +74,14 @@ do
echo "------Start CAS Linux in $mode mode"
# This is where the real test starts
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}"1 CACHE_MODE_OPTION="$mode"
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}"-part1 CACHE_MODE_OPTION="$mode"
CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}"1 add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}"-part1 add_core
sleep 2
# iostat write
TMP_CORE_DEVICE=$(echo "${CORE_DEVICE}" | cut -c6-)
TMP_CORE_DEVICE=$(echo "$(realpath ${CORE_DEVICE})" | cut -c6-)
WRITE_CORE_0=$(iostat "${CORE_DEVICE}" | grep $TMP_CORE_DEVICE | awk 'NR==1 {print $6}')
WRITE_CACHE_0=$(iostat "${CACHE_DEVICE}" | grep $TMP_CACHE_DEVICE | awk 'NR==1 {print $6}')
run_cmd "dd if=$TMP_DIR/file001.bin of=/dev/cas1-1 bs=$BLOCK_SIZE count=$COUNT seek=20000 oflag=direct"

View File

@ -28,12 +28,12 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1" make_primary_partitions
# Start caches
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" start_cache
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" start_cache
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" start_cache
#Assembly multi-level cache (add cores)
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${DEVICE_NAME}1-1" add_core
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${DEVICE_NAME}2-1" add_core

View File

@ -31,16 +31,16 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
@ -67,8 +67,8 @@ for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
CACHE_ID_OPTION="$ID" flush_cache
CACHE_ID_OPTION="$ID" CACHE_DONT_FLUSH_DATA_OPTION="1" stop_cache
run_cmd dd if=/dev/zero of="${CACHE_DEVICE}${ID}" bs=1M count=1 oflag=direct
run_cmd "mount ${CORE_DEVICE}${ID} ${MOUNTPOINT}-${ID}-1"
run_cmd dd if=/dev/zero of="${CACHE_DEVICE}-part${ID}" bs=1M count=1 oflag=direct
run_cmd "mount ${CORE_DEVICE}-part${ID} ${MOUNTPOINT}-${ID}-1"
done
# Now check for files' presence and umount core devices

View File

@ -31,16 +31,16 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" CACHE_FORCE_OPTION="1" CACHE_MODE_OPTION=$CACHE_MODE start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
@ -67,8 +67,8 @@ for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
CACHE_ID_OPTION="$ID" flush_cache
CACHE_ID_OPTION="$ID" CACHE_DONT_FLUSH_DATA_OPTION="1" stop_cache
run_cmd dd if=/dev/zero of="${CACHE_DEVICE}${ID}" bs=1M count=1 oflag=direct
run_cmd "mount ${CORE_DEVICE}${ID} ${MOUNTPOINT}-${ID}-1"
run_cmd dd if=/dev/zero of="${CACHE_DEVICE}-part${ID}" bs=1M count=1 oflag=direct
run_cmd "mount ${CORE_DEVICE}-part${ID} ${MOUNTPOINT}-${ID}-1"
done
# Now check for files' presence and umount core devices

View File

@ -23,14 +23,14 @@ TEST_DEVICE=${DEVICE_NAME}1-1
#param device
get_stat_sectors_read() {
L_DEVICE=$(basename $1)
L_DEVICE=$(basename $(realpath $1))
L_STAT=$(cat /proc/diskstats | grep $L_DEVICE | awk '{ print $6 }')
echo $L_STAT
}
#param device
get_stat_sectors_written() {
L_DEVICE=$(basename $1)
L_DEVICE=$(basename $(realpath $1))
L_STAT=$(cat /proc/diskstats | grep $L_DEVICE | awk '{ print $10 }')
echo $L_STAT
}
@ -46,13 +46,13 @@ cache_suspend_init() {
# Create 1 primary partitions on CORE_DEVICE
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION=$CORE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
run_cmd dd if=/dev/zero of="${CORE_DEVICE}1" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}-part1" bs=1M count=1 oflag=direct
# Start cache on CACHE_DEVICE1
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
}
cache_suspend_deinit() {
@ -80,7 +80,7 @@ cache_suspend_test() {
CACHE_MODE_FLUSH_OPTION="yes" CACHE_ID_OPTION="1" CACHE_MODE_OPTION="pt" set_cache_mode
# Get read cache statistics before.
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
# Read file
test_log_trace "Read 4k, Read has to be performed from core"
@ -89,7 +89,7 @@ cache_suspend_test() {
# Sync
sync && echo 3 > /proc/sys/vm/drop_caches
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
test_log_trace "Cache reads before : $L_CACHE_READS_BEFORE"
test_log_trace "Cache reads after : $L_CACHE_READS_AFTER"
@ -107,9 +107,9 @@ cache_suspend_test() {
sync && echo 3 > /proc/sys/vm/drop_caches
# Get statistics
L_CACHE_WRITES_BEFORE=$(get_stat_sectors_written ${CACHE_DEVICE}1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CACHE_WRITES_BEFORE=$(get_stat_sectors_written ${CACHE_DEVICE}-part1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
# Read file
test_log_trace "Read 4k, read form core only"
@ -119,9 +119,9 @@ cache_suspend_test() {
sync && echo 3 > /proc/sys/vm/drop_caches
# Get statistics
L_CACHE_WRITES_AFTER=$(get_stat_sectors_written ${CACHE_DEVICE}1)
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CACHE_WRITES_AFTER=$(get_stat_sectors_written ${CACHE_DEVICE}-part1)
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
test_log_trace "Core reads before : $L_CORE_READS_BEFORE"
test_log_trace "Core reads after : $L_CORE_READS_AFTER"
@ -146,8 +146,8 @@ cache_suspend_test() {
# Resume the cache
CACHE_ID_OPTION="1" CACHE_MODE_OPTION="wt" set_cache_mode
L_CACHE_WRITES_BEFORE=$(get_stat_sectors_written ${CACHE_DEVICE}1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_WRITES_BEFORE=$(get_stat_sectors_written ${CACHE_DEVICE}-part1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
test_log_trace "Read 4k, read form core, write on cache"
# Read file
@ -156,8 +156,8 @@ cache_suspend_test() {
# Sync
sync && echo 3 > /proc/sys/vm/drop_caches
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_WRITES_AFTER=$(get_stat_sectors_written ${CACHE_DEVICE}1)
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
L_CACHE_WRITES_AFTER=$(get_stat_sectors_written ${CACHE_DEVICE}-part1)
test_log_trace "Core reads before : $L_CORE_READS_BEFORE"
test_log_trace "Core reads after : $L_CORE_READS_AFTER"

View File

@ -27,10 +27,10 @@ TEST_COUNT_HALF=16384
#param device
get_stat_sectors_read() {
L_DEVICE=$(basename $1)
L_DEVICE=$(basename $(realpath $1))
if [[ ${L_DEVICE} =~ "nvme" ]]
then
L_DEVICE="${L_DEVICE:0:${#L_DEVICE}-1}p${L_DEVICE: -1}"
L_DEVICE="${L_DEVICE:0:${#L_DEVICE}-1}-part${L_DEVICE: -1}"
fi
L_STAT=$(cat /proc/diskstats | grep $L_DEVICE | awk '{ print $6 }')
echo $L_STAT
@ -38,7 +38,7 @@ get_stat_sectors_read() {
#param device
get_stat_sectors_written() {
L_DEVICE=$(basename $1)
L_DEVICE=$(basename $(realpath $1))
L_STAT=$(cat /proc/diskstats | grep $L_DEVICE | awk '{ print $10 }')
echo $L_STAT
}
@ -54,13 +54,13 @@ cache_suspend_init() {
# Create 1 primary partitions on CORE_DEVICE
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION=$CORE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
run_cmd dd if=/dev/zero of="${CORE_DEVICE}1" bs=1M count=1 oflag=direct
run_cmd dd if=/dev/zero of="${CORE_DEVICE}-part1" bs=1M count=1 oflag=direct
# Start cache on CACHE_DEVICE1
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
}
cache_suspend_deinit() {
@ -107,8 +107,8 @@ cache_suspend_test() {
CACHE_ID_OPTION="1" CACHE_MODE_OPTION="wt" set_cache_mode
# Get statistics before
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_READS_BEFORE=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
L_CORE_READS_BEFORE=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
# Read from
test_log_trace "Read from CAS"
@ -117,8 +117,8 @@ cache_suspend_test() {
sync && echo 3 > /proc/sys/vm/drop_caches
# Get statistics after
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}1)
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}1)
L_CACHE_READS_AFTER=$(get_stat_sectors_read ${CACHE_DEVICE}-part1)
L_CORE_READS_AFTER=$(get_stat_sectors_read ${CORE_DEVICE}-part1)
test_log_trace "Cache reads before : $L_CACHE_READS_BEFORE"
test_log_trace "Cache reads after : $L_CACHE_READS_AFTER"

View File

@ -74,12 +74,12 @@ check_config() {
fi
# Check if core and cache devices are free for us or not
if [ -z $IGNORE_WARNINGS ] && [ -n "$(ls ${CORE_DEVICE}[0-9]* 2> /dev/null)" ] ; then
if [ -z $IGNORE_WARNINGS ] && [ -n "$(ls ${CORE_DEVICE}-part[0-9]* 2> /dev/null)" ] ; then
warning "The core device $CORE_DEVICE is partitioned! Some tests may remove partitions from this device"
warning "Use --ignore | -i flag to force using this core device"
exit 1
fi
if [ -z $IGNORE_WARNINGS ] && [ -n "$(ls ${CACHE_DEVICE}[0-9]* 2> /dev/null)" ] ; then
if [ -z $IGNORE_WARNINGS ] && [ -n "$(ls ${CACHE_DEVICE}-part[0-9]* 2> /dev/null)" ] ; then
warning "The cache device $CACHE_DEVICE is partitioned!"
warning "Use --ignore | -i flag to force using this cache device"
exit 1
@ -93,11 +93,15 @@ check_config() {
umount $DEVICE_TO_UMOUNT
done
if [ -n "$(mount | grep $CACHE_DEVICE)" ] ; then
SHORT_CACHE_LINK=$(realpath $CACHE_DEVICE)
OUTPUT_MOUNT_CACHE=$(mount | grep -E "$CACHE_DEVICE|$SHORT_CACHE_LINK")
if [ -n "$OUTPUT_MOUNT_CACHE" ] ; then
error "The cache device $CACHE_DEVICE or one of its partitions is mounted!"
exit 1
fi
if [ -n "$(mount | grep $CORE_DEVICE)" ] ; then
SHORT_CORE_LINK=$(realpath $CORE_DEVICE)
OUTPUT_MOUNT_CORE=$(mount | grep -E "$CORE_DEVICE|$SHORT_CORE_LINK")
if [ -n "$OUTPUT_MOUNT_CORE" ] ; then
error "The core device $CORE_DEVICE or one of its partitions is mounted!"
exit 1
fi
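
mount prints resolved kernel paths, so grepping its output for the by-id link alone would never match a mounted partition; the fixed check matches the link and its realpath target together. An equivalent check in Python against /proc/mounts, offered as a sketch only:

import os

def is_mounted(dev_link):
    # A device counts as mounted if either the link or its resolved target
    # (including any of its partitions) appears as a mount source.
    target = os.path.realpath(dev_link)
    with open("/proc/mounts") as mounts:
        for line in mounts:
            source = line.split()[0]
            if source.startswith(dev_link) or source.startswith(target):
                return True
    return False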

View File

@ -16,6 +16,7 @@ OCF
RHEL
SLES
SSD
SSDP
SUSE
TERM
TTY
@ -48,7 +49,10 @@ reseller
resizing
runtime
sdb
sdx
utf
wa
wb
wt
wo
wwn

View File

@ -30,18 +30,6 @@ restore_config() {
start_cache() {
check_options ${FUNCNAME[0]}
CACHE_DEVICE_NVME_REGEX="(/dev/nvme[0-9]n[0-9])([0-9]*)"
if [ ! -b ${CACHE_DEVICE_OPTION} ]
then
if [[ "${CACHE_DEVICE_OPTION}" =~ ${CACHE_DEVICE_NVME_REGEX} ]]
then
if [ -b ${BASH_REMATCH[1]}p${BASH_REMATCH[2]} ]
then
CACHE_DEVICE_OPTION=${BASH_REMATCH[1]}p${BASH_REMATCH[2]}
fi
fi
fi
local COMMAND="$CAS --start-cache --cache-device $CACHE_DEVICE_OPTION --cache-id $CACHE_ID_OPTION"
if [ -n "$CACHE_FORCE_OPTION" ] ; then
@ -113,7 +101,8 @@ add_core() {
return 0
fi
local i=0
local CAS_DEV=` casadm -L | egrep "^.core +[0-9]+ +$CORE_DEVICE_OPTION" | awk '{print $NF}'`
local SHORT_LINK=$(realpath $CORE_DEVICE_OPTION)
local CAS_DEV=` casadm -L | egrep "^.core +[0-9]+ +$SHORT_LINK" | awk '{print $NF}'`
clear_options
while [ ! -e $CAS_DEV ]; do
sleep 2
@ -202,19 +191,6 @@ get_stats_value() {
clear_options
}
format_nvme() {
check_options ${FUNCNAME[0]}
local COMMAND="$CAS --nvme --format $NVME_FORMAT_MODE_OPTION --device $DEVICE_OPTION"
if [ -n "$NVME_FORMAT_FORCE_OPTION" ] ; then
COMMAND="$COMMAND --force"
fi
run_cmd $COMMAND
clear_options
}
init() {
check_options ${FUNCNAME[0]}
@ -304,10 +280,10 @@ init() {
CACHE_LINE_SIZE="$L_CACHE_LINE_SIZE"
fi
CACHE_ID_OPTION="$i" CACHE_DEVICE_OPTION="${CACHE_DEVICE}$i" start_cache
CACHE_ID_OPTION="$i" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part$i" start_cache
for ((j = 1; j <= L_NUMBER_OF_CORE_PARTITIONS && j <= MAX_NUMBER_OF_CORE_PARTITIONS; j++)); do
CACHE_ID_OPTION="$i" CORE_DEVICE_OPTION="${CORE_DEVICE}$k" add_core
CACHE_ID_OPTION="$i" CORE_DEVICE_OPTION="${CORE_DEVICE}-part$k" add_core
k=`expr $k \+ 1`
done
done
@ -717,7 +693,6 @@ export -f try_add_core
export -f remove_core
export -f check_device_state
export -f get_stats_value
export -f format_nvme
export -f init
export -f iteration
export -f deinit
@ -822,9 +797,9 @@ make_primary_partitions() {
for ID in `seq 1 $PART_NUM`; do
local i=0
local TEST_DEV="${TARGET_DEVICE_OPTION}${ID}"
local TEST_DEV_P="${TARGET_DEVICE_OPTION}p${ID}"
while ! [[ -b $TEST_DEV || -b $TEST_DEV_P ]] ; do
local TEST_DEV="${DEV_NAME}-part${ID}"
local TEST_DEV_P="${DEV_NAME}-part${ID}"
while ! [[ -L $TEST_DEV || -L $TEST_DEV_P ]] ; do
# make sure that partition is detected if it was created
partprobe
sleep 1
@ -843,7 +818,7 @@ make_primary_partitions() {
# erase all filesystem/cas cache metadata that may have existed on it
# before.
if [ -b $TEST_DEV ]
if [ -L $TEST_DEV ]
then
run_cmd dd if=/dev/zero of="${TEST_DEV}" bs=1M count=1 oflag=direct
else
@ -927,6 +902,7 @@ remove_caches() {
}
turn_on_device() {
# Use the realpath that was resolved before the device was turned off
check_options ${FUNCNAME[0]}
if [[ $CACHE_DEVICE_OPTION == "/dev/nvme"* ]] ; then
turn_on_nvme_device
@ -939,17 +915,19 @@ turn_on_device() {
turn_off_device() {
check_options ${FUNCNAME[0]}
if [[ $CACHE_DEVICE_OPTION == "/dev/nvme"* ]] ; then
SHORT_LINK=$(realpath $CACHE_DEVICE_OPTION)
if [[ $SHORT_LINK == "/dev/nvme"* ]] ; then
turn_off_nvme_device
else
local COMMAND="echo 'offline' > /sys/block/${CACHE_DEVICE_OPTION:4}/device/state"
local COMMAND="echo 'offline' > /sys/block/${SHORT_LINK:4}/device/state"
run_cmd $COMMAND
fi
}
turn_off_nvme_device() {
check_options ${FUNCNAME[0]}
COMMAND="echo '1' > /sys/block/${CACHE_DEVICE_OPTION:4}/device/device/remove"
SHORT_LINK=$(realpath $CACHE_DEVICE_OPTION)
COMMAND="echo '1' > /sys/block/${SHORT_LINK:4}/device/device/remove"
run_cmd $COMMAND
clear_options
}
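
Two details in the hunks above follow directly from the by-id switch: a partition now shows up as a symlink, so the existence tests change from -b (block device) to -L (symlink), and udev may lag behind partprobe, hence the retry loop. A sketch of that wait, with the helper name assumed:

import os
import subprocess
import time

def wait_for_partition_link(link_path, retries=10):
    # Poll for the by-id symlink; nudge the kernel to re-read the
    # partition table if the link has not shown up yet.
    for _ in range(retries):
        if os.path.islink(link_path):  # the -L test from the script
            return
        subprocess.run(["partprobe"], check=False)
        time.sleep(1)
    raise TimeoutError("%s did not appear" % link_path)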

View File

@ -30,6 +30,21 @@ check_if_root_or_exit() {
fi
}
resolve_path() {
local BY_ID_DIR="/dev/disk/by-id"
local BY_ID_LINKS=$(ls $BY_ID_DIR)
for BY_ID_PATH in $BY_ID_LINKS
do
FULL_PATH="${BY_ID_DIR}/${BY_ID_PATH}"
if [[ "$(realpath $FULL_PATH)" -ef "$(realpath $DEVICE)" ]]
then
DEVICE=$FULL_PATH
break
fi
done
}
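# Minimal usage sketch (hypothetical disk): resolve_path rewrites the global
# DEVICE variable to its /dev/disk/by-id link when a matching symlink exists:
#   DEVICE="/dev/sdd"
#   resolve_path
#   echo "$DEVICE"   # e.g. /dev/disk/by-id/wwn-0x123456789abcdef0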
parse_args() {
while [ -n "$1" ] ; do
@ -51,10 +66,14 @@ parse_args() {
-i | --ignore ) export IGNORE_WARNINGS="1"
;;
-c | --cache ) shift
CACHE_DEVICE="$1"
DEVICE="$1"
resolve_path
CACHE_DEVICE=$DEVICE
;;
-d | --core ) shift
CORE_DEVICE="$1"
DEVICE="$1"
resolve_path
CORE_DEVICE=$DEVICE
;;
* ) echo "Unrecognized option"
usage


@ -9,8 +9,8 @@
# If you want to use this file, rename it to "cas_local_config".
# Default core and cache devices - note that we require whole devices, not partitions
export CORE_DEVICE="/dev/sdd"
export CACHE_DEVICE="/dev/sdf"
export CORE_DEVICE="/dev/disk/by-id/ata-SUPER_SPEED_DISK_SSD"
export CACHE_DEVICE="/dev/disk/by-id/nvme-BETTER_SSD_KINGOFSSDS"
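# One way to look up the by-id link for a given disk (hypothetical kernel
# name sdd):
#   ls -l /dev/disk/by-id | grep sdd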
# Default size of partition for cache/core device. This is used only for
# the DEFAULT_* API functions


@ -20,7 +20,7 @@ export ALL_OPTIONS="
PROMO_POL_NS_OPTION PROMO_POL_VALUE THRESHOLD_OPTION TRIGGER_OPTION THRESHOLD_VALUE TRIGGER_VALUE THRESHOLD_VALUE_ERROR TRIGGER_VALUE_ERROR
TARGET_DEVICE_OPTION FILESYSTEM_TYPE
IO_CLASS_ID IO_CLASS_PRIORITY IO_CLASS_SIZE_MIN IO_CLASS_SIZE_MAX IO_CLASS_NAME IO_CLASS_CACHE_MODE
FORMAT_NVME_REQUIRED_OPTIONS CHECK_IS_NVME_ATOMIC TURN_OFF_NVME_DEVICE TURN_ON_NVME_DEVICE
CHECK_IS_NVME_ATOMIC TURN_OFF_NVME_DEVICE TURN_ON_NVME_DEVICE
DEVICE_ID_OPTION DEMANDED_STATE_OPTION
STAT_UNIT_OPTION STAT_NAME_OPTION
STORE_CONFIG_OPTION
@ -50,7 +50,6 @@ export SET_FLUSH_PARAMS_REQUIRED_OPTIONS="CACHE_ID_OPTION CLEAN_POL_NS_OPTION"
export GET_FLUSH_PARAMS_REQUIRED_OPTIONS="CACHE_ID_OPTION CLEAN_POL_NS_OPTION"
export SET_PROMOTION_PARAMS_REQUIRED_OPTIONS="CACHE_ID_OPTION PROMO_POL_NS_OPTION"
export CHECK_PROMOTION_PARAMS_REQUIRED_OPTIONS="CACHE_ID_OPTION PROMO_POL_NS_OPTION"
export FORMAT_NVME_REQUIRED_OPTIONS="NVME_FORMAT_MODE_OPTION DEVICE_OPTION"
export CHECK_IS_NVME_ATOMIC_REQUIRED_OPTIONS="DEVICE_OPTION"
export CREATE_PARTITION_REQUIRED_OPTIONS="CACHE_ID_OPTION PARTITION_ID_OPTION PRIORITY_OPTION MIN_SIZE_OPTION MAX_SIZE_OPTION CLEANING_POLICY_OPTION"


@ -48,10 +48,10 @@ eviction_policy_init() {
fi
# Start cache on CACHE_DEVICE1
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
}
eviction_policy_flush() {


@ -34,7 +34,7 @@ eviction_policy_init() {
# Create 1 primary partition on CACHE_DEVICE
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION=$CACHE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
# Make empty cache device, clear previous content, clear previous metadata
dd if=/dev/zero of="${CACHE_DEVICE}1" bs="4k" count=$CACHE_DEVICE_SIZE &>/dev/null
dd if=/dev/zero of="${CACHE_DEVICE}-part1" bs="4k" count=$CACHE_DEVICE_SIZE &>/dev/null
# Create 1 primary partition on CORE_DEVICE
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION=$CORE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
@ -50,10 +50,10 @@ eviction_policy_init() {
fi
# Start cache on CACHE_DEVICE1
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
}
eviction_policy_flush() {
@ -322,7 +322,7 @@ test_log_start
run_cmd eviction_policy_init
run_cmd eviction_policy_test $TEST_DEVICE "${CORE_DEVICE}1" $(get_bytes $CORE_DEVICE_SIZE)
run_cmd eviction_policy_test $TEST_DEVICE "${CORE_DEVICE}-part1" $(get_bytes $CORE_DEVICE_SIZE)
run_cmd eviction_policy_deinit


@ -34,7 +34,7 @@ eviction_policy_init() {
# Create 1 primary partition on CACHE_DEVICE
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION=$CACHE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
# Make empty cache device, clear previous content, clear previous metadata
dd if=/dev/zero of="${CACHE_DEVICE}1" bs="4k" count=$CACHE_DEVICE_SIZE &>/dev/null
dd if=/dev/zero of="${CACHE_DEVICE}-part1" bs="4k" count=$CACHE_DEVICE_SIZE &>/dev/null
# Create 1 primary partition on CORE_DEVICE
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION=$CORE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
@ -50,10 +50,10 @@ eviction_policy_init() {
fi
# Start cache on CACHE_DEVICE1
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
}
eviction_policy_flush() {
@ -315,7 +315,7 @@ test_log_start
run_cmd eviction_policy_init "YES"
run_cmd eviction_policy_test $TEST_DEVICE "${CORE_DEVICE}1" $(get_bytes $CORE_DEVICE_SIZE)
run_cmd eviction_policy_test $TEST_DEVICE "${CORE_DEVICE}-part1" $(get_bytes $CORE_DEVICE_SIZE)
run_cmd eviction_policy_deinit


@ -36,10 +36,10 @@ CACHE_LINE_SIZES="4 8 16 32 64"
for mode in $CACHE_MODES; do
for line_size in $CACHE_LINE_SIZES; do
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" \
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" \
CACHE_MODE_OPTION="$mode" CACHE_LINE_SIZE="$line_size" \
CACHE_FORCE_OPTION="yes" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
for engine in $IO_ENGINES; do
run_cmd "fio --ioengine=${engine} --direct=1 --name=test \


@ -31,12 +31,12 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Prepare cache on CACHE_DEVICE1 (e.g. /dev/sdd1) and add core devices using CORE_DEVICE1, CORE_DEVICE2 and CORE_DEVICE3 (e.g. /dev/sde1, /dev/sde2, /dev/sde3)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state
@ -46,19 +46,19 @@ CACHE_ID_OPTION="1" stop_cache
# Add cores to pool, then load cache and check if cache is running
# Try to add core devices and check their states
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" CORE_ID_OPTION="1" try_add_core
DEVICE_ID_OPTION=${CORE_DEVICE}1 DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" CORE_ID_OPTION="1" try_add_core
DEVICE_ID_OPTION="${CORE_DEVICE}-part1" DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}2" CORE_ID_OPTION="2" try_add_core
DEVICE_ID_OPTION=${CORE_DEVICE}2 DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" CORE_ID_OPTION="2" try_add_core
DEVICE_ID_OPTION="${CORE_DEVICE}-part2" DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}3" CORE_ID_OPTION="3" try_add_core
DEVICE_ID_OPTION=${CORE_DEVICE}3 DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" CORE_ID_OPTION="3" try_add_core
DEVICE_ID_OPTION="${CORE_DEVICE}-part3" DEMANDED_STATE_OPTION="Detached" check_device_state
# Try to load cache device, check if it is running and if all cores' statuses are appropriate
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_LOAD_METADATA_OPTION="y" start_cache
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_LOAD_METADATA_OPTION="y" start_cache
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state


@ -31,12 +31,12 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Prepare cache on CACHE_DEVICE1 (e.g. /dev/sdd1) and add core devices using CORE_DEVICE1, CORE_DEVICE2 and CORE_DEVICE3 (e.g. /dev/sde1, /dev/sde2, /dev/sde3)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state
@ -48,8 +48,8 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_
sleep 1
# Load cache, then add cores and check if cache is running
# Try to load cache device, check its state and cores state
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_LOAD_METADATA_OPTION="y" start_cache
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Incomplete" check_device_state
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_LOAD_METADATA_OPTION="y" start_cache
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Incomplete" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Inactive" check_device_state
@ -59,16 +59,16 @@ CACHE_ID_OPTION="1" CACHE_DONT_FLUSH_DATA_OPTION="1" stop_cache
TARGET_DEVICE_OPTION="$CORE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
sleep 1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" CORE_ID_OPTION="1" try_add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}2" CORE_ID_OPTION="2" try_add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}3" CORE_ID_OPTION="3" try_add_core
DEVICE_ID_OPTION="${CORE_DEVICE}1" DEMANDED_STATE_OPTION="Detached" check_device_state
DEVICE_ID_OPTION="${CORE_DEVICE}2" DEMANDED_STATE_OPTION="Detached" check_device_state
DEVICE_ID_OPTION="${CORE_DEVICE}3" DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" CORE_ID_OPTION="1" try_add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" CORE_ID_OPTION="2" try_add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" CORE_ID_OPTION="3" try_add_core
DEVICE_ID_OPTION="${CORE_DEVICE}-part1" DEMANDED_STATE_OPTION="Detached" check_device_state
DEVICE_ID_OPTION="${CORE_DEVICE}-part2" DEMANDED_STATE_OPTION="Detached" check_device_state
DEVICE_ID_OPTION="${CORE_DEVICE}-part3" DEMANDED_STATE_OPTION="Detached" check_device_state
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_LOAD_METADATA_OPTION="y" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_LOAD_METADATA_OPTION="y" start_cache
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state


@ -31,18 +31,18 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_
cat > $CAS_CONFIG_PATH <<- EOM
${CAS_CONFIG_VERSION_TAG}
[caches]
1 ${CACHE_DEVICE}1 WT
1 ${CACHE_DEVICE}-part1 WT
[cores]
1 1 ${CORE_DEVICE}1
1 2 ${CORE_DEVICE}2
1 3 ${CORE_DEVICE}3
1 1 ${CORE_DEVICE}-part1
1 2 ${CORE_DEVICE}-part2
1 3 ${CORE_DEVICE}-part3
EOM
run_cmd "casctl init"
run_cmd "udevadm settle"
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state
@ -53,9 +53,7 @@ check_no_cache_running
run_cmd "casctl start"
run_cmd "udevadm settle"
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-1" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-2" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}1-3" DEMANDED_STATE_OPTION="Active" check_device_state


@ -35,16 +35,16 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_
cat > $CAS_CONFIG_PATH <<- EOM
${CAS_CONFIG_VERSION_TAG}
[caches]
16384 ${CACHE_DEVICE}1 WB cleaning_policy=acp
16384 ${CACHE_DEVICE}-part1 WB cleaning_policy=acp
[cores]
16384 4095 ${CORE_DEVICE}1
16384 4094 ${CORE_DEVICE}2
16384 0 ${CORE_DEVICE}3
16384 4095 ${CORE_DEVICE}-part1
16384 4094 ${CORE_DEVICE}-part2
16384 0 ${CORE_DEVICE}-part3
EOM
run_cmd "casctl init"
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-0" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-4095" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-4094" DEMANDED_STATE_OPTION="Active" check_device_state
@ -57,7 +57,7 @@ check_no_cache_running
run_cmd "casctl start"
DEVICE_ID_OPTION=${CACHE_DEVICE}1 DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${CACHE_DEVICE}-part1" DEMANDED_STATE_OPTION="Running" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-0" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-4095" DEMANDED_STATE_OPTION="Active" check_device_state
DEVICE_ID_OPTION="${DEVICE_NAME}16384-4094" DEMANDED_STATE_OPTION="Active" check_device_state


@ -30,11 +30,11 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_
cat > $CAS_CONFIG_PATH <<- EOM
${CAS_CONFIG_VERSION_TAG}
[caches]
1 ${CACHE_DEVICE}1 WB cleaning_policy=nop
1 ${CACHE_DEVICE}-part1 WB cleaning_policy=nop
[cores]
1 1 ${CORE_DEVICE}1
1 2 ${CORE_DEVICE}2
1 3 ${CORE_DEVICE}3
1 1 ${CORE_DEVICE}-part1
1 2 ${CORE_DEVICE}-part2
1 3 ${CORE_DEVICE}-part3
EOM
run_cmd "casctl init"
@ -46,7 +46,7 @@ run_cmd "udevadm settle"
run_cmd "casctl stop"
# We shouldn't be able to start cache on this device, as it contains dirty data
NEGATIVE_TEST_OPTION=1 CACHE_DEVICE_OPTION=${CACHE_DEVICE}1 CACHE_ID_OPTION=1 start_cache
NEGATIVE_TEST_OPTION=1 CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_ID_OPTION=1 start_cache
NEGATIVE_TEST_OPTION=1 run_cmd "casctl init"
clear_options
@ -56,7 +56,7 @@ run_cmd "casctl start"
run_cmd "casctl stop"
# We still shouldn't be able to start
NEGATIVE_TEST_OPTION=1 CACHE_DEVICE_OPTION=${CACHE_DEVICE}1 CACHE_ID_OPTION=1 start_cache
NEGATIVE_TEST_OPTION=1 CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_ID_OPTION=1 start_cache
NEGATIVE_TEST_OPTION=1 run_cmd "casctl init"
clear_options
@ -69,7 +69,7 @@ run_cmd "udevadm settle"
run_cmd "casctl stop --flush"
run_cmd "casadm -S -d ${CACHE_DEVICE}1 --force"
run_cmd "casadm -S -d ${CACHE_DEVICE}-part1 --force"
run_cmd "casctl stop"


@ -46,10 +46,10 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="200M" PARTITION_IDS_
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="400M" PARTITION_IDS_OPTION="1" make_primary_partitions
# Start cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Prepare IO class config with wlth specified
io_class_config_wlth


@ -48,10 +48,10 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="200M" PARTITION_IDS_
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="400M" PARTITION_IDS_OPTION="1" make_primary_partitions
# Start cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Prepare IO class config with wlth specified
io_class_config_wlth


@ -24,13 +24,13 @@ test_log_start
# Corrupts random bit from byte on given by $1 address from ${CACHE_DEVICE}1
corrupt_byte(){
OFFSET=$1
READ_VALUE=`od -N 1 -j $OFFSET -A n -t x1 ${CACHE_DEVICE}1`
READ_VALUE=`od -N 1 -j $OFFSET -A n -t x1 ${CACHE_DEVICE}-part1`
READ_VALUE="0x`echo $READ_VALUE`"
RANDOM_UINT32=`od -An -tu4 -N4 /dev/urandom`
MASK=$(( 1 << $(( $RANDOM_UINT32 % 8 )) ))
echo -e "\x$(( ${READ_VALUE} ^ ${MASK} ))"| dd bs=1 count=1 seek=$OFFSET conv=notrunc of=${CACHE_DEVICE}1 1>&2 2>/dev/null
echo -e "\x$(( ${READ_VALUE} ^ ${MASK} ))"| dd bs=1 count=1 seek=$OFFSET conv=notrunc of=${CACHE_DEVICE}-part1 1>&2 2>/dev/null
}
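# Worked example of the bit flip above (hypothetical values): with
# READ_VALUE=0x5a and RANDOM_UINT32 % 8 == 3, MASK is 1 << 3 = 0x08, so the
# byte written back is 0x5a ^ 0x08 = 0x52, i.e. exactly one bit differs.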
# Use CACHE_DEVICE and CORE_DEVICE provided by configuration file and remove partitions from those devices
@ -52,7 +52,7 @@ do
fi
# Start cache on CACHE_DEVICE to repair it and also to produce a log.
CACHE_ID_OPTION="1" CACHE_FORCE_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_FORCE_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Stop cache.
CACHE_ID_OPTION="1" stop_cache
@ -88,7 +88,7 @@ do
corrupt_byte ${CORRUPT_ADDRESS}
# Start again with load option, this should fail, metadata is corrupted.
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CACHE_LOAD_METADATA_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CACHE_LOAD_METADATA_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
done


@ -40,7 +40,7 @@ do
fi
# Start cache on CACHE_DEVICE to repair it and also to produce a log.
CACHE_ID_OPTION="1" CACHE_FORCE_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_ID_OPTION="1" CACHE_FORCE_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Stop cache.
CACHE_ID_OPTION="1" stop_cache
@ -59,10 +59,10 @@ do
test_log_trace "Corrupting 64K bytes in ${SECTION}"
# Corrupt cache metadata
run_cmd "dd if=/dev/urandom of="${CACHE_DEVICE}1" bs=1 count=64K conv=notrunc seek=${METADATA_SECTION_OFFSET}K "
run_cmd "dd if=/dev/urandom of=${CACHE_DEVICE}-part1 bs=1 count=64K conv=notrunc seek=${METADATA_SECTION_OFFSET}K "
# Start again with load option, this should fail, metadata is corrupted.
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CACHE_LOAD_METADATA_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
NEGATIVE_TEST_OPTION="1" CACHE_ID_OPTION="1" CACHE_LOAD_METADATA_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
done
test_log_stop


@ -26,7 +26,7 @@ TARGET_DEVICE_OPTION="$CORE_DEVICE" remove_partitions
TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS_OPTION="1" make_primary_partitions
# create cache in WT mode and try to change promotion parameters
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_FORCE_OPTION="1" start_cache
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_FORCE_OPTION="1" start_cache
# changing promotion parameters should not be prohibited while core is added to cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="$CORE_DEVICE" add_core
@ -108,7 +108,7 @@ CACHE_ID_OPTION="1" PROMO_POL_NS_OPTION="promotion-nhit" THRESHOLD_OPTION="451"
CACHE_ID_OPTION="1" PROMO_POL_NS_OPTION="promotion-nhit" THRESHOLD_OPTION="812" TRIGGER_OPTION="49" set_promotion_params
CACHE_ID_OPTION="1" PROMO_POL_OPTION="nhit" set_promotion_policy
CACHE_ID_OPTION="1" stop_cache
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_LOAD_METADATA_OPTION="1" start_cache
CACHE_MODE_OPTION="wt" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_LOAD_METADATA_OPTION="1" start_cache
CACHE_ID_OPTION="1" PROMO_POL_OPTION="nhit" check_promotion_policy
CACHE_ID_OPTION="1" PROMO_POL_NS_OPTION="promotion-nhit" THRESHOLD_OPTION="812" TRIGGER_OPTION="49" check_promotion_params


@ -28,16 +28,16 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
@ -64,16 +64,17 @@ for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
done
SHORT_LINK=$(realpath ${CACHE_DEVICE})
CACHE_DEVICE_OPTION="${CACHE_DEVICE}" turn_off_device
for ID in 1 2 3 ; do
DONT_FAIL_ON_ERROR_OPTION="YES" CACHE_ID_OPTION="$ID" stop_cache
done
CACHE_DEVICE_OPTION="${CACHE_DEVICE}" turn_on_device
CACHE_DEVICE_OPTION="${SHORT_LINK}" turn_on_device
for ID in 1 2 3 ; do
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}$ID" CACHE_LOAD_METADATA_OPTION="y" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part${ID}" CACHE_LOAD_METADATA_OPTION="y" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="$ID" stop_cache
run_cmd "mount ${CORE_DEVICE}${ID} ${MOUNTPOINT}-${ID}-1"
run_cmd "mount ${CORE_DEVICE}-part${ID} ${MOUNTPOINT}-${ID}-1"
done
# Now check for files' presence and umount core devices


@ -28,16 +28,16 @@ TARGET_DEVICE_OPTION="$CACHE_DEVICE" PARTITION_SIZE_OPTION="2000M" PARTITION_IDS
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION="4000M" PARTITION_IDS_OPTION="1 2 3" make_primary_partitions
# Start cache on CACHE_DEVICE1 (/dev/sdd1, for example) with ID=1 and add a core device using CORE_DEVICE1 (/dev/sde1, for example)
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Start cache on CACHE_DEVICE2 (/dev/sdd2, for example) with ID=2 and add a core device using CORE_DEVICE2 (/dev/sde2, for example)
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}2" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}2" add_core
CACHE_ID_OPTION="2" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part2" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="2" CORE_DEVICE_OPTION="${CORE_DEVICE}-part2" add_core
# Start cache on CACHE_DEVICE3 (/dev/sdd3, for example) with ID=3 and add a core device using CORE_DEVICE3 (/dev/sde3, for example)
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}3" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}3" add_core
CACHE_ID_OPTION="3" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part3" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="3" CORE_DEVICE_OPTION="${CORE_DEVICE}-part3" add_core
# Create filesystems on cached devices - we do this using run_cmd because it is not in the API (and probably won't be).
# The test framework will accept invoking the commands directly (e.g. "mkfs.ext3 [...]" without the "run_cmd"), but the
@ -64,14 +64,15 @@ for ID in 1 2 3 ; do
run_cmd "umount ${MOUNTPOINT}-${ID}-1"
done
SHORT_LINK=$(realpath ${CACHE_DEVICE})
CACHE_DEVICE_OPTION="${CACHE_DEVICE}" turn_off_device
for ID in 1 2 3 ; do
DONT_FAIL_ON_ERROR_OPTION="YES" CACHE_ID_OPTION="$ID" stop_cache
done
CACHE_DEVICE_OPTION="${CACHE_DEVICE}" turn_on_device
CACHE_DEVICE_OPTION="${SHORT_LINK}" turn_on_device
for ID in 1 2 3 ; do
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}$ID" CACHE_LOAD_METADATA_OPTION="y" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="$ID" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part${ID}" CACHE_LOAD_METADATA_OPTION="y" CACHE_MODE_OPTION="wb" start_cache
CACHE_ID_OPTION="$ID" CORE_ID_OPTION="1" mount_cache
done


@ -36,13 +36,13 @@ wb_init() {
TARGET_DEVICE_OPTION="$CORE_DEVICE" PARTITION_SIZE_OPTION=$CORE_DEVICE_SIZE PARTITION_IDS_OPTION="1" make_primary_partitions
# Make ext3 file system
TARGET_DEVICE_OPTION="${CORE_DEVICE}1" FILESYSTEM_TYPE="ext3" make_filesystem
TARGET_DEVICE_OPTION="${CORE_DEVICE}-part1" FILESYSTEM_TYPE="ext3" make_filesystem
# Start cache on CACHE_DEVICE1
CACHE_MODE_OPTION="wb" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}1" start_cache
CACHE_MODE_OPTION="wb" CACHE_ID_OPTION="1" CACHE_DEVICE_OPTION="${CACHE_DEVICE}-part1" start_cache
# Add a core device using CORE_DEVICE1
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}1" add_core
CACHE_ID_OPTION="1" CORE_DEVICE_OPTION="${CORE_DEVICE}-part1" add_core
# Mount file system
CACHE_ID_OPTION="1" CORE_ID_OPTION="1" mount_cache


@ -272,25 +272,6 @@ def test_cas_config_add_same_core_symlinked_02(mock_realpath):
config.insert_core(core_symlinked)
@patch("os.path.realpath")
@patch("os.listdir")
def test_cas_config_get_by_id_path(mock_listdir, mock_realpath):
mock_listdir.return_value = [
"wwn-1337deadbeef-x0x0",
"wwn-1337deadbeef-x0x0-part1",
"nvme-INTEL_SSDAAAABBBBBCCC_0984547ASDDJHHHFH",
]
mock_realpath.side_effect = (
lambda x: "/dev/dummy1"
if x == "/dev/disk/by-id/wwn-1337deadbeef-x0x0-part1"
else x
)
path = opencas.cas_config.get_by_id_path("/dev/dummy1")
assert path == "/dev/disk/by-id/wwn-1337deadbeef-x0x0-part1"
@patch("os.path.realpath")
@patch("os.listdir")
def test_cas_config_get_by_id_path_not_found(mock_listdir, mock_realpath):


@ -172,7 +172,7 @@ command there is a different list of available options:
.TP
.B -d, --cache-device <DEVICE>
Path to caching device to be used e.g. SSD device (/dev/sdb).
Path to caching device, specified as a by-id link (e.g. /dev/disk/by-id/nvme-INTEL_SSDP...).
.TP
.B -i, --cache-id <ID>
@ -460,7 +460,7 @@ Identifier of cache instance <1-16384>.
.TP
.B -d, --core-device <DEVICE>
Path to core device e.g. HDD device.
Path to core device, specified as a by-id link (e.g. /dev/disk/by-id/wwn-0x1234567890b100d).
.TP
.B -j, --core-id <ID>
@ -491,6 +491,11 @@ Path to core device to be removed from core pool.
.B -o, --output-format {table|csv}
Defines output format for list of all cache instances and core devices. It can be either \fBtable\fR (default) or \fBcsv\fR.
.TP
.B -b, --by-id-path
Display paths to devices in long format (/dev/disk/by-id/some_link).
If this option is not given, paths are displayed in short format (/dev/sdx) instead.
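For example, a listing call combining this option with --list-caches (a
sketch, assuming at least one cache is configured):
.br
casadm --list-caches --output-format csv --by-id-path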
.SH Options that are valid with --stats (-P) are:
.TP
.B -i, --cache-id <ID>
@ -499,7 +504,7 @@ Identifier of cache instance <1-16384>.
.TP
.B -j, --core-id <ID>
Identifier of core instance <0-4095> within given cache instance. If this option is
not given, aggregate statistics for whole cache instance are printed instead.
not given, aggregated statistics for whole cache instance are printed instead.
.TP
.B -d, --io-class-id <ID>
@ -531,6 +536,11 @@ Default for --filter option is \fBall\fR.
Defines output format for statistics. It can be either \fBtable\fR
(default) or \fBcsv\fR.
.TP
.B -b, --by-id-path
Display paths to devices in long format (/dev/disk/by-id/some_link).
If this option is not given, paths are displayed in short format (/dev/sdx) instead.
.SH Options that are valid with --reset-counters (-Z) are:
.TP
.B -i, --cache-id <ID>
@ -576,19 +586,6 @@ Identifier of cache instance <1-16384>.
Defines output format for printed IO class configuration. It can be either
\fBtable\fR (default) or \fBcsv\fR.
.SH Options that are valid with --nvme --format (-N -F) are:
.TP
.B -d, --device <DEVICE>
Path to NVMe device to be formatted (e.g. /dev/nvme0).
.TP
.B -f, --force
Force to format NVMe device. By default device will not be formatted if utility
detects on the device file system or presence of dirty data after cache dirty
shutdown. This parameter formats NVMe namespace regardless to this situations.
.SH Command --help (-H) does not accept any options.
.BR


@ -5,7 +5,6 @@
#
import subprocess
import time
import opencas
import sys
import os
@ -21,8 +20,7 @@ try:
config = opencas.cas_config.from_file('/etc/opencas/opencas.conf',
allow_incomplete=True)
except Exception as e:
sl.syslog(sl.LOG_ERR,
'Unable to load opencas config. Reason: {0}'.format(str(e)))
sl.syslog(sl.LOG_ERR, f'Unable to load opencas config. Reason: {str(e)}')
exit(1)
for cache in config.caches.values():
@ -32,8 +30,8 @@ for cache in config.caches.values():
opencas.start_cache(cache, True)
except opencas.casadm.CasadmError as e:
sl.syslog(sl.LOG_WARNING,
'Unable to load cache {0} ({1}). Reason: {2}'
.format(cache.cache_id, cache.device, e.result.stderr))
f'Unable to load cache {cache.cache_id} ({cache.device}). '
f'Reason: {e.result.stderr}')
exit(e.result.exit_code)
exit(0)
for core in cache.cores.values():
@ -43,7 +41,7 @@ for cache in config.caches.values():
opencas.add_core(core, True)
except opencas.casadm.CasadmError as e:
sl.syslog(sl.LOG_WARNING,
'Unable to attach core {0} from cache {1}. Reason: {2}'
.format(core.device, cache.cache_id, e.result.stderr))
f'Unable to attach core {core.device} from cache {cache.cache_id}. '
f'Reason: {e.result.stderr}')
exit(e.result.exit_code)
exit(0)


@ -5,12 +5,13 @@ version=19.3.0
# of this file please refer to appropriate documentation
# NOTES:
# 1) It is highly recommended to specify cache/core device using path
# that is constant across reboots - e.g. disk device links in
# 1) It is required to specify cache/core device using links in
# /dev/disk/by-id/, preferably those using device WWN if available:
# /dev/disk/by-id/wwn-0x123456789abcdef0
# Referencing devices via /dev/sd* may result in cache misconfiguration after
# system reboot due to change(s) in drive order.
# Referencing devices via /dev/sd* is prohibited because it
# may result in cache misconfiguration after system reboot
# due to change(s) in drive order. It is allowed to use /dev/cas*-*
# as a device path.
## Caches configuration section
[caches]
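## Example entry (hypothetical WWN), in the required by-id form:
# 1    /dev/disk/by-id/wwn-0x123456789abcdef0    WT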


@ -41,7 +41,8 @@ Extra fields (optional) lazy_startup=<true,false>
.TP
\fBNOTES\fR
.RS
1) It is highly recommended to specify cache/core device using path that is constant across reboots - e.g. disk device links in /dev/disk/by-id/, preferably those using device WWN if available: /dev/disk/by-id/wwn-0x123456789abcdef0. Referencing devices via /dev/sd* may result in cache misconfiguration after system reboot due to change(s) in drive order.
1) It is required to specify cache/core device using links in /dev/disk/by-id/, preferably those using device WWN if available: /dev/disk/by-id/wwn-0x123456789abcdef0.
Referencing devices via /dev/sd* is prohibited because it may result in cache misconfiguration after system reboot due to change(s) in drive order. It is allowed to use /dev/cas*-* as a device path.
.TP
2) To specify use of the IC Classification file, place ioclass_file=path/to/file.csv in caches configuration section under Extra fields (optional)


@ -47,7 +47,8 @@ class casadm:
def list_caches(cls):
cmd = [cls.casadm_path,
'--list-caches',
'--output-format', 'csv']
'--output-format', 'csv',
'--by-id-path']
return cls.run_cmd(cmd)
@classmethod
@ -163,6 +164,7 @@ class casadm:
class cas_config(object):
default_location = '/etc/opencas/opencas.conf'
_by_id_dir = '/dev/disk/by-id'
class ConflictingConfigException(ValueError):
pass
@ -172,17 +174,16 @@ class cas_config(object):
@staticmethod
def get_by_id_path(path):
blocklist = ["lvm", "md-name"]
path = os.path.abspath(path)
for id_path in os.listdir('/dev/disk/by-id'):
if any([id_path.startswith(x) for x in blocklist]):
continue
if os.path.exists(path) or cas_config._is_exp_obj_path(path):
return path
else:
raise ValueError(f"Given path {path} isn't correct by-id path.")
full_path = '/dev/disk/by-id/{0}'.format(id_path)
if os.path.realpath(full_path) == os.path.realpath(path):
return full_path
raise ValueError('By-id device link not found for {0}'.format(path))
@staticmethod
def _is_exp_obj_path(path):
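# Matches CAS exported objects such as /dev/cas1-1 (cache 1, core 1).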
return re.search(r"cas\d+-\d+", path) is not None
@staticmethod
def check_block_device(path):
@ -192,10 +193,10 @@ class cas_config(object):
try:
mode = os.stat(path).st_mode
except:
raise ValueError('{0} not found'.format(path))
raise ValueError(f'{path} not found')
if not stat.S_ISBLK(mode):
raise ValueError('{0} is not block device'.format(path))
raise ValueError(f'{path} is not block device')
class cache_config(object):
def __init__(self, cache_id, device, cache_mode, **params):
@ -255,12 +256,12 @@ class cas_config(object):
elif param_name == "lazy_startup":
self.check_lazy_startup_valid(param_value)
else:
raise ValueError('{0} is invalid parameter name'.format(param_name))
raise ValueError(f'{param_name} is invalid parameter name')
@staticmethod
def check_cache_id_valid(cache_id):
if not 1 <= int(cache_id) <= 16384:
raise ValueError('{0} is invalid cache id'.format(cache_id))
raise ValueError(f'{cache_id} is invalid cache id')
def check_cache_device_empty(self):
try:
@ -273,17 +274,16 @@ class cas_config(object):
if len(list(filter(lambda a: a != '', result.stdout.split('\n')))) > 1:
raise ValueError(
'Partitions found on device {0}. Use force option to ignore'.
format(self.device))
f'Partitions found on device {self.device}. Use force option to ignore'
)
def check_cache_mode_valid(self, cache_mode):
if cache_mode.lower() not in ['wt', 'pt', 'wa', 'wb', 'wo']:
raise ValueError('Invalid cache mode {0}'.format(cache_mode))
raise ValueError(f'Invalid cache mode {cache_mode}')
def check_cleaning_policy_valid(self, cleaning_policy):
if cleaning_policy.lower() not in ['acp', 'alru', 'nop']:
raise ValueError('{0} is invalid cleaning policy name'.format(
cleaning_policy))
raise ValueError(f'{cleaning_policy} is invalid cleaning policy name')
def check_lazy_startup_valid(self, lazy_startup):
if lazy_startup.lower() not in ["true", "false"]:
@ -291,13 +291,11 @@ class cas_config(object):
def check_promotion_policy_valid(self, promotion_policy):
if promotion_policy.lower() not in ['always', 'nhit']:
raise ValueError('{0} is invalid promotion policy name'.format(
promotion_policy))
raise ValueError(f'{promotion_policy} is invalid promotion policy name')
def check_cache_line_size_valid(self, cache_line_size):
if cache_line_size not in ['4', '8', '16', '32', '64']:
raise ValueError('{0} is invalid cache line size'.format(
cache_line_size))
raise ValueError(f'{cache_line_size} is invalid cache line size')
def check_recursive(self):
if not self.device.startswith('/dev/cas'):
@ -310,7 +308,7 @@ class cas_config(object):
raise ValueError('Recursive configuration detected')
def to_line(self):
ret = '{0}\t{1}\t{2}'.format(self.cache_id, self.device, self.cache_mode)
ret = f'{self.cache_id}\t{self.device}\t{self.cache_mode}'
if len(self.params) > 0:
i = 0
for param, value in self.params.items():
@ -319,7 +317,7 @@ class cas_config(object):
else:
ret += '\t'
ret += '{0}={1}'.format(param, value)
ret += f'{param}={value}'
i += 1
ret += '\n'
@ -378,16 +376,14 @@ class cas_config(object):
if param_name == "lazy_startup":
if param_value.lower() not in ["true", "false"]:
raise ValueError(
"{} is invalid value for '{}' core param".format(
param_value, param_name
)
f"{param_value} is invalid value for '{param_name}' core param"
)
else:
raise ValueError("'{}' is invalid core param name".format(param_name))
raise ValueError(f"'{param_name}' is invalid core param name")
def check_core_id_valid(self):
if not 0 <= int(self.core_id) <= 4095:
raise ValueError('{0} is invalid core id'.format(self.core_id))
raise ValueError(f'{self.core_id} is invalid core id')
def check_recursive(self):
if not self.device.startswith('/dev/cas'):
@ -400,11 +396,11 @@ class cas_config(object):
raise ValueError('Recursive configuration detected')
def to_line(self):
ret = "{0}\t{1}\t{2}".format(self.cache_id, self.core_id, self.device)
ret = f"{self.cache_id}\t{self.core_id}\t{self.device}"
for i, (param, value) in enumerate(self.params.items()):
ret += "," if i > 0 else "\t"
ret += "{0}={1}".format(param, value)
ret += f"{param}={value}"
ret += "\n"
return ret
@ -493,7 +489,7 @@ class cas_config(object):
def insert_core(self, new_core_config):
if new_core_config.cache_id not in self.caches:
raise KeyError('Cache id {0} doesn\'t exist'.format(new_core_config.cache_id))
raise KeyError(f'Cache id {new_core_config.cache_id} doesn\'t exist')
try:
for cache_id, cache in self.caches.items():
@ -537,7 +533,7 @@ class cas_config(object):
def write(self, config_file):
try:
with open(config_file, 'w') as conf:
conf.write('{0}\n'.format(self.version_tag))
conf.write(f'{self.version_tag}\n')
conf.write('# This config was automatically generated\n')
conf.write('[caches]\n')
@ -547,7 +543,6 @@ class cas_config(object):
conf.write('\n[cores]\n')
for core in self.cores:
conf.write(core.to_line())
except:
raise Exception('Couldn\'t write config file')
@ -563,6 +558,7 @@ def start_cache(cache, load, force=False):
load=load,
force=force)
def configure_cache(cache):
if "cleaning_policy" in cache.params:
casadm.set_param(
@ -587,6 +583,7 @@ def add_core(core, attach):
# Other helper functions
def is_cache_started(cache_config):
dev_list = get_caches_list()
for dev in dev_list:
@ -595,6 +592,7 @@ def is_cache_started(cache_config):
return False
def is_core_added(core_config):
dev_list = get_caches_list()
cache_id = 0
@ -609,14 +607,17 @@ def is_core_added(core_config):
return False
def get_caches_list():
result = casadm.list_caches()
return list(csv.DictReader(result.stdout.split('\n')))
def check_cache_device(device):
result = casadm.check_cache_device(device)
return list(csv.DictReader(result.stdout.split('\n')))[0]
def get_cas_version():
version = casadm.get_version()
@ -640,7 +641,7 @@ class CompoundException(Exception):
s = "Multiple exceptions occured:\n" if len(self.exception_list) > 1 else ""
for e in self.exception_list:
s += '{0}\n'.format(str(e))
s += f'{str(e)}\n'
return s
@ -659,6 +660,7 @@ class CompoundException(Exception):
else:
raise self
def detach_core_recursive(cache_id, core_id, flush):
# Catching exceptions is left to uppermost caller of detach_core_recursive
# as the immediate caller that made a recursive call depends on the callee
@ -668,12 +670,13 @@ def detach_core_recursive(cache_id, core_id, flush):
if dev['type'] == 'cache':
l_cache_id = dev['id']
elif dev['type'] == 'core' and dev['status'] == 'Active':
if '/dev/cas{0}-{1}'.format(cache_id, core_id) in dev['disk']:
if f'/dev/cas{cache_id}-{core_id}' in dev['disk']:
detach_core_recursive(l_cache_id, dev['id'], flush)
elif l_cache_id == cache_id and dev['id'] == core_id and dev['status'] != 'Active':
return
casadm.remove_core(cache_id, core_id, detach = True, force = not flush)
casadm.remove_core(cache_id, core_id, detach=True, force=not flush)
def detach_all_cores(flush):
error = CompoundException()
@ -681,8 +684,7 @@ def detach_all_cores(flush):
try:
dev_list = get_caches_list()
except casadm.CasadmError as e:
raise Exception('Unable to list caches. Reason:\n{0}'.format(
e.result.stderr))
raise Exception(f'Unable to list caches. Reason:\n{e.result.stderr}')
except:
raise Exception('Unable to list caches.')
@ -696,22 +698,20 @@ def detach_all_cores(flush):
detach_core_recursive(cache_id, dev['id'], flush)
except casadm.CasadmError as e:
error.add_exception(Exception(
'Unable to detach core {0}. Reason:\n{1}'.format(
dev['disk'], e.result.stderr)))
f"Unable to detach core {dev['disk']}. Reason:\n{e.result.stderr}"))
except:
error.add_exception(Exception(
'Unable to detach core {0}.'.format(dev['disk'])))
error.add_exception(Exception(f"Unable to detach core {dev['disk']}."))
error.raise_nonempty()
def stop_all_caches(flush):
error = CompoundException()
try:
dev_list = get_caches_list()
except casadm.CasadmError as e:
raise Exception('Unable to list caches. Reason:\n{0}'.format(
e.result.stderr))
raise Exception(f'Unable to list caches. Reason:\n{e.result.stderr}')
except:
raise Exception('Unable to list caches.')
@ -723,14 +723,13 @@ def stop_all_caches(flush):
casadm.stop_cache(dev['id'], not flush)
except casadm.CasadmError as e:
error.add_exception(Exception(
'Unable to stop cache {0}. Reason:\n{1}'.format(
dev['disk'], e.result.stderr)))
f"Unable to stop cache {dev['disk']}. Reason:\n{e.result.stderr}"))
except:
error.add_exception(Exception(
'Unable to stop cache {0}.'.format(dev['disk'])))
error.add_exception(Exception(f"Unable to stop cache {dev['disk']}."))
error.raise_nonempty()
def stop(flush):
error = CompoundException()
@ -837,7 +836,7 @@ def wait_for_startup(timeout=300, interval=5):
cas_config.default_location, allow_incomplete=True
)
except Exception as e:
raise Exception("Unable to load opencas config. Reason: {0}".format(str(e)))
raise Exception(f"Unable to load opencas config. Reason: {str(e)}")
not_initialized = _get_uninitialized_devices(config)
if not not_initialized: