Merge pull request #1507 from rafalste/cache_detach

Implement cache detach/attach

Commit: 5dccbc3978

casadm/cas_lib.c: 216 changed lines

@@ -45,20 +45,8 @@

 #define CORE_ADD_MAX_TIMEOUT 30

-#define CHECK_IF_CACHE_IS_MOUNTED -1
-
-/**
- * @brief Routine verifies if filesystem is currently mounted for given cache/core
- *
- * If FAILURE is returned, reason for failure is printed onto
- * standard error.
- * @param cache_id cache id of filesystem (to verify if it is mounted)
- * @param core_id core id of filesystem (to verify if it is mounted); if this
- * parameter is set to negative value, it is only checked if any core belonging
- * to given cache is mounted;
- * @return SUCCESS if is not mounted; FAILURE if filesystem is mounted
- */
-int check_if_mounted(int cache_id, int core_id);
+int is_cache_mounted(int cache_id);
+int is_core_mounted(int cache_id, int core_id);

 /* KCAS_IOCTL_CACHE_CHECK_DEVICE wrapper */
 int _check_cache_device(const char *device_path,
@@ -808,10 +796,6 @@ struct cache_device *get_cache_device(const struct kcas_cache_info *info, bool b
 	cache->promotion_policy = info->info.promotion_policy;
 	cache->size = info->info.cache_line_size;

-	if ((info->info.state & (1 << ocf_cache_state_running)) == 0) {
-		return cache;
-	}
-
 	for (cache->core_count = 0; cache->core_count < info->info.core_count; ++cache->core_count) {
 		core_id = info->core_id[cache->core_count];

@@ -957,16 +941,13 @@ int check_cache_already_added(const char *cache_device) {
 	return SUCCESS;
 }

-int start_cache(uint16_t cache_id, unsigned int cache_init,
-		const char *cache_device, ocf_cache_mode_t cache_mode,
-		ocf_cache_line_size_t line_size, int force)
+static int _verify_and_parse_volume_path(char *tgt_buf,
+		size_t tgt_buf_size, const char *cache_device,
+		size_t paths_size)
 {
 	int fd = 0;
-	struct kcas_start_cache cmd;
-	int status;
-	double min_free_ram_gb;

-	/* check if cache device given exists */
+	/* check if cache device exists */
 	fd = open(cache_device, 0);
 	if (fd < 0) {
 		cas_printf(LOG_ERR, "Device %s not found.\n", cache_device);
@@ -974,25 +955,50 @@ int start_cache(uint16_t cache_id, unsigned int cache_init,
 	}
 	close(fd);

+	if (set_device_path(tgt_buf, tgt_buf_size, cache_device, paths_size) != SUCCESS) {
+		return FAILURE;
+	}
+
+	return SUCCESS;
+}
+
+static int _start_cache(uint16_t cache_id, unsigned int cache_init,
+		const char *cache_device, ocf_cache_mode_t cache_mode,
+		ocf_cache_line_size_t line_size, int force, bool start)
+{
+	int fd = 0;
+	struct kcas_start_cache cmd = {};
+	int status;
+	int ioctl = start ? KCAS_IOCTL_START_CACHE : KCAS_IOCTL_ATTACH_CACHE;
+	double min_free_ram_gb;
+
 	fd = open_ctrl_device();
 	if (fd == -1)
 		return FAILURE;

-	memset(&cmd, 0, sizeof(cmd));
-	cmd.cache_id = cache_id;
-	cmd.init_cache = cache_init;
-	if (set_device_path(cmd.cache_path_name, sizeof(cmd.cache_path_name),
-			cache_device, MAX_STR_LEN) != SUCCESS) {
+	status = _verify_and_parse_volume_path(
+			cmd.cache_path_name,
+			sizeof(cmd.cache_path_name),
+			cache_device,
+			MAX_STR_LEN);
+	if (status != SUCCESS) {
 		close(fd);
 		return FAILURE;
 	}

+	cmd.cache_id = cache_id;
 	cmd.caching_mode = cache_mode;
 	cmd.line_size = line_size;
 	cmd.force = (uint8_t)force;
+	cmd.init_cache = cache_init;

-	status = run_ioctl_interruptible_retry(fd, KCAS_IOCTL_START_CACHE, &cmd,
-			"Starting cache", cache_id, OCF_CORE_ID_INVALID);
+	status = run_ioctl_interruptible_retry(
+			fd,
+			ioctl,
+			&cmd,
+			start ? "Starting cache" : "Attaching device to cache",
+			cache_id,
+			OCF_CORE_ID_INVALID);
 	cache_id = cmd.cache_id;
 	if (status < 0) {
 		close(fd);
@@ -1002,9 +1008,11 @@ int start_cache(uint16_t cache_id, unsigned int cache_init,
 		min_free_ram_gb /= GiB;

 		cas_printf(LOG_ERR, "Not enough free RAM.\n"
-				"You need at least %0.2fGB to start cache"
+				"You need at least %0.2fGB to %s cache"
 				" with cache line size equal %llukB.\n",
-				min_free_ram_gb, line_size / KiB);
+				min_free_ram_gb,
+				start ? "start" : "attach a device to",
+				line_size / KiB);

 		if (64 * KiB > line_size)
 			cas_printf(LOG_ERR, "Try with greater cache line size.\n");
@@ -1025,7 +1033,80 @@ int start_cache(uint16_t cache_id, unsigned int cache_init,
 	check_cache_state_incomplete(cache_id, fd);
 	close(fd);

-	cas_printf(LOG_INFO, "Successfully added cache instance %u\n", cache_id);
+	cas_printf(LOG_INFO, "Successfully %s %u\n",
+			start ? "added cache instance" : "attached device to cache",
+			cache_id);
+
+	return SUCCESS;
+}
+
+int start_cache(uint16_t cache_id, unsigned int cache_init,
+		const char *cache_device, ocf_cache_mode_t cache_mode,
+		ocf_cache_line_size_t line_size, int force)
+{
+	return _start_cache(cache_id, cache_init, cache_device, cache_mode,
+			line_size, force, true);
+}
+
+int attach_cache(uint16_t cache_id, const char *cache_device, int force)
+{
+	return _start_cache(cache_id, CACHE_INIT_NEW, cache_device,
+			ocf_cache_mode_none, ocf_cache_line_size_none, force, false);
+}
+
+int detach_cache(uint16_t cache_id)
+{
+	int fd = 0;
+	struct kcas_stop_cache cmd = {};
+	int ioctl_code = KCAS_IOCTL_DETACH_CACHE;
+	int status;
+
+	fd = open_ctrl_device();
+	if (fd == -1)
+		return FAILURE;
+
+	cmd.cache_id = cache_id;
+	cmd.flush_data = true;
+
+	status = run_ioctl_interruptible_retry(
+			fd,
+			ioctl_code,
+			&cmd,
+			"Detaching the device from cache",
+			cache_id,
+			OCF_CORE_ID_INVALID);
+	close(fd);
+
+	if (status < 0) {
+		if (OCF_ERR_FLUSHING_INTERRUPTED == cmd.ext_err_code) {
+			cas_printf(LOG_ERR,
+				"You have interrupted detaching the device "
+				"from cache %d. CAS continues to operate "
+				"normally.\n",
+				cache_id
+			);
+			return INTERRUPTED;
+		} else if (OCF_ERR_WRITE_CACHE == cmd.ext_err_code) {
+			cas_printf(LOG_ERR,
+				"Detached the device from cache %d "
+				"with errors\n",
+				cache_id
+			);
+			print_err(cmd.ext_err_code);
+			return FAILURE;
+		} else {
+			cas_printf(LOG_ERR,
+				"Error while detaching the device from"
+				" cache %d\n",
+				cache_id
+			);
+			print_err(cmd.ext_err_code);
+			return FAILURE;
+		}
+	}
+
+	cas_printf(LOG_INFO, "Successfully detached device from cache %hu\n",
+			cache_id);

 	return SUCCESS;
 }
@@ -1033,40 +1114,51 @@ int start_cache(uint16_t cache_id, unsigned int cache_init,
 int stop_cache(uint16_t cache_id, int flush)
 {
 	int fd = 0;
-	struct kcas_stop_cache cmd;
+	struct kcas_stop_cache cmd = {};
+	int ioctl_code = KCAS_IOCTL_STOP_CACHE;
+	int status;

-	/* don't even attempt ioctl if filesystem is mounted */
-	if (check_if_mounted(cache_id, CHECK_IF_CACHE_IS_MOUNTED) == FAILURE) {
+	/* Don't stop instance with mounted filesystem */
+	if (is_cache_mounted(cache_id) == FAILURE)
 		return FAILURE;
-	}

 	fd = open_ctrl_device();
 	if (fd == -1)
 		return FAILURE;

-	memset(&cmd, 0, sizeof(cmd));
 	cmd.cache_id = cache_id;
 	cmd.flush_data = flush;

-	if(run_ioctl_interruptible_retry(fd, KCAS_IOCTL_STOP_CACHE, &cmd, "Stopping cache",
-			cache_id, OCF_CORE_ID_INVALID) < 0) {
+	status = run_ioctl_interruptible_retry(
+			fd,
+			ioctl_code,
+			&cmd,
+			"Stopping cache",
+			cache_id,
+			OCF_CORE_ID_INVALID);
 	close(fd);
+
+	if (status < 0) {
 		if (OCF_ERR_FLUSHING_INTERRUPTED == cmd.ext_err_code) {
-			cas_printf(LOG_ERR, "You have interrupted stopping of cache. CAS continues\n"
-				"to operate normally. If you want to stop cache without fully\n"
-				"flushing dirty data, use '-n' option.\n");
+			cas_printf(LOG_ERR,
+				"You have interrupted stopping of cache %d. "
+				"CAS continues\nto operate normally. The cache"
+				" can be stopped without\nflushing dirty data "
+				"by using '-n' option.\n", cache_id);
 			return INTERRUPTED;
-		} else if (cmd.ext_err_code == OCF_ERR_WRITE_CACHE){
-			cas_printf(LOG_ERR, "Removed cache %d with errors\n", cache_id);
+		} else if (OCF_ERR_WRITE_CACHE == cmd.ext_err_code){
+			cas_printf(LOG_ERR, "Stopped cache %d with errors\n", cache_id);
 			print_err(cmd.ext_err_code);
 			return FAILURE;
 		} else {
-			cas_printf(LOG_ERR, "Error while removing cache %d\n", cache_id);
+			cas_printf(LOG_ERR, "Error while stopping cache %d\n", cache_id);
 			print_err(cmd.ext_err_code);
 			return FAILURE;
 		}
 	}
-	close(fd);
+
+	cas_printf(LOG_INFO, "Successfully stopped cache %hu\n", cache_id);
+
 	return SUCCESS;
 }

@@ -1711,7 +1803,7 @@ int add_core(unsigned int cache_id, unsigned int core_id, const char *core_devic
 	return SUCCESS;
 }

-int check_if_mounted(int cache_id, int core_id)
+int _check_if_mounted(int cache_id, int core_id)
 {
 	FILE *mtab;
 	struct mntent *mstruct;
@@ -1755,6 +1847,16 @@ int check_if_mounted(int cache_id, int core_id)

 }

+int is_cache_mounted(int cache_id)
+{
+	return _check_if_mounted(cache_id, -1);
+}
+
+int is_core_mounted(int cache_id, int core_id)
+{
+	return _check_if_mounted(cache_id, core_id);
+}
+
 int remove_core(unsigned int cache_id, unsigned int core_id,
 		bool detach, bool force_no_flush)
 {
@@ -1762,7 +1864,7 @@ int remove_core(unsigned int cache_id, unsigned int core_id,
 	struct kcas_remove_core cmd;

 	/* don't even attempt ioctl if filesystem is mounted */
-	if (SUCCESS != check_if_mounted(cache_id, core_id)) {
+	if (SUCCESS != is_core_mounted(cache_id, core_id)) {
 		return FAILURE;
 	}

@@ -1828,7 +1930,7 @@ int remove_inactive_core(unsigned int cache_id, unsigned int core_id,
 	struct kcas_remove_inactive cmd;

 	/* don't even attempt ioctl if filesystem is mounted */
-	if (SUCCESS != check_if_mounted(cache_id, core_id)) {
+	if (SUCCESS != is_core_mounted(cache_id, core_id)) {
 		return FAILURE;
 	}

@@ -2692,6 +2794,7 @@ int list_caches(unsigned int list_format, bool by_id_path)
 		char cache_ctrl_dev[MAX_STR_LEN] = "-";
 		float cache_flush_prog;
 		float core_flush_prog;
+		bool cache_device_detached;

 		if (!by_id_path && !curr_cache->standby_detached) {
 			if (get_dev_path(curr_cache->device, curr_cache->device,
@@ -2723,11 +2826,16 @@ int list_caches(unsigned int list_format, bool by_id_path)
 			}
 		}

+		cache_device_detached =
+			((curr_cache->state & (1 << ocf_cache_state_standby)) |
+			 (curr_cache->state & (1 << ocf_cache_state_detached)))
+			;
+
 		fprintf(intermediate_file[1], TAG(TREE_BRANCH)
 			"%s,%u,%s,%s,%s,%s\n",
 			"cache", /* type */
 			curr_cache->id, /* id */
-			curr_cache->standby_detached ? "-" : curr_cache->device, /* device path */
+			cache_device_detached ? "-" : curr_cache->device, /* device path */
 			tmp_status, /* cache status */
 			mode_string, /* write policy */
 			cache_ctrl_dev /* device */);
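The new wrappers above give casadm a compact user-space API for the feature: attach_cache() reuses the start path with CACHE_INIT_NEW and no cache-mode or line-size override, while detach_cache() always requests a flush before issuing KCAS_IOCTL_DETACH_CACHE. A minimal sketch of how a caller could drive a device replacement with them follows; the cache id and device path are made up for illustration, and the header name is assumed to be casadm/cas_lib.h.

#include "cas_lib.h"

/* Illustrative only: detach the device from cache 1, then attach a new one.
 * Return values follow the SUCCESS/FAILURE/INTERRUPTED convention used above. */
static int replace_cache_device(void)
{
	int status;

	/* Flushes dirty data (cmd.flush_data = true), then KCAS_IOCTL_DETACH_CACHE. */
	status = detach_cache(1);
	if (status != SUCCESS)
		return status;

	/* Internally _start_cache(..., CACHE_INIT_NEW, ..., false), i.e. KCAS_IOCTL_ATTACH_CACHE. */
	return attach_cache(1, "/dev/nvme0n1", 0);
}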

@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -116,6 +117,9 @@ int start_cache(uint16_t cache_id, unsigned int cache_init,
 		ocf_cache_line_size_t line_size, int force);
 int stop_cache(uint16_t cache_id, int flush);

+int detach_cache(uint16_t cache_id);
+int attach_cache(uint16_t cache_id, const char *cache_device, int force);
+
 #ifdef WI_AVAILABLE
 #define CAS_CLI_HELP_START_CACHE_MODES "wt|wb|wa|pt|wi|wo"
 #define CAS_CLI_HELP_SET_CACHE_MODES "wt|wb|wa|pt|wi|wo"

@@ -332,6 +332,13 @@ static cli_option start_options[] = {
 	{0}
 };

+static cli_option attach_cache_options[] = {
+	{'d', "cache-device", CACHE_DEVICE_DESC, 1, "DEVICE", CLI_OPTION_REQUIRED},
+	{'i', "cache-id", CACHE_ID_DESC_LONG, 1, "ID", CLI_OPTION_REQUIRED},
+	{'f', "force", "Force attaching the cache device"},
+	{0}
+};
+
 static int check_fs(const char* device, bool force)
 {
 	char cache_dev_path[MAX_STR_LEN];
@@ -405,6 +412,20 @@ int validate_cache_path(const char* path, bool force)
 	return SUCCESS;
 }

+int handle_cache_attach(void)
+{
+	return attach_cache(
+		command_args_values.cache_id,
+		command_args_values.cache_device,
+		command_args_values.force
+	);
+}
+
+int handle_cache_detach(void)
+{
+	return detach_cache(command_args_values.cache_id);
+}
+
 int handle_start()
 {
 	int status;
@@ -527,6 +548,11 @@ static cli_option stop_options[] = {
 	{0}
 };

+static cli_option detach_options[] = {
+	{'i', "cache-id", CACHE_ID_DESC, 1, "ID", CLI_OPTION_REQUIRED},
+	{0}
+};
+
 int handle_stop()
 {
 	return stop_cache(command_args_values.cache_id,
@@ -2204,6 +2230,26 @@ static cli_command cas_commands[] = {
 		.flags = CLI_SU_REQUIRED,
 		.help = NULL,
 	},
+	{
+		.name = "attach-cache",
+		.desc = "Attach cache device",
+		.long_desc = NULL,
+		.options = attach_cache_options,
+		.command_handle_opts = start_cache_command_handle_option,
+		.handle = handle_cache_attach,
+		.flags = CLI_SU_REQUIRED,
+		.help = NULL,
+	},
+	{
+		.name = "detach-cache",
+		.desc = "Detach cache device",
+		.long_desc = NULL,
+		.options = detach_options,
+		.command_handle_opts = command_handle_option,
+		.handle = handle_cache_detach,
+		.flags = CLI_SU_REQUIRED,
+		.help = NULL,
+	},
 	{
 		.name = "stop-cache",
 		.short_name = 'T',
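Going by the option tables above (attach-cache takes a required --cache-device and --cache-id plus an optional --force, detach-cache takes only --cache-id), the new commands would presumably be invoked along these lines; the exact spelling should be confirmed against casadm --help:

casadm --attach-cache --cache-device /dev/nvme0n1 --cache-id 1
casadm --detach-cache --cache-id 1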

@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -178,6 +179,10 @@ struct {
 		OCF_ERR_CACHE_LINE_SIZE_MISMATCH,
 		"Cache line size mismatch"
 	},
+	{
+		OCF_ERR_CACHE_DETACHED,
+		"The operation is not permited while the cache is detached"
+	},
 	{
 		OCF_ERR_CACHE_STANDBY,
 		"The operation is not permited while the cache is in the standby mode"
@@ -239,6 +244,11 @@ struct {
 		"Device contains partitions.\nIf you want to continue, "
 		"please use --force option.\nWarning: all data will be lost!"
 	},
+	{
+		KCAS_ERR_DEVICE_PROPERTIES_MISMATCH,
+		"The new device's properties doesn't match the original cache device's"
+		" properties"
+	},
 	{
 		KCAS_ERR_A_PART,
 		"Formatting of partition is unsupported."

@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2021 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
 #include <stdlib.h>
@@ -124,6 +125,8 @@ int table_set(struct table *t, int y, int x, char *c)
 	int len = strnlen(c, MAX_STR_LEN);
 	if (len >= MAX_STR_LEN) {
 		return 1;
+	} else if (len == 0) {
+		return 0;
 	}

 	/* step 1: ensure that space for row y is allocated */

@@ -1,6 +1,7 @@
 #!/bin/bash
 #
 # Copyright(c) 2012-2022 Intel Corporation
+# Copyright(c) 2024 Huawei Technologies
 # SPDX-License-Identifier: BSD-3-Clause
 #

@@ -10,6 +11,7 @@
 check() {
 	cur_name=$(basename $2)
 	config_file_path=$1
+
 	if compile_module $cur_name "struct queue_limits q; q.limits_aux;" "linux/blkdev.h"
 	then
 		echo $cur_name "1" >> $config_file_path
@@ -34,53 +36,73 @@ apply() {
     "1")
 		add_function "
 	static inline void cas_copy_queue_limits(struct request_queue *exp_q,
-			struct request_queue *cache_q, struct request_queue *core_q)
+			struct queue_limits *cache_q_limits, struct request_queue *core_q)
 	{
 		struct queue_limits_aux *l_aux = exp_q->limits.limits_aux;
-		exp_q->limits = cache_q->limits;
+		exp_q->limits = *cache_q_limits;
 		exp_q->limits.limits_aux = l_aux;
-		if (exp_q->limits.limits_aux && cache_q->limits.limits_aux)
-			*exp_q->limits.limits_aux = *cache_q->limits.limits_aux;
+		if (exp_q->limits.limits_aux && cache_q_limits->limits_aux)
+			*exp_q->limits.limits_aux = *cache_q_limits->limits_aux;
 		exp_q->limits.max_sectors = core_q->limits.max_sectors;
 		exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors;
 		exp_q->limits.max_segments = core_q->limits.max_segments;
 		exp_q->limits.max_write_same_sectors = 0;
+	}"
+
+		add_function "
+	static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q)
+	{
 		if (queue_virt_boundary(cache_q))
 			queue_flag_set(QUEUE_FLAG_NOMERGES, cache_q);
 	}" ;;
     "2")
 		add_function "
 	static inline void cas_copy_queue_limits(struct request_queue *exp_q,
-			struct request_queue *cache_q, struct request_queue *core_q)
+			struct queue_limits *cache_q_limits, struct request_queue *core_q)
 	{
-		exp_q->limits = cache_q->limits;
+		exp_q->limits = *cache_q_limits;
 		exp_q->limits.max_sectors = core_q->limits.max_sectors;
 		exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors;
 		exp_q->limits.max_segments = core_q->limits.max_segments;
 		exp_q->limits.max_write_same_sectors = 0;
 		exp_q->limits.max_write_zeroes_sectors = 0;
+	}"
+
+		add_function "
+	static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q)
+	{
 	}" ;;
     "3")
 		add_function "
 	static inline void cas_copy_queue_limits(struct request_queue *exp_q,
-			struct request_queue *cache_q, struct request_queue *core_q)
+			struct queue_limits *cache_q_limits, struct request_queue *core_q)
 	{
-		exp_q->limits = cache_q->limits;
+		exp_q->limits = *cache_q_limits;
 		exp_q->limits.max_sectors = core_q->limits.max_sectors;
 		exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors;
 		exp_q->limits.max_segments = core_q->limits.max_segments;
 		exp_q->limits.max_write_zeroes_sectors = 0;
+	}"
+
+		add_function "
+	static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q)
+	{
 	}" ;;
     "4")
 		add_function "
 	static inline void cas_copy_queue_limits(struct request_queue *exp_q,
-			struct request_queue *cache_q, struct request_queue *core_q)
+			struct queue_limits *cache_q_limits, struct request_queue *core_q)
 	{
-		exp_q->limits = cache_q->limits;
+		exp_q->limits = *cache_q_limits;
 		exp_q->limits.max_sectors = core_q->limits.max_sectors;
 		exp_q->limits.max_hw_sectors = core_q->limits.max_hw_sectors;
 		exp_q->limits.max_segments = core_q->limits.max_segments;
 		exp_q->limits.max_write_same_sectors = 0;
+	}"
+
+		add_function "
+	static inline void cas_cache_set_no_merges_flag(struct request_queue *cache_q)
+	{
 	}" ;;


@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -70,6 +71,11 @@ struct cache_priv {
 	ocf_queue_t mngt_queue;
 	void *attach_context;
 	bool cache_exp_obj_initialized;
+	struct {
+		struct queue_limits queue_limits;
+		bool fua;
+		bool flush;
+	} device_properties;
 	ocf_queue_t io_queues[];
 };

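The device_properties member added to cache_priv is what makes detach workable on the kernel side: exported core objects keep handling I/O while the cache volume is absent, so values that used to be read from the cache device's request_queue must come from this saved copy instead. The two halves of that contract, distilled from the hunks further down (capture at cache start, consumption when configuring an exported object):

/* Captured once the cache device is up (see _cache_save_device_properties below). */
cache_priv->device_properties.queue_limits = cache_q->limits;
cache_priv->device_properties.flush = CAS_CHECK_QUEUE_FLUSH(cache_q);
cache_priv->device_properties.fua = CAS_CHECK_QUEUE_FUA(cache_q);

/* Consumed later without touching the (possibly detached) cache volume. */
flush = CAS_CHECK_QUEUE_FLUSH(core_q) || cache_priv->device_properties.flush;
fua = CAS_CHECK_QUEUE_FUA(core_q) || cache_priv->device_properties.fua;
cas_copy_queue_limits(exp_q, &cache_priv->device_properties.queue_limits, core_q);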

@@ -1271,7 +1271,10 @@ static int cache_mngt_update_core_uuid(ocf_cache_t cache, const char *core_name,
 	if (result)
 		return result;

-	return _cache_mngt_save_sync(cache);
+	if (ocf_cache_is_device_attached(cache))
+		result = _cache_mngt_save_sync(cache);
+
+	return result;
 }

 static void _cache_mngt_log_core_device_path(ocf_core_t core)
@@ -1717,7 +1720,12 @@ int cache_mngt_set_partitions(const char *cache_name, size_t name_len,

 	if (ocf_cache_is_standby(cache)) {
 		result = -OCF_ERR_CACHE_STANDBY;
-		goto out_standby;
+		goto out_not_running;
+	}
+
+	if (!ocf_cache_is_device_attached(cache)) {
+		result = -OCF_ERR_CACHE_DETACHED;
+		goto out_not_running;
 	}

 	for (class_id = 0; class_id < OCF_USER_IO_CLASS_MAX; class_id++) {
@@ -1752,7 +1760,7 @@ out_cls:
 		while (class_id--)
 			cas_cls_rule_destroy(cache, cls_rule[class_id]);
 	}
-out_standby:
+out_not_running:
 	ocf_mngt_cache_put(cache);
 out_get:
 	kfree(io_class_cfg);
@@ -1868,6 +1876,39 @@ static int cache_mngt_create_cache_device_cfg(
 	return 0;
 }

+int cache_mngt_attach_cache_cfg(char *cache_name, size_t name_len,
+		struct ocf_mngt_cache_config *cfg,
+		struct ocf_mngt_cache_attach_config *attach_cfg,
+		struct kcas_start_cache *cmd)
+{
+	int result;
+
+	if (!cmd)
+		return -OCF_ERR_INVAL;
+
+	memset(cfg, 0, sizeof(*cfg));
+	memset(attach_cfg, 0, sizeof(*attach_cfg));
+
+	result = cache_mngt_create_cache_device_cfg(&attach_cfg->device,
+			cmd->cache_path_name);
+	if (result)
+		return result;
+
+	//TODO maybe attach should allow to change cache line size?
+	//cfg->cache_line_size = cmd->line_size;
+	cfg->use_submit_io_fast = !use_io_scheduler;
+	cfg->locked = true;
+	cfg->metadata_volatile = true;
+
+	cfg->backfill.max_queue_size = max_writeback_queue_size;
+	cfg->backfill.queue_unblock_size = writeback_queue_unblock_size;
+	attach_cfg->cache_line_size = cmd->line_size;
+	attach_cfg->force = cmd->force;
+	attach_cfg->discard_on_start = true;
+
+	return 0;
+}
+
 static void cache_mngt_destroy_cache_device_cfg(
 		struct ocf_mngt_cache_device_config *cfg)
 {
@@ -2048,7 +2089,6 @@ static void init_instance_complete(struct _cache_mngt_attach_context *ctx,

 }

-
 static void calculate_min_ram_size(ocf_cache_t cache,
 		struct _cache_mngt_attach_context *ctx)
 {
@@ -2078,6 +2118,30 @@ end:
 		printk(KERN_WARNING "Cannot calculate amount of DRAM needed\n");
 }

+static void _cache_mngt_attach_complete(ocf_cache_t cache, void *priv,
+		int error)
+{
+	struct _cache_mngt_attach_context *ctx = priv;
+	int caller_status;
+	char *path;
+
+	cache_mngt_destroy_cache_device_cfg(&ctx->device_cfg);
+
+	if (!error) {
+		path = (char *)ocf_volume_get_uuid(ocf_cache_get_volume(
+					cache))->data;
+		printk(KERN_INFO "Succsessfully attached %s\n", path);
+	}
+
+	caller_status = _cache_mngt_async_callee_set_result(&ctx->async, error);
+	if (caller_status != -KCAS_ERR_WAITING_INTERRUPTED)
+		return;
+
+	kfree(ctx);
+	ocf_mngt_cache_unlock(cache);
+	ocf_mngt_cache_put(cache);
+}
+
 static void _cache_mngt_start_complete(ocf_cache_t cache, void *priv, int error)
 {
 	struct _cache_mngt_attach_context *ctx = priv;
@@ -2202,6 +2266,42 @@ out_bdev:
 	return result;
 }

+static void volume_set_no_merges_flag_helper(ocf_cache_t cache)
+{
+	struct request_queue *cache_q;
+	struct bd_object *bvol;
+	struct block_device *bd;
+	ocf_volume_t volume;
+
+	volume = ocf_cache_get_volume(cache);
+	if (!volume)
+		return;
+
+	bvol = bd_object(volume);
+	bd = cas_disk_get_blkdev(bvol->dsk);
+	cache_q = bd->bd_disk->queue;
+
+	cas_cache_set_no_merges_flag(cache_q);
+}
+
+static void _cache_save_device_properties(ocf_cache_t cache)
+{
+	struct block_device *bd;
+	struct bd_object *bvol;
+	struct request_queue *cache_q;
+	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
+
+	bvol = bd_object(ocf_cache_get_volume(cache));
+	bd = cas_disk_get_blkdev(bvol->dsk);
+	cache_q = bd->bd_disk->queue;
+
+	cache_priv->device_properties.queue_limits = cache_q->limits;
+	cache_priv->device_properties.flush =
+			CAS_CHECK_QUEUE_FLUSH(cache_q);
+	cache_priv->device_properties.fua =
+			CAS_CHECK_QUEUE_FUA(cache_q);
+}
+
 static int _cache_start_finalize(ocf_cache_t cache, int init_mode,
 		bool activate)
 {
@@ -2219,6 +2319,10 @@ static int _cache_start_finalize(ocf_cache_t cache, int init_mode,
 			return result;
 		}
 		ctx->cls_inited = true;
+
+		volume_set_no_merges_flag_helper(cache);
+
+		_cache_save_device_properties(cache);
 	}

 	if (activate)
@@ -2258,14 +2362,21 @@ static int _cache_start_finalize(ocf_cache_t cache, int init_mode,
 }

 static int cache_mngt_check_bdev(struct ocf_mngt_cache_device_config *cfg,
-		bool force)
+		bool force, bool reattach, ocf_cache_t cache)
 {
 	char holder[] = "CAS START\n";
 	cas_bdev_handle_t bdev_handle;
 	struct block_device *bdev;
 	int part_count;
 	bool is_part;
+	bool reattach_properties_diff = false;
+	struct cache_priv *cache_priv;
 	const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(cfg->volume);
+	/* The only reason to use blk_stack_limits() is checking compatibility of
+	 * the new device with the original cache. But since the functions modifies
+	 * content of queue_limits, we use copy of the original struct.
+	 */
+	struct queue_limits tmp_limits;

 	bdev_handle = cas_bdev_open_by_path(uuid->data,
 			(CAS_BLK_MODE_EXCL | CAS_BLK_MODE_READ), holder);
@@ -2278,12 +2389,48 @@ static int cache_mngt_check_bdev(struct ocf_mngt_cache_device_config *cfg,

 	is_part = (cas_bdev_whole(bdev) != bdev);
 	part_count = cas_blk_get_part_count(bdev);
+
+	if (reattach) {
+		ENV_BUG_ON(!cache);
+
+		cache_priv = ocf_cache_get_priv(cache);
+		tmp_limits = cache_priv->device_properties.queue_limits;
+
+		if (blk_stack_limits(&tmp_limits, &bdev->bd_disk->queue->limits, 0)) {
+			reattach_properties_diff = true;
+			printk(KERN_WARNING "New cache device block properties "
+					"differ from the previous one.\n");
+		}
+		if (tmp_limits.misaligned) {
+			reattach_properties_diff = true;
+			printk(KERN_WARNING "New cache device block interval "
+					"doesn't line up with the previous one.\n");
+		}
+		if (CAS_CHECK_QUEUE_FLUSH(bdev->bd_disk->queue) !=
+				cache_priv->device_properties.flush) {
+			reattach_properties_diff = true;
+			printk(KERN_WARNING "New cache device %s support flush "
+					"in contrary to the previous cache device.\n",
+					cache_priv->device_properties.flush ? "doesn't" : "does");
+		}
+		if (CAS_CHECK_QUEUE_FUA(bdev->bd_disk->queue) !=
+				cache_priv->device_properties.fua) {
+			reattach_properties_diff = true;
+			printk(KERN_WARNING "New cache device %s support fua "
+					"in contrary to the previous cache device.\n",
+					cache_priv->device_properties.fua ? "doesn't" : "does");
+		}
+	}
+
 	cas_bdev_release(bdev_handle,
 			(CAS_BLK_MODE_EXCL | CAS_BLK_MODE_READ), holder);

 	if (!is_part && part_count > 1 && !force)
 		return -KCAS_ERR_CONTAINS_PART;
+
+	if (reattach_properties_diff)
+		return -KCAS_ERR_DEVICE_PROPERTIES_MISMATCH;
+
 	return 0;
 }

@@ -2362,6 +2509,72 @@ int cache_mngt_create_cache_standby_activate_cfg(
 	return 0;
 }

+static void _cache_mngt_detach_cache_complete(ocf_cache_t cache, void *priv,
+		int error)
+{
+	struct _cache_mngt_async_context *context = priv;
+	int result;
+
+	result = _cache_mngt_async_callee_set_result(context, error);
+
+	if (result != -KCAS_ERR_WAITING_INTERRUPTED)
+		return;
+
+	kfree(context);
+	ocf_mngt_cache_unlock(cache);
+	kfree(context);
+}
+
+int cache_mngt_attach_device(const char *cache_name, size_t name_len,
+		const char *device, struct ocf_mngt_cache_attach_config *attach_cfg)
+{
+	struct _cache_mngt_attach_context *context;
+	ocf_cache_t cache;
+	int result = 0;
+
+	result = ocf_mngt_cache_get_by_name(cas_ctx, cache_name,
+			OCF_CACHE_NAME_SIZE, &cache);
+	if (result)
+		goto err_get;
+
+	result = _cache_mngt_lock_sync(cache);
+	if (result)
+		goto err_lock;
+
+	result = cache_mngt_check_bdev(&attach_cfg->device,
+			attach_cfg->force, true, cache);
+	if (result)
+		goto err_ctx;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context) {
+		result = -ENOMEM;
+		goto err_ctx;
+	}
+
+	context->device_cfg = attach_cfg->device;
+
+	_cache_mngt_async_context_init(&context->async);
+
+	ocf_mngt_cache_attach(cache, attach_cfg, _cache_mngt_attach_complete,
+			context);
+	result = wait_for_completion_interruptible(&context->async.cmpl);
+
+	result = _cache_mngt_async_caller_set_result(&context->async, result);
+	if (result == -KCAS_ERR_WAITING_INTERRUPTED)
+		goto err_get;
+
+	volume_set_no_merges_flag_helper(cache);
+
+	kfree(context);
+err_ctx:
+	ocf_mngt_cache_unlock(cache);
+err_lock:
+	ocf_mngt_cache_put(cache);
+err_get:
+	return result;
+}
+
 int cache_mngt_activate(struct ocf_mngt_cache_standby_activate_config *cfg,
 		struct kcas_standby_activate *cmd)
 {
@@ -2396,7 +2609,7 @@ int cache_mngt_activate(struct ocf_mngt_cache_standby_activate_config *cfg,
 	 * to compare data on drive and in DRAM to provide more specific
 	 * error code.
 	 */
-	result = cache_mngt_check_bdev(&cfg->device, true);
+	result = cache_mngt_check_bdev(&cfg->device, true, false, NULL);
 	if (result)
 		goto out_cache_unlock;

@@ -2493,7 +2706,7 @@ int cache_mngt_init_instance(struct ocf_mngt_cache_config *cfg,
 	if (!try_module_get(THIS_MODULE))
 		return -KCAS_ERR_SYSTEM;

-	result = cache_mngt_check_bdev(&attach_cfg->device, attach_cfg->force);
+	result = cache_mngt_check_bdev(&attach_cfg->device, attach_cfg->force, false, NULL);
 	if (result) {
 		module_put(THIS_MODULE);
 		return result;
@@ -2864,6 +3077,11 @@ int cache_mngt_set_cache_mode(const char *cache_name, size_t name_len,
 		goto put;
 	}

+	if (!ocf_cache_is_device_attached(cache)) {
+		result = -OCF_ERR_CACHE_DETACHED;
+		goto put;
+	}
+
 	old_mode = ocf_cache_get_mode(cache);
 	if (old_mode == mode) {
 		printk(KERN_INFO "%s is in requested cache mode already\n", cache_name);
@@ -2911,6 +3129,53 @@ put:
 	return result;
 }

+int cache_mngt_detach_cache(const char *cache_name, size_t name_len)
+{
+	ocf_cache_t cache;
+	int status = 0;
+	struct _cache_mngt_async_context *context;
+
+	context = kmalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return -ENOMEM;
+
+	_cache_mngt_async_context_init(context);
+
+	status = ocf_mngt_cache_get_by_name(cas_ctx, cache_name,
+			name_len, &cache);
+	if (status)
+		goto err_get_cache;
+
+	if (ocf_cache_is_running(cache))
+		status = _cache_flush_with_lock(cache);
+	if (status)
+		goto err_flush;
+
+	status = _cache_mngt_lock_sync(cache);
+	if (status)
+		goto err_lock;
+
+	ocf_mngt_cache_detach(cache, _cache_mngt_detach_cache_complete, context);
+
+	status = wait_for_completion_interruptible(&context->cmpl);
+	status = _cache_mngt_async_caller_set_result(context, status);
+
+	if (status == -KCAS_ERR_WAITING_INTERRUPTED) {
+		printk(KERN_WARNING "Waiting for cache detach interrupted. "
+				"The operation will finish asynchronously.\n");
+		goto err_int;
+	}
+
+	ocf_mngt_cache_unlock(cache);
+err_lock:
+err_flush:
+	ocf_mngt_cache_put(cache);
+err_get_cache:
+	kfree(context);
+err_int:
+	return status;
+}
+
 /**
  * @brief routine implements --stop-cache command.
  * @param[in] cache_name caching device name to be removed

@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */
 #ifndef __LAYER_CACHE_MANAGEMENT_H__
@@ -42,6 +43,11 @@ int cache_mngt_reset_stats(const char *cache_name, size_t cache_name_len,
 int cache_mngt_set_partitions(const char *cache_name, size_t name_len,
 		struct kcas_io_classes *cfg);

+int cache_mngt_detach_cache(const char *cache_name, size_t name_len);
+
+int cache_mngt_attach_device(const char *cache_name, size_t name_len,
+		const char *device, struct ocf_mngt_cache_attach_config *attach_cfg);
+
 int cache_mngt_exit_instance(const char *cache_name, size_t name_len,
 		int flush);

@@ -49,6 +55,11 @@ int cache_mngt_create_cache_cfg(struct ocf_mngt_cache_config *cfg,
 		struct ocf_mngt_cache_attach_config *attach_cfg,
 		struct kcas_start_cache *cmd);

+int cache_mngt_attach_cache_cfg(char *cache_name, size_t name_len,
+		struct ocf_mngt_cache_config *cfg,
+		struct ocf_mngt_cache_attach_config *attach_cfg,
+		struct kcas_start_cache *cmd);
+
 int cache_mngt_core_pool_get_paths(struct kcas_core_pool_path *cmd_info);

 int cache_mngt_core_pool_remove(struct kcas_core_pool_remove *cmd_info);

@@ -78,6 +78,41 @@ long cas_service_ioctl_ctrl(struct file *filp, unsigned int cmd,
 		RETURN_CMD_RESULT(cmd_info, arg, retval);
 	}

+	case KCAS_IOCTL_ATTACH_CACHE: {
+		struct kcas_start_cache *cmd_info;
+		struct ocf_mngt_cache_config cfg;
+		struct ocf_mngt_cache_attach_config attach_cfg;
+		char cache_name[OCF_CACHE_NAME_SIZE];
+
+		GET_CMD_INFO(cmd_info, arg);
+
+		cache_name_from_id(cache_name, cmd_info->cache_id);
+
+		retval = cache_mngt_attach_cache_cfg(cache_name, OCF_CACHE_NAME_SIZE,
+				&cfg, &attach_cfg, cmd_info);
+		if (retval)
+			RETURN_CMD_RESULT(cmd_info, arg, retval);
+
+		retval = cache_mngt_attach_device(cache_name, OCF_CACHE_NAME_SIZE,
+				cmd_info->cache_path_name, &attach_cfg);
+
+		RETURN_CMD_RESULT(cmd_info, arg, retval);
+	}
+
+	case KCAS_IOCTL_DETACH_CACHE: {
+		struct kcas_stop_cache *cmd_info;
+		char cache_name[OCF_CACHE_NAME_SIZE];
+
+		GET_CMD_INFO(cmd_info, arg);
+
+		cache_name_from_id(cache_name, cmd_info->cache_id);
+
+		retval = cache_mngt_detach_cache(cache_name,
+				OCF_CACHE_NAME_SIZE);
+
+		RETURN_CMD_RESULT(cmd_info, arg, retval);
+	}
+
 	case KCAS_IOCTL_SET_CACHE_STATE: {
 		struct kcas_set_cache_state *cmd_info;
 		char cache_name[OCF_CACHE_NAME_SIZE];

@@ -35,35 +35,29 @@ static void blkdev_set_exported_object_flush_fua(ocf_core_t core)
 {
 	ocf_cache_t cache = ocf_core_get_cache(core);
 	ocf_volume_t core_vol = ocf_core_get_volume(core);
-	ocf_volume_t cache_vol = ocf_cache_get_volume(cache);
-	struct bd_object *bd_core_vol, *bd_cache_vol;
-	struct request_queue *core_q, *exp_q, *cache_q;
+	struct bd_object *bd_core_vol;
+	struct request_queue *core_q, *exp_q;
 	bool flush, fua;
-	BUG_ON(!cache_vol);
+	struct cache_priv *cache_priv = ocf_cache_get_priv(cache);

 	bd_core_vol = bd_object(core_vol);
-	bd_cache_vol = bd_object(cache_vol);

 	core_q = cas_disk_get_queue(bd_core_vol->dsk);
 	exp_q = cas_exp_obj_get_queue(bd_core_vol->dsk);
-	cache_q = cas_disk_get_queue(bd_cache_vol->dsk);

-	flush = (CAS_CHECK_QUEUE_FLUSH(core_q) || CAS_CHECK_QUEUE_FLUSH(cache_q));
-	fua = (CAS_CHECK_QUEUE_FUA(core_q) || CAS_CHECK_QUEUE_FUA(cache_q));
+	flush = (CAS_CHECK_QUEUE_FLUSH(core_q) ||
+			cache_priv->device_properties.flush);
+	fua = (CAS_CHECK_QUEUE_FUA(core_q) || cache_priv->device_properties.fua);

 	cas_set_queue_flush_fua(exp_q, flush, fua);
 }

 static void blkdev_set_discard_properties(ocf_cache_t cache,
-		struct request_queue *exp_q, struct block_device *cache_bd,
-		struct block_device *core_bd, sector_t core_sectors)
+		struct request_queue *exp_q, struct block_device *core_bd,
+		sector_t core_sectors)
 {
 	struct request_queue *core_q;
-	struct request_queue *cache_q;

 	core_q = bdev_get_queue(core_bd);
-	cache_q = bdev_get_queue(cache_bd);

 	cas_set_discard_flag(exp_q);

@@ -91,38 +85,32 @@ static int blkdev_core_set_geometry(struct cas_disk *dsk, void *private)
 	ocf_core_t core;
 	ocf_cache_t cache;
 	ocf_volume_t core_vol;
-	ocf_volume_t cache_vol;
-	struct bd_object *bd_cache_vol;
-	struct request_queue *core_q, *cache_q, *exp_q;
-	struct block_device *core_bd, *cache_bd;
+	struct request_queue *core_q, *exp_q;
+	struct block_device *core_bd;
 	sector_t sectors;
 	const char *path;
+	struct cache_priv *cache_priv;

 	BUG_ON(!private);
 	core = private;
 	cache = ocf_core_get_cache(core);
 	core_vol = ocf_core_get_volume(core);
-	cache_vol = ocf_cache_get_volume(cache);
-	BUG_ON(!cache_vol);
+	cache_priv = ocf_cache_get_priv(cache);

-	bd_cache_vol = bd_object(cache_vol);
 	path = ocf_volume_get_uuid(core_vol)->data;

 	core_bd = cas_disk_get_blkdev(dsk);
 	BUG_ON(!core_bd);

-	cache_bd = cas_disk_get_blkdev(bd_cache_vol->dsk);
-	BUG_ON(!cache_bd);
-
 	core_q = cas_bdev_whole(core_bd)->bd_disk->queue;
-	cache_q = cache_bd->bd_disk->queue;
 	exp_q = cas_exp_obj_get_queue(dsk);

 	sectors = ocf_volume_get_length(core_vol) >> SECTOR_SHIFT;

 	set_capacity(cas_exp_obj_get_gendisk(dsk), sectors);

-	cas_copy_queue_limits(exp_q, cache_q, core_q);
+	cas_copy_queue_limits(exp_q, &cache_priv->device_properties.queue_limits,
+			core_q);

 	if (exp_q->limits.logical_block_size >
 			core_q->limits.logical_block_size) {
@@ -139,8 +127,7 @@ static int blkdev_core_set_geometry(struct cas_disk *dsk, void *private)

 	blkdev_set_exported_object_flush_fua(core);

-	blkdev_set_discard_properties(cache, exp_q, cache_bd, core_bd,
-			sectors);
+	blkdev_set_discard_properties(cache, exp_q, core_bd, sectors);

 	exp_q->queue_flags |= (1 << QUEUE_FLAG_NONROT);

@@ -469,7 +456,8 @@ static int blkdev_cache_set_geometry(struct cas_disk *dsk, void *private)

 	set_capacity(cas_exp_obj_get_gendisk(dsk), sectors);

-	cas_copy_queue_limits(exp_q, cache_q, cache_q);
+	cas_copy_queue_limits(exp_q, &cache_q->limits, cache_q);
+	cas_cache_set_no_merges_flag(cache_q);

 	blk_stack_limits(&exp_q->limits, &cache_q->limits, 0);


@@ -1,5 +1,6 @@
 /*
  * Copyright(c) 2012-2022 Intel Corporation
+ * Copyright(c) 2024 Huawei Technologies
  * SPDX-License-Identifier: BSD-3-Clause
  */

@@ -405,6 +406,8 @@ struct kcas_standby_activate
  *	39	*	KCAS_IOCTL_STANDBY_ACTIVATE	*	OK	*
  *	40	*	KCAS_IOCTL_CORE_INFO		*	OK	*
  *	41	*	KCAS_IOCTL_START_CACHE		*	OK	*
+ *	42	*	KCAS_IOCTL_DETACH_CACHE		*	OK	*
+ *	43	*	KCAS_IOCTL_ATTACH_CACHE		*	OK	*
  *******************************************************************************
  */

@@ -503,6 +506,12 @@ struct kcas_standby_activate
 /** Start new cache instance, load cache or recover cache */
 #define KCAS_IOCTL_START_CACHE _IOWR(KCAS_IOCTL_MAGIC, 41, struct kcas_start_cache)

+/** Detach cache device */
+#define KCAS_IOCTL_DETACH_CACHE _IOWR(KCAS_IOCTL_MAGIC, 42, struct kcas_stop_cache)
+
+/** Attach cache device */
+#define KCAS_IOCTL_ATTACH_CACHE _IOWR(KCAS_IOCTL_MAGIC, 43, struct kcas_start_cache)
+
 /**
  * Extended kernel CAS error codes
  */
@@ -545,6 +554,11 @@ enum kcas_error {
 	/** Device contains partitions */
 	KCAS_ERR_CONTAINS_PART,

+	/** The new device's properties doesn't match the original cache's
+	 * properties
+	 */
+	KCAS_ERR_DEVICE_PROPERTIES_MISMATCH,
+
 	/** Given device is a partition */
 	KCAS_ERR_A_PART,

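For completeness, a sketch of what the casadm wrappers ultimately do with these codes: fill the corresponding kcas_* structure and issue the ioctl on the CAS control device. The control-device path and the bare open()/ioctl() handling below are assumptions for illustration; casadm itself goes through open_ctrl_device() and run_ioctl_interruptible_retry().

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "cas_ioctl_codes.h"

/* Hypothetical stand-alone detach of cache 1; "/dev/cas_ctrl" is an assumed node name. */
static int detach_cache_raw(void)
{
	struct kcas_stop_cache cmd;
	int fd, ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cache_id = 1;
	cmd.flush_data = 1;	/* flush dirty data before detaching */

	fd = open("/dev/cas_ctrl", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, KCAS_IOCTL_DETACH_CACHE, &cmd);
	if (ret < 0)
		fprintf(stderr, "detach failed, ext_err_code=%d\n", cmd.ext_err_code);

	close(fd);
	return ret;
}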