Merge pull request #140 from robertbaldyga/example-port-to-new-api

Fix example
This commit is contained in:
Michal Rakowski 2019-05-09 14:18:36 +02:00 committed by GitHub
commit d76adbb2a3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 140 additions and 20 deletions

View File

@ -24,7 +24,7 @@ $(PROGRAM): $(OBJS)
sync: sync:
@$(MAKE) -C ${OCFDIR} inc O=$(PWD) @$(MAKE) -C ${OCFDIR} inc O=$(PWD)
@$(MAKE) -C ${OCFDIR} src O=$(PWD) @$(MAKE) -C ${OCFDIR} src O=$(PWD)
@$(MAKE) -C ${OCFDIR} env O=$(PWD) ENV=posix @$(MAKE) -C ${OCFDIR} env O=$(PWD) OCF_ENV=posix
clean: clean:
@rm -rf $(PROGRAM) $(OBJS) @rm -rf $(PROGRAM) $(OBJS)

View File

@ -9,6 +9,14 @@
#include "data.h" #include "data.h"
#include "ctx.h" #include "ctx.h"
/*
* Cache private data. Used to share information between async contexts.
*/
struct cache_priv {
ocf_queue_t mngt_queue;
ocf_queue_t io_queue;
};
/* /*
* Helper function for error handling. * Helper function for error handling.
*/ */
@ -20,6 +28,8 @@ void error(char *msg)
/* /*
* Trigger queue asynchronously. Made synchronous for simplicity. * Trigger queue asynchronously. Made synchronous for simplicity.
* Notice that it makes all asynchronous calls synchronous, because
* asynchronism in OCF is achieved mostly by using queues.
*/ */
static inline void queue_kick_async(ocf_queue_t q) static inline void queue_kick_async(ocf_queue_t q)
{ {
@ -56,6 +66,26 @@ const struct ocf_queue_ops queue_ops = {
.stop = queue_stop, .stop = queue_stop,
}; };
/*
* Simple completion context. As lots of OCF API functions work asynchronously
* and call completion callback when job is done, we need some structure to
* share program state with completion callback. In this case we have single
* variable pointer to propagate error code.
*/
struct simple_context {
int *error;
};
/*
* Basic asynchronous completion callback. Just propagate error code.
*/
static void simple_complete(ocf_cache_t cache, void *priv, int error)
{
struct simple_context *context= priv;
*context->error = error;
}
/* /*
* Function starting cache and attaching cache device. * Function starting cache and attaching cache device.
*/ */
@ -63,9 +93,16 @@ int initialize_cache(ocf_ctx_t ctx, ocf_cache_t *cache)
{ {
struct ocf_mngt_cache_config cache_cfg = { }; struct ocf_mngt_cache_config cache_cfg = { };
struct ocf_mngt_cache_device_config device_cfg = { }; struct ocf_mngt_cache_device_config device_cfg = { };
ocf_queue_t queue; struct cache_priv *cache_priv;
struct simple_context context;
int ret; int ret;
/*
* Asynchronous callbacks will assign error code to ret. That
* way we have always the same variable holding last error code.
*/
context.error = &ret;
/* Cache configuration */ /* Cache configuration */
cache_cfg.backfill.max_queue_size = 65536; cache_cfg.backfill.max_queue_size = 65536;
cache_cfg.backfill.queue_unblock_size = 60000; cache_cfg.backfill.queue_unblock_size = 60000;
@ -81,27 +118,79 @@ int initialize_cache(ocf_ctx_t ctx, ocf_cache_t *cache)
if (ret) if (ret)
return ret; return ret;
/*
* Allocate cache private structure. We can not initialize it
* on stack, as it may be used in various async contexts
 * throughout the entire life span of the cache object.
*/
cache_priv = malloc(sizeof(*cache_priv));
if (!cache_priv)
return -ENOMEM;
/* Start cache */ /* Start cache */
ret = ocf_mngt_cache_start(ctx, cache, &cache_cfg); ret = ocf_mngt_cache_start(ctx, cache, &cache_cfg);
if (ret) if (ret)
return ret; goto err_priv;
ret = ocf_queue_create(*cache, &queue, &queue_ops); /* Assign cache priv structure to cache. */
if (!queue) { ocf_cache_set_priv(*cache, cache_priv);
ocf_mngt_cache_stop(*cache);
return -ENOMEM; /*
* Create management queue. It will be used for performing various
* asynchronous management operations, such as attaching cache volume
* or adding core object.
*/
ret = ocf_queue_create(*cache, &cache_priv->mngt_queue, &queue_ops);
if (ret) {
ocf_mngt_cache_stop(*cache, simple_complete, &context);
goto err_priv;
} }
ocf_cache_set_priv(*cache, queue); /*
* Assign management queue to cache. This has to be done before any
* other management operation. Management queue is treated specially,
* and it may not be used for submitting IO requests. It also will not
* be put on the cache stop - we have to put it manually at the end.
*/
ocf_mngt_cache_set_mngt_queue(*cache, cache_priv->mngt_queue);
/* Create queue which will be used for IO submission. */
ret = ocf_queue_create(*cache, &cache_priv->io_queue, &queue_ops);
if (ret)
goto err_cache;
/* Attach volume to cache */ /* Attach volume to cache */
ret = ocf_mngt_cache_attach(*cache, &device_cfg); ocf_mngt_cache_attach(*cache, &device_cfg, simple_complete, &context);
if (ret) { if (ret)
ocf_mngt_cache_stop(*cache); goto err_cache;
return ret;
}
return 0; return 0;
err_cache:
ocf_mngt_cache_stop(*cache, simple_complete, &context);
ocf_queue_put(cache_priv->mngt_queue);
err_priv:
free(cache_priv);
return ret;
}
/*
* Add core completion callback context. We need this to propagate error code
* and handle to freshly initialized core object.
*/
struct add_core_context {
ocf_core_t *core;
int *error;
};
/* Add core complete callback. Just rewrite args to context structure. */
static void add_core_complete(ocf_cache_t cache, ocf_core_t core,
void *priv, int error)
{
struct add_core_context *context = priv;
*context->core = core;
*context->error = error;
} }
/* /*
@ -110,8 +199,16 @@ int initialize_cache(ocf_ctx_t ctx, ocf_cache_t *cache)
int initialize_core(ocf_cache_t cache, ocf_core_t *core) int initialize_core(ocf_cache_t cache, ocf_core_t *core)
{ {
struct ocf_mngt_core_config core_cfg = { }; struct ocf_mngt_core_config core_cfg = { };
struct add_core_context context;
int ret; int ret;
/*
* Asynchronous callback will assign core handle to core,
 * and error code to ret.
*/
context.core = core;
context.error = &ret;
/* Core configuration */ /* Core configuration */
core_cfg.volume_type = VOL_TYPE; core_cfg.volume_type = VOL_TYPE;
core_cfg.name = "core1"; core_cfg.name = "core1";
@ -120,7 +217,9 @@ int initialize_core(ocf_cache_t cache, ocf_core_t *core)
return ret; return ret;
/* Add core to cache */ /* Add core to cache */
return ocf_mngt_cache_add_core(cache, core, &core_cfg); ocf_mngt_cache_add_core(cache, &core_cfg, add_core_complete, &context);
return ret;
} }
/* /*
@ -158,9 +257,9 @@ void complete_read(struct ocf_io *io, int error)
int submit_io(ocf_core_t core, struct volume_data *data, int submit_io(ocf_core_t core, struct volume_data *data,
uint64_t addr, uint64_t len, int dir, ocf_end_io_t cmpl) uint64_t addr, uint64_t len, int dir, ocf_end_io_t cmpl)
{ {
struct ocf_io *io;
ocf_cache_t cache = ocf_core_get_cache(core); ocf_cache_t cache = ocf_core_get_cache(core);
ocf_queue_t queue = (ocf_queue_t)ocf_cache_get_priv(cache); struct cache_priv *cache_priv = ocf_cache_get_priv(cache);
struct ocf_io *io;
/* Allocate new io */ /* Allocate new io */
io = ocf_core_new_io(core); io = ocf_core_new_io(core);
@ -172,7 +271,7 @@ int submit_io(ocf_core_t core, struct volume_data *data,
/* Assign data to io */ /* Assign data to io */
ocf_io_set_data(io, data, 0); ocf_io_set_data(io, data, 0);
/* Setup io queue to */ /* Setup io queue to */
ocf_io_set_queue(io, queue); ocf_io_set_queue(io, cache_priv->io_queue);
/* Setup completion function */ /* Setup completion function */
ocf_io_set_cmpl(io, NULL, NULL, cmpl); ocf_io_set_cmpl(io, NULL, NULL, cmpl);
/* Submit io */ /* Submit io */
@ -231,11 +330,23 @@ void perform_workload(ocf_core_t core)
*/ */
} }
static void remove_core_complete(void *priv, int error)
{
struct simple_context *context = priv;
*context->error = error;
}
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
struct cache_priv *cache_priv;
struct simple_context context;
ocf_ctx_t ctx; ocf_ctx_t ctx;
ocf_cache_t cache1; ocf_cache_t cache1;
ocf_core_t core1; ocf_core_t core1;
int ret;
context.error = &ret;
/* Initialize OCF context */ /* Initialize OCF context */
if (ctx_init(&ctx)) if (ctx_init(&ctx))
@ -253,13 +364,22 @@ int main(int argc, char *argv[])
perform_workload(core1); perform_workload(core1);
/* Remove core from cache */ /* Remove core from cache */
if (ocf_mngt_cache_remove_core(core1)) ocf_mngt_cache_remove_core(core1, remove_core_complete, &context);
if (ret)
error("Unable to remove core\n"); error("Unable to remove core\n");
/* Stop cache */ /* Stop cache */
if (ocf_mngt_cache_stop(cache1)) ocf_mngt_cache_stop(cache1, simple_complete, &context);
if (ret)
error("Unable to stop cache\n"); error("Unable to stop cache\n");
cache_priv = ocf_cache_get_priv(cache1);
/* Put the management queue */
ocf_queue_put(cache_priv->mngt_queue);
free(cache_priv);
/* Deinitialize context */ /* Deinitialize context */
ctx_cleanup(ctx); ctx_cleanup(ctx);

View File

@ -14,7 +14,7 @@
* In open() function we store uuid data as volume name (for debug messages) * In open() function we store uuid data as volume name (for debug messages)
* and allocate 200 MiB of memory to simulate backend storage device. * and allocate 200 MiB of memory to simulate backend storage device.
*/ */
static int volume_open(ocf_volume_t volume) static int volume_open(ocf_volume_t volume, void *volume_params)
{ {
const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(volume); const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(volume);
struct myvolume *myvolume = ocf_volume_get_priv(volume); struct myvolume *myvolume = ocf_volume_get_priv(volume);