author | Fan Zhang <roy.fan.zhang@intel.com> | 2020-04-29 14:00:03 +0100
---|---|---
committer | Damjan Marion <dmarion@me.com> | 2020-04-30 14:38:33 +0000
commit | f539578bac8b64886b57c460c9d74273e6613f8b (patch)
tree | 190e09705fe1ebb46ca86a48c19de49fcaf0cbb0 /src/vnet/crypto
parent | 162330f25aeec09694fffaaa31ba9b318620eb9c (diff)
crypto: introduce async crypto infra
Type: feature
Signed-off-by: Damjan Marion <damarion@cisco.com>
Signed-off-by: Filip Tehlar <ftehlar@cisco.com>
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Signed-off-by: Dariusz Kazimierski <dariuszx.kazimierski@intel.com>
Signed-off-by: Piotr Kleski <piotrx.kleski@intel.com>
Change-Id: I4c3fcccf55c36842b7b48aed260fef2802b5c54b
Diffstat (limited to 'src/vnet/crypto')
-rw-r--r-- | src/vnet/crypto/cli.c    | 195
-rw-r--r-- | src/vnet/crypto/crypto.c | 296
-rw-r--r-- | src/vnet/crypto/crypto.h | 327
-rw-r--r-- | src/vnet/crypto/format.c |  55
-rw-r--r-- | src/vnet/crypto/node.c   | 184
5 files changed, 1025 insertions(+), 32 deletions(-)
diff --git a/src/vnet/crypto/cli.c b/src/vnet/crypto/cli.c
index c3383692757..f6778930ef7 100644
--- a/src/vnet/crypto/cli.c
+++ b/src/vnet/crypto/cli.c
@@ -58,26 +58,45 @@ format_vnet_crypto_engine_candidates (u8 * s, va_list * args)
 {
   vnet_crypto_engine_t *e;
   vnet_crypto_main_t *cm = &crypto_main;
-
-  vnet_crypto_op_id_t id = va_arg (*args, vnet_crypto_op_id_t);
+  u32 id = va_arg (*args, u32);
   u32 ei = va_arg (*args, u32);
   int is_chained = va_arg (*args, int);
+  int is_async = va_arg (*args, int);
 
-  vec_foreach (e, cm->engines)
+  if (is_async)
     {
-      void *h = is_chained ? (void *) e->chained_ops_handlers[id]
-        : (void *) e->ops_handlers[id];
-
-      if (h)
-        {
-          s = format (s, "%U", format_vnet_crypto_engine, e - cm->engines);
-          if (ei == e - cm->engines)
-            s = format (s, "%c ", '*');
-          else
-            s = format (s, " ");
-        }
+      vec_foreach (e, cm->engines)
+        {
+          if (e->enqueue_handlers[id] && e->dequeue_handlers[id])
+            {
+              s = format (s, "%U", format_vnet_crypto_engine, e - cm->engines);
+              if (ei == e - cm->engines)
+                s = format (s, "%c ", '*');
+              else
+                s = format (s, " ");
+            }
+        }
+
+      return s;
+    }
+  else
+    {
+      vec_foreach (e, cm->engines)
+        {
+          void *h = is_chained ? (void *) e->chained_ops_handlers[id]
+            : (void *) e->ops_handlers[id];
+
+          if (h)
+            {
+              s = format (s, "%U", format_vnet_crypto_engine, e - cm->engines);
+              if (ei == e - cm->engines)
+                s = format (s, "%c ", '*');
+              else
+                s = format (s, " ");
+            }
+        }
+      return s;
     }
-  return s;
 }
 
 static u8 *
@@ -103,9 +122,9 @@ format_vnet_crypto_handlers (u8 * s, va_list * args)
       s = format (s, "%-16U", format_vnet_crypto_op_type, od->type);
       s = format (s, "%-28U", format_vnet_crypto_engine_candidates, id,
-                  od->active_engine_index_simple, 0);
+                  od->active_engine_index_simple, 0, 0);
       s = format (s, "%U", format_vnet_crypto_engine_candidates, id,
-                  od->active_engine_index_chained, 1);
+                  od->active_engine_index_chained, 1, 0);
       first = 0;
     }
   return s;
@@ -232,6 +251,148 @@ VLIB_CLI_COMMAND (set_crypto_handler_command, static) =
 };
 /* *INDENT-ON* */
 
+static u8 *
+format_vnet_crypto_async_handlers (u8 * s, va_list * args)
+{
+  vnet_crypto_async_alg_t alg = va_arg (*args, vnet_crypto_async_alg_t);
+  vnet_crypto_main_t *cm = &crypto_main;
+  vnet_crypto_async_alg_data_t *d = vec_elt_at_index (cm->async_algs, alg);
+  u32 indent = format_get_indent (s);
+  int i, first = 1;
+
+  for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_TYPES; i++)
+    {
+      vnet_crypto_async_op_data_t *od;
+      vnet_crypto_async_op_id_t id = d->op_by_type[i];
+
+      if (id == 0)
+        continue;
+
+      od = cm->async_opt_data + id;
+      if (first == 0)
+        s = format (s, "\n%U", format_white_space, indent);
+      s = format (s, "%-16U", format_vnet_crypto_async_op_type, od->type);
+
+      s = format (s, "%U", format_vnet_crypto_engine_candidates, id,
+                  od->active_engine_index_async, 0, 1);
+      first = 0;
+    }
+  return s;
+}
+
+static clib_error_t *
+show_crypto_async_handlers_command_fn (vlib_main_t * vm,
+                                       unformat_input_t * input,
+                                       vlib_cli_command_t * cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  int i;
+
+  if (unformat_user (input, unformat_line_input, line_input))
+    unformat_free (line_input);
+
+  vlib_cli_output (vm, "%-28s%-16s%s", "Algo", "Type", "Handler");
+
+  for (i = 0; i < VNET_CRYPTO_N_ASYNC_ALGS; i++)
+    vlib_cli_output (vm, "%-28U%U", format_vnet_crypto_async_alg, i,
+                     format_vnet_crypto_async_handlers, i);
+
+  return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_crypto_async_handlers_command, static) =
+{
+  .path = "show crypto async handlers",
+  .short_help = "show crypto async handlers",
+  .function = show_crypto_async_handlers_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+set_crypto_async_handler_command_fn (vlib_main_t * vm,
+                                     unformat_input_t * input,
+                                     vlib_cli_command_t * cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  vnet_crypto_main_t *cm = &crypto_main;
+  int rc = 0;
+  char **args = 0, *s, **arg, *engine = 0;
+  int all = 0;
+  clib_error_t *error = 0;
+
+  if (!unformat_user (input, unformat_line_input, line_input))
+    return 0;
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "all"))
+        all = 1;
+      else if (unformat (line_input, "%s", &s))
+        vec_add1 (args, s);
+      else
+        {
+          error = clib_error_return (0, "invalid params");
+          goto done;
+        }
+    }
+
+  if ((vec_len (args) < 2 && !all) || (vec_len (args) == 0 && all))
+    {
+      error = clib_error_return (0, "missing cipher or engine!");
+      goto done;
+    }
+
+  engine = vec_elt_at_index (args, vec_len (args) - 1)[0];
+  vec_del1 (args, vec_len (args) - 1);
+
+  if (all)
+    {
+      char *key;
+      u8 *value;
+
+      /* *INDENT-OFF* */
+      hash_foreach_mem (key, value, cm->async_alg_index_by_name,
+      ({
+        (void) value;
+        rc += vnet_crypto_set_async_handler2 (key, engine);
+      }));
+      /* *INDENT-ON* */
+
+      if (rc)
+        vlib_cli_output (vm, "failed to set crypto engine!");
+    }
+  else
+    {
+      vec_foreach (arg, args)
+      {
+        rc = vnet_crypto_set_async_handler2 (arg[0], engine);
+        if (rc)
+          {
+            vlib_cli_output (vm, "failed to set engine %s for %s!",
+                             engine, arg[0]);
+          }
+      }
+    }
+
+done:
+  vec_free (engine);
+  vec_foreach (arg, args) vec_free (arg[0]);
+  vec_free (args);
+  unformat_free (line_input);
+  return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_crypto_async_handler_command, static) =
+{
+  .path = "set crypto async handler",
+  .short_help = "set crypto async handler type [type2 type3 ...] engine",
+  .function = set_crypto_async_handler_command_fn,
+};
+/* *INDENT-ON* */
+
 /*
  * fd.io coding-style-patch-verification: ON
  *
engine", + .function = set_crypto_async_handler_command_fn, +}; +/* *INDENT-ON* */ + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/crypto/crypto.c b/src/vnet/crypto/crypto.c index 1caff71b3e2..288e227821b 100644 --- a/src/vnet/crypto/crypto.c +++ b/src/vnet/crypto/crypto.c @@ -61,7 +61,6 @@ vnet_crypto_process_ops_call_handler (vlib_main_t * vm, return rv; } - static_always_inline u32 vnet_crypto_process_ops_inline (vlib_main_t * vm, vnet_crypto_op_t ops[], vnet_crypto_op_chunk_t * chunks, u32 n_ops) @@ -267,6 +266,44 @@ vnet_crypto_register_ops_handlers (vlib_main_t * vm, u32 engine_index, } void +vnet_crypto_register_async_handler (vlib_main_t * vm, u32 engine_index, + vnet_crypto_async_op_id_t opt, + vnet_crypto_frame_enqueue_t * enqueue_hdl, + vnet_crypto_frame_dequeue_t * dequeue_hdl) +{ + vnet_crypto_main_t *cm = &crypto_main; + vnet_crypto_engine_t *ae, *e = vec_elt_at_index (cm->engines, engine_index); + vnet_crypto_async_op_data_t *otd = cm->async_opt_data + opt; + vec_validate_aligned (cm->enqueue_handlers, VNET_CRYPTO_ASYNC_OP_N_IDS - 1, + CLIB_CACHE_LINE_BYTES); + vec_validate_aligned (cm->dequeue_handlers, VNET_CRYPTO_ASYNC_OP_N_IDS - 1, + CLIB_CACHE_LINE_BYTES); + + /* both enqueue hdl and dequeue hdl should present */ + if (!enqueue_hdl && !dequeue_hdl) + return; + + e->enqueue_handlers[opt] = enqueue_hdl; + e->dequeue_handlers[opt] = dequeue_hdl; + if (otd->active_engine_index_async == ~0) + { + otd->active_engine_index_async = engine_index; + cm->enqueue_handlers[opt] = enqueue_hdl; + cm->dequeue_handlers[opt] = dequeue_hdl; + } + + ae = vec_elt_at_index (cm->engines, otd->active_engine_index_async); + if (ae->priority < e->priority) + { + otd->active_engine_index_async = engine_index; + cm->enqueue_handlers[opt] = enqueue_hdl; + cm->dequeue_handlers[opt] = dequeue_hdl; + } + + return; +} + +void vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index, vnet_crypto_key_handler_t * key_handler) { @@ -318,10 +355,10 @@ vnet_crypto_key_add (vlib_main_t * vm, vnet_crypto_alg_t alg, u8 * data, pool_get_zero (cm->keys, key); index = key - cm->keys; + key->type = VNET_CRYPTO_KEY_TYPE_DATA; key->alg = alg; vec_validate_aligned (key->data, length - 1, CLIB_CACHE_LINE_BYTES); clib_memcpy (key->data, data, length); - /* *INDENT-OFF* */ vec_foreach (engine, cm->engines) if (engine->key_op_handler) @@ -343,11 +380,218 @@ vnet_crypto_key_del (vlib_main_t * vm, vnet_crypto_key_index_t index) engine->key_op_handler (vm, VNET_CRYPTO_KEY_OP_DEL, index); /* *INDENT-ON* */ - clib_memset (key->data, 0, vec_len (key->data)); - vec_free (key->data); + if (key->type == VNET_CRYPTO_KEY_TYPE_DATA) + { + clib_memset (key->data, 0, vec_len (key->data)); + vec_free (key->data); + } + else if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) + { + key->index_crypto = key->index_integ = 0; + } + pool_put (cm->keys, key); } +vnet_crypto_async_alg_t +vnet_crypto_link_algs (vnet_crypto_alg_t crypto_alg, + vnet_crypto_alg_t integ_alg) +{ +#define _(c, h, s, k ,d) \ + if (crypto_alg == VNET_CRYPTO_ALG_##c && \ + integ_alg == VNET_CRYPTO_ALG_HMAC_##h) \ + return VNET_CRYPTO_ALG_##c##_##h##_TAG##d; + foreach_crypto_link_async_alg +#undef _ + return ~0; +} + +u32 +vnet_crypto_key_add_linked (vlib_main_t * vm, + vnet_crypto_key_index_t index_crypto, + vnet_crypto_key_index_t index_integ) +{ + u32 index; + vnet_crypto_main_t *cm = &crypto_main; + vnet_crypto_engine_t *engine; + vnet_crypto_key_t *key_crypto, *key_integ, *key; + vnet_crypto_async_alg_t linked_alg; + + key_crypto = 
pool_elt_at_index (cm->keys, index_crypto); + key_integ = pool_elt_at_index (cm->keys, index_integ); + + if (!key_crypto || !key_integ) + return ~0; + + linked_alg = vnet_crypto_link_algs (key_crypto->alg, key_integ->alg); + if (linked_alg == ~0) + return ~0; + + pool_get_zero (cm->keys, key); + index = key - cm->keys; + key->type = VNET_CRYPTO_KEY_TYPE_LINK; + key->index_crypto = index_crypto; + key->index_integ = index_integ; + key->async_alg = linked_alg; + + /* *INDENT-OFF* */ + vec_foreach (engine, cm->engines) + if (engine->key_op_handler) + engine->key_op_handler (vm, VNET_CRYPTO_KEY_OP_ADD, index); + /* *INDENT-ON* */ + + return index; +} + +clib_error_t * +crypto_dispatch_enable_disable (int is_enable) +{ + vlib_main_t *vm = vlib_get_main (); + vlib_thread_main_t *tm = vlib_get_thread_main (); + vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "crypto-dispatch"); + vnet_crypto_main_t *cm = &crypto_main; + u32 skip_master = vlib_num_workers () > 0, i; + u32 state_change = 0; + vlib_node_state_t state; + + if (is_enable && cm->async_refcnt > 0) + { + state_change = 1; + state = VLIB_NODE_STATE_POLLING; + } + + if (!is_enable && cm->async_refcnt == 0) + { + state_change = 1; + state = VLIB_NODE_STATE_DISABLED; + } + + if (state_change) + for (i = skip_master; i < tm->n_vlib_mains; i++) + vlib_node_set_state (vlib_mains[i], node->index, state); + + return 0; +} + +static_always_inline void +crypto_set_active_async_engine (vnet_crypto_async_op_data_t * od, + vnet_crypto_async_op_id_t id, u32 ei) +{ + vnet_crypto_main_t *cm = &crypto_main; + vnet_crypto_engine_t *ce = vec_elt_at_index (cm->engines, ei); + + if (ce->enqueue_handlers[id] && ce->dequeue_handlers[id]) + { + od->active_engine_index_async = ei; + cm->enqueue_handlers[id] = ce->enqueue_handlers[id]; + cm->dequeue_handlers[id] = ce->dequeue_handlers[id]; + } +} + +int +vnet_crypto_set_async_handler2 (char *alg_name, char *engine) +{ + uword *p; + vnet_crypto_main_t *cm = &crypto_main; + vnet_crypto_async_alg_data_t *ad; + int i; + + p = hash_get_mem (cm->async_alg_index_by_name, alg_name); + if (!p) + return -1; + + ad = vec_elt_at_index (cm->async_algs, p[0]); + + p = hash_get_mem (cm->engine_index_by_name, engine); + if (!p) + return -1; + + for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_TYPES; i++) + { + vnet_crypto_async_op_data_t *od; + vnet_crypto_async_op_id_t id = ad->op_by_type[i]; + if (id == 0) + continue; + + od = cm->async_opt_data + id; + crypto_set_active_async_engine (od, id, p[0]); + } + + return 0; +} + +u32 +vnet_crypto_register_post_node (vlib_main_t * vm, char *post_node_name) +{ + vnet_crypto_main_t *cm = &crypto_main; + vnet_crypto_async_next_node_t *nn = 0; + vlib_node_t *cc, *pn; + uword index = vec_len (cm->next_nodes); + + pn = vlib_get_node_by_name (vm, (u8 *) post_node_name); + if (!pn) + return ~0; + + /* *INDENT-OFF* */ + vec_foreach (cm->next_nodes, nn) + { + if (nn->node_idx == pn->index) + return nn->next_idx; + } + /* *INDENT-ON* */ + + vec_validate (cm->next_nodes, index); + nn = vec_elt_at_index (cm->next_nodes, index); + + cc = vlib_get_node_by_name (vm, (u8 *) "crypto-dispatch"); + nn->next_idx = vlib_node_add_named_next (vm, cc->index, post_node_name); + nn->node_idx = pn->index; + + return nn->next_idx; +} + +void +vnet_crypto_request_async_mode (int is_enable) +{ + vlib_main_t *vm = vlib_get_main (); + vlib_thread_main_t *tm = vlib_get_thread_main (); + vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "crypto-dispatch"); + vnet_crypto_main_t *cm = &crypto_main; + u32 skip_master = 
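On the engine side, vnet_crypto_register_async_handler() pairs an enqueue and a dequeue handler per async op id; the highest-priority registrant becomes the active handler for that op. A minimal sketch of an engine init path — the engine name, priority, and both handler bodies are hypothetical, not part of this patch:

```c
#include <vnet/crypto/crypto.h>

/* Hypothetical engine-side handlers: enqueue hands a frame to the device,
 * dequeue returns a finished frame or NULL. */
static int
my_enqueue_aes_128_gcm_enc (vlib_main_t * vm, vnet_crypto_async_frame_t * f)
{
  /* ... push f->elts[0 .. f->n_elts - 1] to the device queue ... */
  return 0;                     /* 0 = frame accepted */
}

static vnet_crypto_async_frame_t *
my_dequeue (vlib_main_t * vm)
{
  /* ... poll the device; set frame state and per-element status ... */
  return 0;                     /* NULL = nothing completed yet */
}

static clib_error_t *
my_engine_init (vlib_main_t * vm)
{
  u32 eidx = vnet_crypto_register_engine (vm, "my_engine", 100,
                                          "illustrative async engine");

  /* Both handlers must be supplied for the op id to become usable. */
  vnet_crypto_register_async_handler (vm, eidx,
                                      VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD12_ENC,
                                      my_enqueue_aes_128_gcm_enc, my_dequeue);
  return 0;
}

VLIB_INIT_FUNCTION (my_engine_init);
```

If a second engine later registers the same op id with a higher priority, it silently takes over as the active handler, mirroring how the synchronous ops handlers already behave.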
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index f89ecf9f3d5..b0a83e08be3 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -16,10 +16,10 @@
 #ifndef included_vnet_crypto_crypto_h
 #define included_vnet_crypto_crypto_h
 
-#define VNET_CRYPTO_RING_SIZE 512
-
 #include <vlib/vlib.h>
 
+#define VNET_CRYPTO_FRAME_SIZE 32
+
 /* CRYPTO_ID, PRETTY_NAME, KEY_LENGTH_IN_BYTES */
 #define foreach_crypto_cipher_alg \
   _(DES_CBC, "des-cbc", 7) \
@@ -45,7 +45,6 @@
 _(SHA384, "sha-384") \
 _(SHA512, "sha-512")
 
-
 #define foreach_crypto_op_type \
   _(ENCRYPT, "encrypt") \
   _(DECRYPT, "decrypt") \
@@ -62,10 +61,46 @@ typedef enum
 } vnet_crypto_op_type_t;
 
 #define foreach_crypto_op_status \
+  _(IDLE, "idle") \
   _(PENDING, "pending") \
+  _(WORK_IN_PROGRESS, "work-in-progress") \
   _(COMPLETED, "completed") \
   _(FAIL_NO_HANDLER, "no-handler") \
-  _(FAIL_BAD_HMAC, "bad-hmac")
+  _(FAIL_BAD_HMAC, "bad-hmac") \
+  _(FAIL_ENGINE_ERR, "engine-error")
+
+/** async crypto **/
+
+/* CRYPTO_ID, PRETTY_NAME, KEY_LENGTH_IN_BYTES, TAG_LEN, AAD_LEN */
+#define foreach_crypto_aead_async_alg \
+  _(AES_128_GCM, "aes-128-gcm-aad8", 16, 16, 8) \
+  _(AES_128_GCM, "aes-128-gcm-aad12", 16, 16, 12) \
+  _(AES_192_GCM, "aes-192-gcm-aad8", 24, 16, 8) \
+  _(AES_192_GCM, "aes-192-gcm-aad12", 24, 16, 12) \
+  _(AES_256_GCM, "aes-256-gcm-aad8", 32, 16, 8) \
+  _(AES_256_GCM, "aes-256-gcm-aad12", 32, 16, 12)
+
+/* CRYPTO_ID, INTEG_ID, PRETTY_NAME, KEY_LENGTH_IN_BYTES, DIGEST_LEN */
+#define foreach_crypto_link_async_alg \
+  _ (AES_128_CBC, SHA1, "aes-128-cbc-hmac-sha-1", 16, 12) \
+  _ (AES_192_CBC, SHA1, "aes-192-cbc-hmac-sha-1", 24, 12) \
+  _ (AES_256_CBC, SHA1, "aes-256-cbc-hmac-sha-1", 32, 12) \
+  _ (AES_128_CBC, SHA224, "aes-128-cbc-hmac-sha-224", 16, 14) \
+  _ (AES_192_CBC, SHA224, "aes-192-cbc-hmac-sha-224", 24, 14) \
+  _ (AES_256_CBC, SHA224, "aes-256-cbc-hmac-sha-224", 32, 14) \
+  _ (AES_128_CBC, SHA256, "aes-128-cbc-hmac-sha-256", 16, 16) \
+  _ (AES_192_CBC, SHA256, "aes-192-cbc-hmac-sha-256", 24, 16) \
+  _ (AES_256_CBC, SHA256, "aes-256-cbc-hmac-sha-256", 32, 16) \
+  _ (AES_128_CBC, SHA384, "aes-128-cbc-hmac-sha-384", 16, 24) \
+  _ (AES_192_CBC, SHA384, "aes-192-cbc-hmac-sha-384", 24, 24) \
+  _ (AES_256_CBC, SHA384, "aes-256-cbc-hmac-sha-384", 32, 24) \
+  _ (AES_128_CBC, SHA512, "aes-128-cbc-hmac-sha-512", 16, 32) \
+  _ (AES_192_CBC, SHA512, "aes-192-cbc-hmac-sha-512", 24, 32) \
+  _ (AES_256_CBC, SHA512, "aes-256-cbc-hmac-sha-512", 32, 32)
+
+#define foreach_crypto_async_op_type \
+  _(ENCRYPT, "async-encrypt") \
+  _(DECRYPT, "async-decrypt")
 
 typedef enum
 {
@@ -96,10 +131,63 @@ typedef enum
   VNET_CRYPTO_N_ALGS,
 } vnet_crypto_alg_t;
 
+typedef enum
+{
+#define _(n, s) VNET_CRYPTO_ASYNC_OP_TYPE_##n,
+  foreach_crypto_async_op_type
+#undef _
+  VNET_CRYPTO_ASYNC_OP_N_TYPES,
+} vnet_crypto_async_op_type_t;
+
+typedef enum
+{
+  VNET_CRYPTO_ASYNC_ALG_NONE = 0,
+#define _(n, s, k, t, a) \
+  VNET_CRYPTO_ALG_##n##_TAG##t##_AAD##a,
+  foreach_crypto_aead_async_alg
+#undef _
+#define _(c, h, s, k ,d) \
+  VNET_CRYPTO_ALG_##c##_##h##_TAG##d,
+  foreach_crypto_link_async_alg
+#undef _
+  VNET_CRYPTO_N_ASYNC_ALGS,
+} vnet_crypto_async_alg_t;
+
+typedef enum
+{
+  VNET_CRYPTO_ASYNC_OP_NONE = 0,
+#define _(n, s, k, t, a) \
+  VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
+  VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,
+  foreach_crypto_aead_async_alg
+#undef _
+#define _(c, h, s, k ,d) \
+  VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
+  VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,
+  foreach_crypto_link_async_alg
+#undef _
+  VNET_CRYPTO_ASYNC_OP_N_IDS,
+} vnet_crypto_async_op_id_t;
+
 typedef struct
 {
-  u8 *data;
-  vnet_crypto_alg_t alg:8;
+  union
+  {
+    struct
+    {
+      u8 *data;
+      vnet_crypto_alg_t alg:8;
+    };
+    struct
+    {
+      u32 index_crypto;
+      u32 index_integ;
+      vnet_crypto_async_alg_t async_alg:8;
+    };
+  };
+#define VNET_CRYPTO_KEY_TYPE_DATA 0
+#define VNET_CRYPTO_KEY_TYPE_LINK 1
+  u8 type;
 } vnet_crypto_key_t;
 
 typedef enum
@@ -116,6 +204,7 @@ typedef enum
 } vnet_crypto_op_id_t;
 /* *INDENT-ON* */
 
+
 typedef enum
 {
   CRYPTO_OP_SIMPLE,
@@ -196,8 +285,58 @@ typedef struct
 
 typedef struct
 {
+  vnet_crypto_async_op_type_t type;
+  vnet_crypto_async_alg_t alg;
+  u32 active_engine_index_async;
+} vnet_crypto_async_op_data_t;
+
+typedef struct
+{
+  char *name;
+  vnet_crypto_async_op_id_t op_by_type[VNET_CRYPTO_ASYNC_OP_N_TYPES];
+} vnet_crypto_async_alg_data_t;
+
+typedef struct
+{
+  vnet_crypto_op_status_t status:8;
+  u32 key_index;
+  i16 crypto_start_offset;      /* first buffer offset */
+  i16 integ_start_offset;
+  u32 crypto_total_length;
+  /* adj total_length for integ, e.g. 4 bytes for IPSec ESN */
+  u16 integ_length_adj;
+  u8 *iv;
+  union
+  {
+    u8 *digest;
+    u8 *tag;
+  };
+  u8 *aad;
+  u8 flags;             /**< share same VNET_CRYPTO_OP_FLAG_* values */
+} vnet_crypto_async_frame_elt_t;
+
+typedef struct
+{
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  clib_bitmap_t *act_queues;
+#define VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED 0
+#define VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS 1
+#define VNET_CRYPTO_FRAME_STATE_SUCCESS 2
+#define VNET_CRYPTO_FRAME_STATE_ELT_ERROR 3
+  u8 state;
+  vnet_crypto_async_op_id_t op:8;
+  u16 n_elts;
+  vnet_crypto_async_frame_elt_t elts[VNET_CRYPTO_FRAME_SIZE];
+  u32 buffer_indices[VNET_CRYPTO_FRAME_SIZE];
+  u16 next_node_index[VNET_CRYPTO_FRAME_SIZE];
+} vnet_crypto_async_frame_t;
+
+typedef struct
+{
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+  vnet_crypto_async_frame_t *frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+  vnet_crypto_async_frame_t *frame_pool;
+  u32 *buffer_indice;
+  u16 *nexts;
 } vnet_crypto_thread_t;
 
 typedef u32 vnet_crypto_key_index_t;
@@ -214,6 +353,12 @@ typedef void (vnet_crypto_key_handler_t) (vlib_main_t * vm,
                                           vnet_crypto_key_op_t kop,
                                           vnet_crypto_key_index_t idx);
 
+/** async crypto function handlers **/
+typedef int (vnet_crypto_frame_enqueue_t) (vlib_main_t * vm,
+                                           vnet_crypto_async_frame_t * frame);
+typedef vnet_crypto_async_frame_t *
+  (vnet_crypto_frame_dequeue_t) (vlib_main_t * vm);
+
 u32 vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
                                  char *desc);
 
@@ -226,6 +371,7 @@ void vnet_crypto_register_chained_ops_handler (vlib_main_t * vm,
                                                vnet_crypto_op_id_t opt,
                                                vnet_crypto_chained_ops_handler_t * oph);
+
 void vnet_crypto_register_ops_handlers (vlib_main_t * vm, u32 engine_index,
                                         vnet_crypto_op_id_t opt,
                                         vnet_crypto_ops_handler_t * fn,
@@ -235,6 +381,15 @@ void vnet_crypto_register_ops_handlers (vlib_main_t * vm, u32 engine_index,
 void vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index,
                                        vnet_crypto_key_handler_t * keyh);
 
+/** async crypto register functions */
+u32 vnet_crypto_register_post_node (vlib_main_t * vm, char *post_node_name);
+void vnet_crypto_register_async_handler (vlib_main_t * vm,
+                                         u32 engine_index,
+                                         vnet_crypto_async_op_id_t opt,
+                                         vnet_crypto_frame_enqueue_t * enq_fn,
+                                         vnet_crypto_frame_dequeue_t *
+                                         deq_fn);
+
 typedef struct
 {
   char *name;
@@ -244,32 +399,46 @@ typedef struct
   vnet_crypto_ops_handler_t *ops_handlers[VNET_CRYPTO_N_OP_IDS];
   vnet_crypto_chained_ops_handler_t
     *chained_ops_handlers[VNET_CRYPTO_N_OP_IDS];
+  vnet_crypto_frame_enqueue_t *enqueue_handlers[VNET_CRYPTO_ASYNC_OP_N_IDS];
+  vnet_crypto_frame_dequeue_t *dequeue_handlers[VNET_CRYPTO_ASYNC_OP_N_IDS];
 } vnet_crypto_engine_t;
 
 typedef struct
 {
+  u32 node_idx;
+  u32 next_idx;
+} vnet_crypto_async_next_node_t;
+
+typedef struct
+{
   vnet_crypto_alg_data_t *algs;
   vnet_crypto_thread_t *threads;
   vnet_crypto_ops_handler_t **ops_handlers;
   vnet_crypto_chained_ops_handler_t **chained_ops_handlers;
+  vnet_crypto_frame_enqueue_t **enqueue_handlers;
+  vnet_crypto_frame_dequeue_t **dequeue_handlers;
+  clib_bitmap_t *async_active_ids;
   vnet_crypto_op_data_t opt_data[VNET_CRYPTO_N_OP_IDS];
+  vnet_crypto_async_op_data_t async_opt_data[VNET_CRYPTO_ASYNC_OP_N_IDS];
   vnet_crypto_engine_t *engines;
   vnet_crypto_key_t *keys;
   uword *engine_index_by_name;
   uword *alg_index_by_name;
+  uword *async_alg_index_by_name;
+  vnet_crypto_async_alg_data_t *async_algs;
+  u32 async_refcnt;
+  vnet_crypto_async_next_node_t *next_nodes;
 } vnet_crypto_main_t;
 
 extern vnet_crypto_main_t crypto_main;
 
-u32 vnet_crypto_submit_ops (vlib_main_t * vm, vnet_crypto_op_t ** jobs,
-                            u32 n_jobs);
-
 u32 vnet_crypto_process_chained_ops (vlib_main_t * vm, vnet_crypto_op_t ops[],
                                      vnet_crypto_op_chunk_t * chunks,
                                      u32 n_ops);
 u32 vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[],
                              u32 n_ops);
 
+
 int vnet_crypto_set_handler2 (char *ops_handler_name, char *engine,
                               crypto_op_class_type_t oct);
 int vnet_crypto_is_set_handler (vnet_crypto_alg_t alg);
@@ -278,6 +447,27 @@ u32 vnet_crypto_key_add (vlib_main_t * vm, vnet_crypto_alg_t alg,
                          u8 * data, u16 length);
 void vnet_crypto_key_del (vlib_main_t * vm, vnet_crypto_key_index_t index);
 
+/**
+ * Use 2 created keys to generate new key for linked algs (cipher + integ)
+ * The returned key index is to be used for linked alg only.
+ **/
+u32 vnet_crypto_key_add_linked (vlib_main_t * vm,
+                                vnet_crypto_key_index_t index_crypto,
+                                vnet_crypto_key_index_t index_integ);
+
+clib_error_t *crypto_dispatch_enable_disable (int is_enable);
+
+int vnet_crypto_set_async_handler2 (char *alg_name, char *engine);
+
+int vnet_crypto_is_set_async_handler (vnet_crypto_async_op_id_t opt);
+
+void vnet_crypto_request_async_mode (int is_enable);
+
+vnet_crypto_async_alg_t vnet_crypto_link_algs (vnet_crypto_alg_t crypto_alg,
+                                               vnet_crypto_alg_t integ_alg);
+
 format_function_t format_vnet_crypto_alg;
 format_function_t format_vnet_crypto_engine;
 format_function_t format_vnet_crypto_op;
@@ -285,6 +475,10 @@ format_function_t format_vnet_crypto_op_type;
 format_function_t format_vnet_crypto_op_status;
 unformat_function_t unformat_vnet_crypto_alg;
 
+format_function_t format_vnet_crypto_async_op;
+format_function_t format_vnet_crypto_async_alg;
+format_function_t format_vnet_crypto_async_op_type;
+
 static_always_inline void
 vnet_crypto_op_init (vnet_crypto_op_t * op, vnet_crypto_op_id_t type)
 {
@@ -318,6 +512,119 @@ vnet_crypto_set_handler (char *alg_name, char *engine)
   return vnet_crypto_set_handler2 (alg_name, engine, CRYPTO_OP_BOTH);
 }
 
+/** async crypto inline functions **/
+
+static_always_inline vnet_crypto_async_frame_t *
+vnet_crypto_async_get_frame (vlib_main_t * vm, vnet_crypto_async_op_id_t opt)
+{
+  vnet_crypto_main_t *cm = &crypto_main;
+  vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
+  vnet_crypto_async_frame_t *f = ct->frames[opt];
+
+  if (!f)
+    {
+      pool_get_aligned (ct->frame_pool, f, CLIB_CACHE_LINE_BYTES);
+      if (CLIB_DEBUG > 0)
+        clib_memset (f, 0xfe, sizeof (*f));
+      f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
+      f->op = opt;
+      f->n_elts = 0;
+      ct->frames[opt] = f;
+    }
+  return f;
+}
+
+static_always_inline void
+vnet_crypto_async_free_frame (vlib_main_t * vm,
+                              vnet_crypto_async_frame_t * frame)
+{
+  vnet_crypto_main_t *cm = &crypto_main;
+  vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
+  pool_put (ct->frame_pool, frame);
+}
+
+static_always_inline int
+vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
+                                     vnet_crypto_async_frame_t * frame)
+{
+  vnet_crypto_main_t *cm = &crypto_main;
+  vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
+  vnet_crypto_async_op_id_t opt = frame->op;
+  int ret = (cm->enqueue_handlers[frame->op]) (vm, frame);
+  clib_bitmap_set_no_check (cm->async_active_ids, opt, 1);
+  if (PREDICT_TRUE (ret == 0))
+    {
+      vnet_crypto_async_frame_t *nf = 0;
+      frame->state = VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS;
+      pool_get_aligned (ct->frame_pool, nf, CLIB_CACHE_LINE_BYTES);
+      if (CLIB_DEBUG > 0)
+        clib_memset (nf, 0xfe, sizeof (*nf));
+      nf->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
+      nf->op = opt;
+      nf->n_elts = 0;
+      ct->frames[opt] = nf;
+    }
+  return ret;
+}
+
+static_always_inline int
+vnet_crypto_async_add_to_frame (vlib_main_t * vm,
+                                vnet_crypto_async_frame_t ** frame,
+                                u32 key_index,
+                                u32 crypto_len, i16 integ_len_adj,
+                                i16 crypto_start_offset,
+                                u16 integ_start_offset,
+                                u32 buffer_index,
+                                u16 next_node,
+                                u8 * iv, u8 * tag, u8 * aad, u8 flags)
+{
+  vnet_crypto_async_frame_t *f = *frame;
+  vnet_crypto_async_frame_elt_t *fe;
+  u16 index;
+
+  if (PREDICT_FALSE (f->n_elts == VNET_CRYPTO_FRAME_SIZE))
+    {
+      vnet_crypto_async_op_id_t opt = f->op;
+      int ret;
+      ret = vnet_crypto_async_submit_open_frame (vm, f);
+      if (PREDICT_FALSE (ret < 0))
+        return -1;
+      f = vnet_crypto_async_get_frame (vm, opt);
+      *frame = f;
+    }
+
+  index = f->n_elts;
+  fe = &f->elts[index];
+  f->n_elts++;
+  fe->key_index = key_index;
+  fe->crypto_total_length = crypto_len;
+  fe->crypto_start_offset = crypto_start_offset;
+  fe->integ_start_offset = integ_start_offset;
+  fe->integ_length_adj = integ_len_adj;
+  fe->iv = iv;
+  fe->tag = tag;
+  fe->aad = aad;
+  fe->flags = flags;
+  f->buffer_indices[index] = buffer_index;
+  f->next_node_index[index] = next_node;
+
+  return 0;
+}
+
+static_always_inline void
+vnet_crypto_async_reset_frame (vnet_crypto_async_frame_t * f)
+{
+  vnet_crypto_async_op_id_t opt;
+  ASSERT (f != 0);
+  ASSERT (f->state == VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
+  opt = f->op;
+  if (CLIB_DEBUG > 0)
+    clib_memset (f, 0xfe, sizeof (*f));
+  f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
+  f->op = opt;
+  f->n_elts = 0;
+}
+
 #endif /* included_vnet_crypto_crypto_h */
 
 /*
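For callers, the new inline helpers implement the whole frame lifecycle: fetch the per-thread open frame, append elements, and hand the frame to the active engine once it is full or the caller's vector ends. A sketch of enqueuing a single buffer, assuming the caller already holds a buffer index, key index, and IV/tag/AAD pointers (all placeholders here); `next_node` must be a value previously returned by vnet_crypto_register_post_node():

```c
#include <vnet/crypto/crypto.h>

/* Illustrative only: enqueue one buffer for aes-128-gcm-aad12 encryption
 * and hand the open frame to the active engine. */
static int
enqueue_one_example (vlib_main_t * vm, u32 bi, u32 key_index,
                     u16 next_node, u8 * iv, u8 * tag, u8 * aad)
{
  vnet_crypto_async_op_id_t opt = VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD12_ENC;
  vnet_crypto_async_frame_t *f;

  if (!vnet_crypto_is_set_async_handler (opt))
    return -1;                  /* no active engine for this op id */

  f = vnet_crypto_async_get_frame (vm, opt);

  /* add_to_frame submits the open frame automatically when it reaches
   * VNET_CRYPTO_FRAME_SIZE elements and opens a fresh one. Offsets and
   * lengths below are placeholders. */
  if (vnet_crypto_async_add_to_frame (vm, &f, key_index,
                                      /* crypto_len */ 64,
                                      /* integ_len_adj */ 0,
                                      /* crypto_start_offset */ 0,
                                      /* integ_start_offset */ 0,
                                      bi, next_node, iv, tag, aad,
                                      /* flags */ 0) < 0)
    return -1;

  /* Submit whatever accumulated; completed buffers come back through the
   * crypto-dispatch node, which forwards them to next_node. */
  return vnet_crypto_async_submit_open_frame (vm, f);
}
```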
diff --git a/src/vnet/crypto/format.c b/src/vnet/crypto/format.c
index 715941e0ee7..b58ab7fceb2 100644
--- a/src/vnet/crypto/format.c
+++ b/src/vnet/crypto/format.c
@@ -105,6 +105,61 @@ format_vnet_crypto_engine (u8 * s, va_list * args)
   return format (s, "%s", e->name);
 }
 
+u8 *
+format_vnet_crypto_async_op_type (u8 * s, va_list * args)
+{
+  vnet_crypto_async_op_type_t opt =
+    va_arg (*args, vnet_crypto_async_op_type_t);
+  char *strings[] = {
+#define _(n, s) [VNET_CRYPTO_ASYNC_OP_TYPE_##n] = s,
+    foreach_crypto_async_op_type
+#undef _
+  };
+
+  if (opt >= VNET_CRYPTO_ASYNC_OP_N_TYPES)
+    return format (s, "unknown");
+
+  return format (s, "%s", strings[opt]);
+}
+
+u8 *
+format_vnet_crypto_async_alg (u8 * s, va_list * args)
+{
+  vnet_crypto_async_alg_t alg = va_arg (*args, vnet_crypto_async_alg_t);
+  vnet_crypto_main_t *cm = &crypto_main;
+  vnet_crypto_async_alg_data_t *d = vec_elt_at_index (cm->async_algs, alg);
+  return format (s, "%s", d->name);
+}
+
+u8 *
+format_vnet_crypto_async_op (u8 * s, va_list * args)
+{
+  vnet_crypto_main_t *cm = &crypto_main;
+  vnet_crypto_async_op_id_t op = va_arg (*args, int);   /* promoted id */
+  vnet_crypto_async_op_data_t *otd;
+
+  if (op >= VNET_CRYPTO_ASYNC_OP_N_IDS)
+    return format (s, "unknown");
+
+  otd = cm->async_opt_data + op;
+  return format (s, "%U-%U", format_vnet_crypto_async_op_type, otd->type,
+                 format_vnet_crypto_async_alg, otd->alg);
+}
 
 /*
  * fd.io coding-style-patch-verification: ON
diff --git a/src/vnet/crypto/node.c b/src/vnet/crypto/node.c
new file mode 100644
index 00000000000..51ee63d1d62
--- /dev/null
+++ b/src/vnet/crypto/node.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdbool.h>
+#include <vlib/vlib.h>
+#include <vnet/crypto/crypto.h>
+
+typedef enum
+{
+#define _(sym,str) VNET_CRYPTO_ASYNC_ERROR_##sym,
+  foreach_crypto_op_status
+#undef _
+    VNET_CRYPTO_ASYNC_N_ERROR,
+} vnet_crypto_async_error_t;
+
+static char *vnet_crypto_async_error_strings[] = {
+#define _(sym,string) string,
+  foreach_crypto_op_status
+#undef _
+};
+
+#define foreach_crypto_dispatch_next \
+  _(ERR_DROP, "error-drop")
+
+typedef enum
+{
+#define _(n, s) CRYPTO_DISPATCH_NEXT_##n,
+  foreach_crypto_dispatch_next
+#undef _
+    CRYPTO_DISPATCH_N_NEXT,
+} crypto_dispatch_next_t;
+
+typedef struct
+{
+  vnet_crypto_op_status_t op_status;
+  vnet_crypto_async_op_id_t op;
+} crypto_dispatch_trace_t;
+
+static u8 *
+format_crypto_dispatch_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  crypto_dispatch_trace_t *t = va_arg (*args, crypto_dispatch_trace_t *);
+
+  s = format (s, "%U: %U", format_vnet_crypto_async_op, t->op,
+              format_vnet_crypto_op_status, t->op_status);
+  return s;
+}
+
+static void
+vnet_crypto_async_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
+                             vlib_buffer_t * b,
+                             vnet_crypto_async_op_id_t op_id,
+                             vnet_crypto_op_status_t status)
+{
+  crypto_dispatch_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
+  tr->op_status = status;
+  tr->op = op_id;
+}
+
+static_always_inline u32
+crypto_dequeue_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
+                      vnet_crypto_thread_t * ct,
+                      vnet_crypto_frame_dequeue_t * hdl,
+                      u32 n_cache, u32 * n_total)
+{
+  vnet_crypto_async_frame_t *cf = (hdl) (vm);
+
+  while (cf)
+    {
+      vec_validate (ct->buffer_indice, n_cache + cf->n_elts);
+      vec_validate (ct->nexts, n_cache + cf->n_elts);
+      clib_memcpy_fast (ct->buffer_indice + n_cache, cf->buffer_indices,
+                        sizeof (u32) * cf->n_elts);
+      if (cf->state == VNET_CRYPTO_FRAME_STATE_SUCCESS)
+        {
+          clib_memcpy_fast (ct->nexts + n_cache, cf->next_node_index,
+                            sizeof (u16) * cf->n_elts);
+        }
+      else
+        {
+          u32 i;
+          for (i = 0; i < cf->n_elts; i++)
+            {
+              if (cf->elts[i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+                {
+                  ct->nexts[i + n_cache] = CRYPTO_DISPATCH_NEXT_ERR_DROP;
+                  vlib_node_increment_counter (vm, node->node_index,
+                                               cf->elts[i].status, 1);
+                }
+              else
+                ct->nexts[i + n_cache] = cf->next_node_index[i];
+            }
+        }
+      n_cache += cf->n_elts;
+      *n_total += cf->n_elts;
+      if (n_cache >= VLIB_FRAME_SIZE)
+        {
+          vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indice, ct->nexts,
+                                       n_cache);
+          n_cache = 0;
+        }
+
+      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
+        {
+          u32 i;
+
+          for (i = 0; i < cf->n_elts; i++)
+            {
+              vlib_buffer_t *b = vlib_get_buffer (vm, cf->buffer_indices[i]);
+              if (b->flags & VLIB_BUFFER_IS_TRACED)
+                vnet_crypto_async_add_trace (vm, node, b, cf->op,
+                                             cf->elts[i].status);
+            }
+        }
+      vnet_crypto_async_free_frame (vm, cf);
+      cf = (hdl) (vm);
+    }
+
+  return n_cache;
+}
+
+VLIB_NODE_FN (crypto_dispatch_node) (vlib_main_t * vm,
+                                     vlib_node_runtime_t * node,
+                                     vlib_frame_t * frame)
+{
+  vnet_crypto_main_t *cm = &crypto_main;
+  vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
+  u32 n_dispatched = 0, n_cache = 0;
+  u32 index;
+
+  /* *INDENT-OFF* */
+  clib_bitmap_foreach (index, cm->async_active_ids, ({
+    n_cache = crypto_dequeue_frame (vm, node, ct, cm->dequeue_handlers[index],
+                                    n_cache, &n_dispatched);
+  }));
+  /* *INDENT-ON* */
+  if (n_cache)
+    vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indice, ct->nexts,
+                                 n_cache);
+
+  return n_dispatched;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (crypto_dispatch_node) = {
+  .name = "crypto-dispatch",
+  .type = VLIB_NODE_TYPE_INPUT,
+  .state = VLIB_NODE_STATE_DISABLED,
+  .format_trace = format_crypto_dispatch_trace,
+
+  .n_errors = ARRAY_LEN(vnet_crypto_async_error_strings),
+  .error_strings = vnet_crypto_async_error_strings,
+
+  .n_next_nodes = CRYPTO_DISPATCH_N_NEXT,
+  .next_nodes = {
+#define _(n, s) \
+  [CRYPTO_DISPATCH_NEXT_##n] = s,
+    foreach_crypto_dispatch_next
+#undef _
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
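crypto-dispatch polls each active op id's dequeue handler until it returns NULL. A sketch of the contract a dequeue handler has to honour, with the device-ring pop left abstract; everything named here is hypothetical:

```c
#include <vnet/crypto/crypto.h>

/* Hypothetical dequeue handler polled by crypto-dispatch: it must set the
 * frame state and per-element status that crypto_dequeue_frame inspects. */
static vnet_crypto_async_frame_t *
my_dequeue (vlib_main_t * vm)
{
  vnet_crypto_async_frame_t *f = 0;     /* = pop from device completion ring */
  u16 i;

  if (!f)
    return 0;                   /* nothing ready; dispatch moves on */

  f->state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
  for (i = 0; i < f->n_elts; i++)
    if (f->elts[i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
      {
        /* Any failed element sends the frame down the slow path, where
         * failing buffers are rerouted to error-drop. */
        f->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
        break;
      }

  return f;
}
```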