From 6c8533d4c18460029adbe0825b8dee257805fbc8 Mon Sep 17 00:00:00 2001
From: Fan Zhang
Date: Thu, 25 Feb 2021 12:53:36 +0000
Subject: dpdk: deprecate ipsec backend

Type: refactor

DPDK crypto devices are now accessible via the async infra, so there is
no need for the DPDK ipsec plugin. In addition, this patch fixes a
problem where the cryptodev backend did not work when the master core
and the worker cores lie in different NUMA nodes.

Signed-off-by: Fan Zhang
Signed-off-by: Neale Ranns
Change-Id: Ie8516bea706248c7bc25abac53a9c656bb8247d9
---
 extras/deprecated/dpdk-ipsec/cli.c                 |  674 ++++++++++++
 extras/deprecated/dpdk-ipsec/crypto_node.c         |  330 ++++++
 extras/deprecated/dpdk-ipsec/dir.dox               |   27 +
 .../deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md |   87 ++
 extras/deprecated/dpdk-ipsec/esp_decrypt.c         |  739 +++++++++++++
 extras/deprecated/dpdk-ipsec/esp_encrypt.c         |  709 +++++++++++++
 extras/deprecated/dpdk-ipsec/ipsec.c               | 1087 ++++++++++++++++++++
 extras/deprecated/dpdk-ipsec/ipsec.h               |  404 ++++++++
 src/plugins/dpdk/CMakeLists.txt                    |    9 -
 src/plugins/dpdk/cryptodev/cryptodev.c             |  429 ++++----
 src/plugins/dpdk/cryptodev/cryptodev_dp_api.c      |  616 ++++++-----
 src/plugins/dpdk/ipsec/cli.c                       |  674 ------------
 src/plugins/dpdk/ipsec/crypto_node.c               |  330 ------
 src/plugins/dpdk/ipsec/dir.dox                     |   27 -
 src/plugins/dpdk/ipsec/dpdk_crypto_ipsec_doc.md    |   87 --
 src/plugins/dpdk/ipsec/esp_decrypt.c               |  739 ------------
 src/plugins/dpdk/ipsec/esp_encrypt.c               |  710 ------------
 src/plugins/dpdk/ipsec/ipsec.c                     | 1087 --------------------
 src/plugins/dpdk/ipsec/ipsec.h                     |  403 --------
 19 files changed, 4635 insertions(+), 4533 deletions(-)
 create mode 100644 extras/deprecated/dpdk-ipsec/cli.c
 create mode 100644 extras/deprecated/dpdk-ipsec/crypto_node.c
 create mode 100644 extras/deprecated/dpdk-ipsec/dir.dox
 create mode 100644 extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md
 create mode 100644 extras/deprecated/dpdk-ipsec/esp_decrypt.c
 create mode 100644 extras/deprecated/dpdk-ipsec/esp_encrypt.c
 create mode 100644 extras/deprecated/dpdk-ipsec/ipsec.c
 create mode 100644 extras/deprecated/dpdk-ipsec/ipsec.h
 delete mode 100644 src/plugins/dpdk/ipsec/cli.c
 delete mode 100644 src/plugins/dpdk/ipsec/crypto_node.c
 delete mode 100644 src/plugins/dpdk/ipsec/dir.dox
 delete mode 100644 src/plugins/dpdk/ipsec/dpdk_crypto_ipsec_doc.md
 delete mode 100644 src/plugins/dpdk/ipsec/esp_decrypt.c
 delete mode 100644 src/plugins/dpdk/ipsec/esp_encrypt.c
 delete mode 100644 src/plugins/dpdk/ipsec/ipsec.c
 delete mode 100644 src/plugins/dpdk/ipsec/ipsec.h

diff --git a/extras/deprecated/dpdk-ipsec/cli.c b/extras/deprecated/dpdk-ipsec/cli.c
new file mode 100644
index 00000000000..8fdda020a77
--- /dev/null
+++ b/extras/deprecated/dpdk-ipsec/cli.c
@@ -0,0 +1,674 @@
+/*
+ * Copyright (c) 2017 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include +#include +#include + +static u8 * +format_crypto_resource (u8 * s, va_list * args) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + u32 indent = va_arg (*args, u32); + u32 res_idx = va_arg (*args, u32); + + crypto_resource_t *res = vec_elt_at_index (dcm->resource, res_idx); + + + s = format (s, "%U thr_id %3d qp %2u dec_inflight %u, enc_inflights %u\n", + format_white_space, indent, (i16) res->thread_idx, + res->qp_id, res->inflights[0], res->inflights[1]); + + return s; +} + +static u8 * +format_crypto (u8 * s, va_list * args) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_dev_t *dev = va_arg (*args, crypto_dev_t *); + crypto_drv_t *drv = vec_elt_at_index (dcm->drv, dev->drv_id); + u64 feat, mask; + u32 i; + char *pre = " "; + + s = format (s, "%-25s%-20s%-10s\n", dev->name, drv->name, + rte_cryptodevs[dev->id].data->dev_started ? "up" : "down"); + s = format (s, " numa_node %u, max_queues %u\n", dev->numa, dev->max_qp); + + if (dev->features) + { + for (mask = 1; mask != 0; mask <<= 1) + { + feat = dev->features & mask; + if (feat) + { + s = + format (s, "%s%s", pre, + rte_cryptodev_get_feature_name (feat)); + pre = ", "; + } + } + s = format (s, "\n"); + } + + s = format (s, " Cipher:"); + pre = " "; + for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) + if (dev->cipher_support[i]) + { + s = format (s, "%s%s", pre, dcm->cipher_algs[i].name); + pre = ", "; + } + s = format (s, "\n"); + + s = format (s, " Auth:"); + pre = " "; + for (i = 0; i < IPSEC_INTEG_N_ALG; i++) + if (dev->auth_support[i]) + { + s = format (s, "%s%s", pre, dcm->auth_algs[i].name); + pre = ", "; + } + s = format (s, "\n"); + + struct rte_cryptodev_stats stats; + rte_cryptodev_stats_get (dev->id, &stats); + + s = + format (s, + " enqueue %-10lu dequeue %-10lu enqueue_err %-10lu dequeue_err %-10lu \n", + stats.enqueued_count, stats.dequeued_count, + stats.enqueue_err_count, stats.dequeue_err_count); + + u16 *res_idx; + s = format (s, " free_resources %u :", vec_len (dev->free_resources)); + + u32 indent = format_get_indent (s); + s = format (s, "\n"); + + /* *INDENT-OFF* */ + vec_foreach (res_idx, dev->free_resources) + s = format (s, "%U", format_crypto_resource, indent, res_idx[0]); + /* *INDENT-ON* */ + + s = format (s, " used_resources %u :", vec_len (dev->used_resources)); + indent = format_get_indent (s); + + s = format (s, "\n"); + + /* *INDENT-OFF* */ + vec_foreach (res_idx, dev->used_resources) + s = format (s, "%U", format_crypto_resource, indent, res_idx[0]); + /* *INDENT-ON* */ + + s = format (s, "\n"); + + return s; +} + + +static clib_error_t * +clear_crypto_stats_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_dev_t *dev; + + /* *INDENT-OFF* */ + vec_foreach (dev, dcm->dev) + rte_cryptodev_stats_reset (dev->id); + /* *INDENT-ON* */ + + return NULL; +} + +/*? + * This command is used to clear the DPDK Crypto device statistics. 
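+ * Statistics for every probed cryptodev are reset via
+ * rte_cryptodev_stats_reset().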
+ *
+ * @cliexpar
+ * Example of how to clear the DPDK Crypto device statistics:
+ * @cliexstart{clear dpdk crypto devices statistics}
+ * vpp# clear dpdk crypto devices statistics
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (clear_dpdk_crypto_stats, static) = {
+  .path = "clear dpdk crypto devices statistics",
+  .short_help = "clear dpdk crypto devices statistics",
+  .function = clear_crypto_stats_fn,
+};
+/* *INDENT-ON* */
+
+
+static clib_error_t *
+show_dpdk_crypto_fn (vlib_main_t * vm, unformat_input_t * input,
+                     vlib_cli_command_t * cmd)
+{
+  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+  crypto_dev_t *dev;
+
+  /* *INDENT-OFF* */
+  vec_foreach (dev, dcm->dev)
+    vlib_cli_output (vm, "%U", format_crypto, dev);
+  /* *INDENT-ON* */
+
+  return NULL;
+}
+
+/*?
+ * This command is used to display the DPDK Crypto device information.
+ *
+ * @cliexpar
+ * Example of how to display the DPDK Crypto device information:
+ * @cliexstart{show dpdk crypto devices}
+ * vpp# show dpdk crypto devices
+ * aesni_mb0                crypto_aesni_mb     up
+ *   numa_node 0, max_queues 4
+ *   SYMMETRIC_CRYPTO, SYM_OPERATION_CHAINING, CPU_AVX2, CPU_AESNI
+ *   Cipher: aes-cbc-128, aes-cbc-192, aes-cbc-256, aes-ctr-128, aes-ctr-192, aes-ctr-256, aes-gcm-128, aes-gcm-192, aes-gcm-256
+ *   Auth: md5-96, sha1-96, sha-256-128, sha-384-192, sha-512-256
+ *   enqueue 2 dequeue 2 enqueue_err 0 dequeue_err 0
+ *   free_resources 3 :
+ *     thr_id  -1 qp  3 inflight 0
+ *     thr_id  -1 qp  2 inflight 0
+ *     thr_id  -1 qp  1 inflight 0
+ *   used_resources 1 :
+ *     thr_id   1 qp  0 inflight 0
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_dpdk_crypto, static) = {
+  .path = "show dpdk crypto devices",
+  .short_help = "show dpdk crypto devices",
+  .function = show_dpdk_crypto_fn,
+};
+
+/* *INDENT-ON* */
+static u8 *
+format_crypto_worker (u8 * s, va_list * args)
+{
+  u32 thread_idx = va_arg (*args, u32);
+  u8 verbose = (u8) va_arg (*args, u32);
+  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+  crypto_worker_main_t *cwm;
+  crypto_resource_t *res;
+  u16 *res_idx;
+  char *pre, *ind;
+  u32 i;
+
+  cwm = vec_elt_at_index (dcm->workers_main, thread_idx);
+
+  s = format (s, "Thread %u (%v):\n", thread_idx,
+              vlib_worker_threads[thread_idx].name);
+
+  /* *INDENT-OFF* */
+  vec_foreach (res_idx, cwm->resource_idx)
+    {
+      ind = "  ";
+      res = vec_elt_at_index (dcm->resource, res_idx[0]);
+      s = format (s, "%s%-20s dev-id %2u queue-pair %2u\n",
+                  ind, vec_elt_at_index (dcm->dev, res->dev_id)->name,
+                  res->dev_id, res->qp_id);
+
+      ind = "    ";
+      if (verbose)
+        {
+          s = format (s, "%sCipher:", ind);
+          pre = " ";
+          for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++)
+            if (cwm->cipher_resource_idx[i] == res_idx[0])
+              {
+                s = format (s, "%s%s", pre, dcm->cipher_algs[i].name);
+                pre = ", ";
+              }
+          s = format (s, "\n");
+
+          s = format (s, "%sAuth:", ind);
+          pre = " ";
+          for (i = 0; i < IPSEC_INTEG_N_ALG; i++)
+            if (cwm->auth_resource_idx[i] == res_idx[0])
+              {
+                s = format (s, "%s%s", pre, dcm->auth_algs[i].name);
+                pre = ", ";
+              }
+          s = format (s, "\n");
+        }
+    }
+  /* *INDENT-ON* */
+
+  return s;
+}
+
+static clib_error_t *
+common_crypto_placement_fn (vlib_main_t * vm, unformat_input_t * input,
+                            vlib_cli_command_t * cmd, u8 verbose)
+{
+  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+  clib_error_t *error = NULL;
+  u32 i;
+  u8 skip_master;
+
+  if (!dcm->enabled)
+    {
+      vlib_cli_output (vm, "\nDPDK Cryptodev support is
disabled\n"); + return error; + } + + skip_master = vlib_num_workers () > 0; + + /* *INDENT-OFF* */ + vec_foreach_index (i, dcm->workers_main) + { + if (i < skip_master) + continue; + + vlib_cli_output (vm, "%U\n", format_crypto_worker, i, verbose); + } + /* *INDENT-ON* */ + + return error; +} + +static clib_error_t * +show_dpdk_crypto_placement_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + return common_crypto_placement_fn (vm, input, cmd, 0); +} + +static clib_error_t * +show_dpdk_crypto_placement_v_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + return common_crypto_placement_fn (vm, input, cmd, 1); +} + +/*? + * This command is used to display the DPDK Crypto device placement. + * + * @cliexpar + * Example of displaying the DPDK Crypto device placement: + * @cliexstart{show dpdk crypto placement} + * vpp# show dpdk crypto placement + * Thread 1 (vpp_wk_0): + * cryptodev_aesni_mb_p dev-id 0 queue-pair 0 + * cryptodev_aesni_gcm_ dev-id 1 queue-pair 0 + * + * Thread 2 (vpp_wk_1): + * cryptodev_aesni_mb_p dev-id 0 queue-pair 1 + * cryptodev_aesni_gcm_ dev-id 1 queue-pair 1 + * @cliexend +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_dpdk_crypto_placement, static) = { + .path = "show dpdk crypto placement", + .short_help = "show dpdk crypto placement", + .function = show_dpdk_crypto_placement_fn, +}; +/* *INDENT-ON* */ + +/*? + * This command is used to display the DPDK Crypto device placement + * with verbose output. + * + * @cliexpar + * Example of displaying the DPDK Crypto device placement verbose: + * @cliexstart{show dpdk crypto placement verbose} + * vpp# show dpdk crypto placement verbose + * Thread 1 (vpp_wk_0): + * cryptodev_aesni_mb_p dev-id 0 queue-pair 0 + * Cipher: aes-cbc-128, aes-cbc-192, aes-cbc-256, aes-ctr-128, aes-ctr-192, aes-ctr-256 + * Auth: md5-96, sha1-96, sha-256-128, sha-384-192, sha-512-256 + * cryptodev_aesni_gcm_ dev-id 1 queue-pair 0 + * Cipher: aes-gcm-128, aes-gcm-192, aes-gcm-256 + * Auth: + * + * Thread 2 (vpp_wk_1): + * cryptodev_aesni_mb_p dev-id 0 queue-pair 1 + * Cipher: aes-cbc-128, aes-cbc-192, aes-cbc-256, aes-ctr-128, aes-ctr-192, aes-ctr-256 + * Auth: md5-96, sha1-96, sha-256-128, sha-384-192, sha-512-256 + * cryptodev_aesni_gcm_ dev-id 1 queue-pair 1 + * Cipher: aes-gcm-128, aes-gcm-192, aes-gcm-256 + * Auth: + * + * @cliexend +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_dpdk_crypto_placement_v, static) = { + .path = "show dpdk crypto placement verbose", + .short_help = "show dpdk crypto placement verbose", + .function = show_dpdk_crypto_placement_v_fn, +}; +/* *INDENT-ON* */ + +static clib_error_t * +set_dpdk_crypto_placement_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_worker_main_t *cwm; + crypto_dev_t *dev; + u32 thread_idx, i; + u16 res_idx, *idx; + u8 dev_idx, auto_en = 0; + + if (!unformat_user (input, unformat_line_input, line_input)) + return clib_error_return (0, "invalid syntax"); + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "%u %u", &dev_idx, &thread_idx)) + ; + else if (unformat (line_input, "auto")) + auto_en = 1; + else + { + unformat_free (line_input); + return clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + } + } + + unformat_free (line_input); + + if (auto_en) + { + crypto_auto_placement (); + return 0; + } + + /* 
TODO support device name */
+
+  if (!(dev_idx < vec_len (dcm->dev)))
+    return clib_error_return (0, "please specify valid device index");
+
+  if (thread_idx != (u32) ~ 0 && !(thread_idx < vec_len (dcm->workers_main)))
+    return clib_error_return (0, "invalid thread index");
+
+  dev = vec_elt_at_index (dcm->dev, dev_idx);
+  if (!(vec_len (dev->free_resources)))
+    return clib_error_return (0, "all device resources are being used");
+
+  /* Check thread is not already using the device */
+  /* *INDENT-OFF* */
+  vec_foreach (idx, dev->used_resources)
+    if (dcm->resource[idx[0]].thread_idx == thread_idx)
+      return clib_error_return (0, "thread %u already using device %u",
+                                thread_idx, dev_idx);
+  /* *INDENT-ON* */
+
+  res_idx = vec_pop (dev->free_resources);
+  vec_add1 (dev->used_resources, res_idx);
+
+  cwm = vec_elt_at_index (dcm->workers_main, thread_idx);
+
+  ASSERT (dcm->resource[res_idx].thread_idx == (u16) ~ 0);
+  dcm->resource[res_idx].thread_idx = thread_idx;
+
+  /* Add device to vector of polling resources */
+  vec_add1 (cwm->resource_idx, res_idx);
+
+  /* Set device as default for all supported algos */
+  for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++)
+    if (dev->cipher_support[i])
+      {
+        if (cwm->cipher_resource_idx[i] == (u16) ~ 0)
+          dcm->cipher_algs[i].disabled--;
+        cwm->cipher_resource_idx[i] = res_idx;
+      }
+
+  for (i = 0; i < IPSEC_INTEG_N_ALG; i++)
+    if (dev->auth_support[i])
+      {
+        if (cwm->auth_resource_idx[i] == (u16) ~ 0)
+          dcm->auth_algs[i].disabled--;
+        cwm->auth_resource_idx[i] = res_idx;
+      }
+
+  /* Check, per resource, whether it is still used and mark it for
+     removal otherwise */
+  u8 used;
+  /* *INDENT-OFF* */
+  vec_foreach (idx, cwm->resource_idx)
+    {
+      if (idx[0] == res_idx)
+        continue;
+
+      used = 0;
+      for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++)
+        used |= cwm->cipher_resource_idx[i] == idx[0];
+
+      for (i = 0; i < IPSEC_INTEG_N_ALG; i++)
+        used |= cwm->auth_resource_idx[i] == idx[0];
+
+      vec_elt_at_index (dcm->resource, idx[0])->remove = !used;
+    }
+  /* *INDENT-ON* */
+
+  return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_dpdk_crypto_placement, static) = {
+  .path = "set dpdk crypto placement",
+  .short_help = "set dpdk crypto placement (<device> <thread> | auto)",
+  .function = set_dpdk_crypto_placement_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * The thread will not enqueue more operations to the device but will poll
+ * from it until there are no more inflight operations.
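+ * The actual drain happens in the dpdk-crypto-input node: once the
+ * resource's inflight counters reach zero it is unlinked from the worker
+ * and returned to the device's free_resources vector.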
+*/
+static void
+dpdk_crypto_clear_resource (u16 res_idx)
+{
+  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+  crypto_resource_t *res = vec_elt_at_index (dcm->resource, res_idx);
+  crypto_worker_main_t *cwm = &dcm->workers_main[res->thread_idx];
+  u32 i;
+
+  for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++)
+    if (cwm->cipher_resource_idx[i] == res_idx)
+      {
+        cwm->cipher_resource_idx[i] = (u16) ~ 0;
+        dcm->cipher_algs[i].disabled++;
+      }
+
+  for (i = 0; i < IPSEC_INTEG_N_ALG; i++)
+    if (cwm->auth_resource_idx[i] == res_idx)
+      {
+        cwm->auth_resource_idx[i] = (u16) ~ 0;
+        dcm->auth_algs[i].disabled++;
+      }
+
+  /* Fully remove device on crypto_node once there are no inflights */
+  res->remove = 1;
+}
+
+static clib_error_t *
+clear_dpdk_crypto_placement_fn (vlib_main_t * vm,
+                                unformat_input_t * input,
+                                vlib_cli_command_t * cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+  crypto_dev_t *dev;
+  u32 thread_idx = (u32) ~ 0;
+  u16 *res_idx;
+  u8 dev_idx = (u8) ~ 0;
+  u8 free_all = 0;
+
+  if (!unformat_user (input, unformat_line_input, line_input))
+    return clib_error_return (0, "invalid syntax");
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "%u %u", &dev_idx, &thread_idx))
+        ;
+      else if (unformat (line_input, "%u", &dev_idx))
+        free_all = 1;
+      else
+        {
+          unformat_free (line_input);
+          return clib_error_return (0, "parse error: '%U'",
+                                    format_unformat_error, line_input);
+        }
+    }
+
+  unformat_free (line_input);
+
+  if (!(dev_idx < vec_len (dcm->dev)))
+    return clib_error_return (0, "invalid device index");
+
+  dev = vec_elt_at_index (dcm->dev, dev_idx);
+
+  /* Clear all resources placements */
+  if (free_all)
+    {
+      /* *INDENT-OFF* */
+      vec_foreach (res_idx, dev->used_resources)
+        dpdk_crypto_clear_resource (res_idx[0]);
+      /* *INDENT-ON* */
+
+      return 0;
+    }
+
+  if (!(thread_idx < vec_len (dcm->workers_main)))
+    return clib_error_return (0, "invalid thread index");
+
+  /* Clear placement of device for given thread index */
+  /* *INDENT-OFF* */
+  vec_foreach (res_idx, dev->used_resources)
+    if (dcm->resource[res_idx[0]].thread_idx == thread_idx)
+      break;
+  /* *INDENT-ON* */
+
+  if (!(res_idx < vec_end (dev->used_resources)))
+    return clib_error_return (0, "thread %u is not using device %u",
+                              thread_idx, dev_idx);
+
+  dpdk_crypto_clear_resource (res_idx[0]);
+
+  return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (clear_dpdk_crypto_placement, static) = {
+  .path = "clear dpdk crypto placement",
+  .short_help = "clear dpdk crypto placement <device> [<thread>]",
+  .function = clear_dpdk_crypto_placement_fn,
+};
+/* *INDENT-ON* */
+
+u8 *
+format_dpdk_mempool (u8 * s, va_list * args)
+{
+  struct rte_mempool *mp = va_arg (*args, struct rte_mempool *);
+  u32 indent = format_get_indent (s);
+  u32 count = rte_mempool_avail_count (mp);
+
+  s = format (s, "%s\n%Uavailable %7d, allocated %7d total %7d\n",
+              mp->name, format_white_space, indent + 2,
+              count, mp->size - count, mp->size);
+  s = format (s, "%Uphys_addr %p, flags %08x, nb_mem_chunks %u\n",
+              format_white_space, indent + 2,
+              mp->mz->iova, mp->flags, mp->nb_mem_chunks);
+  s = format (s, "%Uelt_size %4u, header_size %3u, trailer_size %u\n",
+              format_white_space, indent + 2,
+              mp->elt_size, mp->header_size, mp->trailer_size);
+  s = format (s, "%Uprivate_data_size %3u, total_elt_size %u\n",
+              format_white_space, indent + 2,
+              mp->private_data_size,
+              mp->elt_size + mp->header_size + mp->trailer_size);
+  return s;
+}
+
+static clib_error_t *
+show_dpdk_crypto_pools_fn (vlib_main_t * vm,
+                           unformat_input_t * input,
+                           vlib_cli_command_t * cmd)
+{
+  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+  crypto_data_t *data;
+
+  /* *INDENT-OFF* */
+  vec_foreach (data, dcm->data)
+    {
+      if (data->crypto_op)
+        vlib_cli_output (vm, "%U\n", format_dpdk_mempool, data->crypto_op);
+      if (data->session_h)
+        vlib_cli_output (vm, "%U\n", format_dpdk_mempool, data->session_h);
+
+      struct rte_mempool **mp;
+      vec_foreach (mp, data->session_drv)
+        if (mp[0])
+          vlib_cli_output (vm, "%U\n", format_dpdk_mempool, mp[0]);
+    }
+  /* *INDENT-ON* */
+
+  return NULL;
+}
+
+/*?
+ * This command is used to display the DPDK Crypto pools information.
+ *
+ * @cliexpar
+ * Example of how to display the DPDK Crypto pools information:
+ * @cliexstart{show dpdk crypto pools}
+ * vpp# show dpdk crypto pools
+ * crypto_pool_numa1
+ *   available   15872, allocated     512 total   16384
+ *   phys_addr 0xf3d2086c0, flags 00000010, nb_mem_chunks 1
+ *   elt_size  160, header_size  64, trailer_size 96
+ *   private_data_size  64, total_elt_size 320
+ *
+ * session_h_pool_numa1
+ *   available   19998, allocated       2 total   20000
+ *   phys_addr 0xf3c9c4380, flags 00000010, nb_mem_chunks 1
+ *   elt_size   40, header_size  64, trailer_size 88
+ *   private_data_size   0, total_elt_size 192
+ *
+ * session_drv0_pool_numa1
+ *   available   19998, allocated       2 total   20000
+ *   phys_addr 0xf3ad42d80, flags 00000010, nb_mem_chunks 1
+ *   elt_size  512, header_size  64, trailer_size 0
+ *   private_data_size   0, total_elt_size 576
+ * @cliexend
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_dpdk_crypto_pools, static) = {
+  .path = "show dpdk crypto pools",
+  .short_help = "show dpdk crypto pools",
+  .function = show_dpdk_crypto_pools_fn,
+};
+/* *INDENT-ON* */
+
+/* TODO Allow the user to define the number of sessions supported */
+/* TODO Allow the user to define the descriptor queue size */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/dpdk-ipsec/crypto_node.c b/extras/deprecated/dpdk-ipsec/crypto_node.c
new file mode 100644
index 00000000000..893848c05b6
--- /dev/null
+++ b/extras/deprecated/dpdk-ipsec/crypto_node.c
@@ -0,0 +1,330 @@
+/*
+ *------------------------------------------------------------------
+ * crypto_node.c - DPDK Cryptodev input node
+ *
+ * Copyright (c) 2017 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------ + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#define foreach_dpdk_crypto_input_error \ + _(DQ_COPS, "Crypto ops dequeued") \ + _(AUTH_FAILED, "Crypto verification failed") \ + _(STATUS, "Crypto operation failed") + +typedef enum +{ +#define _(f,s) DPDK_CRYPTO_INPUT_ERROR_##f, + foreach_dpdk_crypto_input_error +#undef _ + DPDK_CRYPTO_INPUT_N_ERROR, +} dpdk_crypto_input_error_t; + +static char *dpdk_crypto_input_error_strings[] = { +#define _(n, s) s, + foreach_dpdk_crypto_input_error +#undef _ +}; + +extern vlib_node_registration_t dpdk_crypto_input_node; + +typedef struct +{ + /* dev id of this cryptodev */ + u16 dev_id; + u16 next_index; +} dpdk_crypto_input_trace_t; + +static u8 * +format_dpdk_crypto_input_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + dpdk_crypto_input_trace_t *t = va_arg (*args, dpdk_crypto_input_trace_t *); + + s = format (s, "cryptodev-id %d next-index %d", t->dev_id, t->next_index); + + return s; +} + +static_always_inline void +dpdk_crypto_input_check_op (vlib_main_t * vm, vlib_node_runtime_t * node, + struct rte_crypto_op *op0, u16 * next) +{ + if (PREDICT_FALSE (op0->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) + { + next[0] = DPDK_CRYPTO_INPUT_NEXT_DROP; + vlib_node_increment_counter (vm, + node->node_index, + DPDK_CRYPTO_INPUT_ERROR_STATUS, 1); + /* if auth failed */ + if (op0->status == RTE_CRYPTO_OP_STATUS_AUTH_FAILED) + vlib_node_increment_counter (vm, + node->node_index, + DPDK_CRYPTO_INPUT_ERROR_AUTH_FAILED, 1); + } +} + +always_inline void +dpdk_crypto_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node, + u8 dev_id, u32 * bis, u16 * nexts, u32 n_deq) +{ + u32 n_left, n_trace; + + if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node)))) + { + n_left = n_deq; + + while (n_trace && n_left) + { + vlib_buffer_t *b0; + u16 next; + u32 bi; + + bi = bis[0]; + next = nexts[0]; + + b0 = vlib_get_buffer (vm, bi); + + if (PREDICT_TRUE + (vlib_trace_buffer (vm, node, next, b0, /* follow_chain */ 0))) + { + dpdk_crypto_input_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->dev_id = dev_id; + tr->next_index = next; + n_trace--; + } + + n_left--; + nexts++; + bis++; + } + vlib_set_trace_count (vm, node, n_trace); + } +} + +static_always_inline u32 +dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm, + vlib_node_runtime_t * node, crypto_resource_t * res) +{ + u8 numa = rte_socket_id (); + u32 n_ops, total_n_deq, n_deq[2]; + u32 bis[VLIB_FRAME_SIZE], *bi; + u16 nexts[VLIB_FRAME_SIZE], *next; + struct rte_crypto_op **ops; + + n_deq[0] = 0; + n_deq[1] = 0; + bi = bis; + next = nexts; + ops = cwm->ops; + + n_ops = total_n_deq = rte_cryptodev_dequeue_burst (res->dev_id, + res->qp_id, + ops, VLIB_FRAME_SIZE); + /* no op dequeued, do not proceed */ + if (n_ops == 0) + return 0; + + while (n_ops >= 4) + { + struct rte_crypto_op *op0, *op1, *op2, *op3; + + /* Prefetch next iteration. 
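+             Both the rte_crypto_op and its private area (which carries the
+             buffer index, next node and encrypt flag read below) are
+             dereferenced, so prefetch them four ops ahead of use.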
*/ + if (n_ops >= 8) + { + CLIB_PREFETCH (ops[4], CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (ops[5], CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (ops[6], CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (ops[7], CLIB_CACHE_LINE_BYTES, LOAD); + + CLIB_PREFETCH (crypto_op_get_priv (ops[4]), + CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (crypto_op_get_priv (ops[5]), + CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (crypto_op_get_priv (ops[6]), + CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (crypto_op_get_priv (ops[7]), + CLIB_CACHE_LINE_BYTES, LOAD); + } + + op0 = ops[0]; + op1 = ops[1]; + op2 = ops[2]; + op3 = ops[3]; + + next[0] = crypto_op_get_priv (op0)->next; + next[1] = crypto_op_get_priv (op1)->next; + next[2] = crypto_op_get_priv (op2)->next; + next[3] = crypto_op_get_priv (op3)->next; + + bi[0] = crypto_op_get_priv (op0)->bi; + bi[1] = crypto_op_get_priv (op1)->bi; + bi[2] = crypto_op_get_priv (op2)->bi; + bi[3] = crypto_op_get_priv (op3)->bi; + + n_deq[crypto_op_get_priv (op0)->encrypt] += 1; + n_deq[crypto_op_get_priv (op1)->encrypt] += 1; + n_deq[crypto_op_get_priv (op2)->encrypt] += 1; + n_deq[crypto_op_get_priv (op3)->encrypt] += 1; + + dpdk_crypto_input_check_op (vm, node, op0, next + 0); + dpdk_crypto_input_check_op (vm, node, op1, next + 1); + dpdk_crypto_input_check_op (vm, node, op2, next + 2); + dpdk_crypto_input_check_op (vm, node, op3, next + 3); + + op0->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + op1->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + op2->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + op3->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + /* next */ + next += 4; + n_ops -= 4; + ops += 4; + bi += 4; + } + while (n_ops > 0) + { + struct rte_crypto_op *op0; + + op0 = ops[0]; + + next[0] = crypto_op_get_priv (op0)->next; + bi[0] = crypto_op_get_priv (op0)->bi; + + n_deq[crypto_op_get_priv (op0)->encrypt] += 1; + + dpdk_crypto_input_check_op (vm, node, op0, next + 0); + + op0->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + /* next */ + next += 1; + n_ops -= 1; + ops += 1; + bi += 1; + } + + vlib_node_increment_counter (vm, node->node_index, + DPDK_CRYPTO_INPUT_ERROR_DQ_COPS, total_n_deq); + + res->inflights[0] -= n_deq[0]; + res->inflights[1] -= n_deq[1]; + + vlib_buffer_enqueue_to_next (vm, node, bis, nexts, total_n_deq); + + dpdk_crypto_input_trace (vm, node, res->dev_id, bis, nexts, total_n_deq); + + crypto_free_ops (numa, cwm->ops, total_n_deq); + + return total_n_deq; +} + +static_always_inline uword +dpdk_crypto_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_worker_main_t *cwm = &dcm->workers_main[vm->thread_index]; + crypto_resource_t *res; + u32 n_deq = 0; + u16 *remove = NULL, *res_idx; + word i; + + /* *INDENT-OFF* */ + vec_foreach (res_idx, cwm->resource_idx) + { + res = vec_elt_at_index (dcm->resource, res_idx[0]); + u32 inflights = res->inflights[0] + res->inflights[1]; + + if (inflights) + n_deq += dpdk_crypto_dequeue (vm, cwm, node, res); + + inflights = res->inflights[0] + res->inflights[1]; + if (PREDICT_FALSE (res->remove && !(inflights))) + vec_add1 (remove, res_idx[0]); + } + /* *INDENT-ON* */ + + /* TODO removal on master thread? 
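+     For now the owning worker detaches the resource itself: it is removed
+     from the worker's resource_idx vector and handed back to the device's
+     free list only after all of its inflight operations have been dequeued.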
*/
+  if (PREDICT_FALSE (remove != NULL))
+    {
+      /* *INDENT-OFF* */
+      vec_foreach (res_idx, remove)
+        {
+          i = vec_search (cwm->resource_idx, res_idx[0]);
+          vec_del1 (cwm->resource_idx, i);
+
+          res = vec_elt_at_index (dcm->resource, res_idx[0]);
+          res->thread_idx = (u16) ~0;
+          res->remove = 0;
+
+          i = vec_search (dcm->dev[res->dev_id].used_resources, res_idx[0]);
+          ASSERT (i != (u16) ~0);
+          vec_del1 (dcm->dev[res->dev_id].used_resources, i);
+          vec_add1 (dcm->dev[res->dev_id].free_resources, res_idx[0]);
+        }
+      /* *INDENT-ON* */
+
+      vec_free (remove);
+    }
+
+  return n_deq;
+}
+
+VLIB_NODE_FN (dpdk_crypto_input_node) (vlib_main_t * vm,
+                                       vlib_node_runtime_t * node,
+                                       vlib_frame_t * from_frame)
+{
+  return dpdk_crypto_input_inline (vm, node, from_frame);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (dpdk_crypto_input_node) =
+{
+  .name = "dpdk-crypto-input",
+  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
+  .format_trace = format_dpdk_crypto_input_trace,
+  .type = VLIB_NODE_TYPE_INPUT,
+  .state = VLIB_NODE_STATE_DISABLED,
+  .n_errors = DPDK_CRYPTO_INPUT_N_ERROR,
+  .error_strings = dpdk_crypto_input_error_strings,
+  .n_next_nodes = DPDK_CRYPTO_INPUT_N_NEXT,
+  .next_nodes =
+  {
+#define _(s,n) [DPDK_CRYPTO_INPUT_NEXT_##s] = n,
+    foreach_dpdk_crypto_input_next
+#undef _
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/dpdk-ipsec/dir.dox b/extras/deprecated/dpdk-ipsec/dir.dox
new file mode 100644
index 00000000000..05504541abb
--- /dev/null
+++ b/extras/deprecated/dpdk-ipsec/dir.dox
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016 Intel and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Doxygen directory documentation */
+
+/**
+@dir extras/deprecated/dpdk-ipsec
+@brief IPSec ESP encrypt/decrypt using DPDK Cryptodev API.
+
+This directory contains the source code for the DPDK Crypto abstraction layer.
+
+*/
+/*? %%clicmd:group_label DPDK Crypto %% ?*/
+/*? %%syscfg:group_label DPDK Crypto %% ?*/
diff --git a/extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md b/extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md
new file mode 100644
index 00000000000..8cf51f07c03
--- /dev/null
+++ b/extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md
@@ -0,0 +1,87 @@
+# VPP IPSec implementation using DPDK Cryptodev API {#dpdk_crypto_ipsec_doc}
+
+This document describes the DPDK Cryptodev based IPsec implementation: how it hooks into the node graph, how to enable it, and how crypto resources are allocated.
+
+
+## VPP IPsec with DPDK Cryptodev
+
+DPDK Cryptodev is an asynchronous crypto API that supports both Hardware and Software implementations (for more details refer to [DPDK Cryptography Device Library documentation](http://dpdk.org/doc/guides/prog_guide/cryptodev_lib.html)).
+
+When there are enough Cryptodev resources for all workers, the node graph is reconfigured by adding and changing the default next nodes.
+
+The following nodes are added:
+* dpdk-crypto-input : polling input node, dequeuing from crypto devices.
+* dpdk-esp-encrypt : internal node.
+* dpdk-esp-decrypt : internal node.
+* dpdk-esp-encrypt-post : internal node.
+* dpdk-esp-decrypt-post : internal node.
+
+Set new default next nodes:
+* for esp encryption: esp-encrypt -> dpdk-esp-encrypt
+* for esp decryption: esp-decrypt -> dpdk-esp-decrypt
+
+
+### How to enable VPP IPSec with DPDK Cryptodev support
+
+When building DPDK with VPP, Cryptodev support is always enabled.
+
+Additionally, on x86_64 platforms, DPDK is built with SW crypto support.
+
+
+### Crypto Resources allocation
+
+VPP allocates crypto resources based on a best effort approach:
+* first allocate Hardware crypto resources, then Software.
+* if there are not enough crypto resources for all workers, the graph node is not modified and the default VPP IPsec implementation based on OpenSSL is used. The following message is displayed:
+
+      0: dpdk_ipsec_init: not enough Cryptodevs, default to OpenSSL IPsec
+
+
+### Configuration example
+
+To enable DPDK Cryptodev, the user just needs to provide cryptodevs in the startup.conf.
+
+Below is an example startup.conf; it is not meant to be a default configuration:
+
+```
+dpdk {
+  dev 0000:81:00.0
+  dev 0000:81:00.1
+  dev 0000:85:01.0
+  dev 0000:85:01.1
+  vdev crypto_aesni_mb0,socket_id=1
+  vdev crypto_aesni_mb1,socket_id=1
+}
+```
+
+In the above configuration:
+* 0000:81:00.0 and 0000:81:00.1 are Ethernet device BDFs.
+* 0000:85:01.0 and 0000:85:01.1 are Crypto device BDFs and they require the same driver binding as DPDK Ethernet devices but they do not support any extra configuration options.
+* Two AESNI-MB Software (Virtual) Cryptodev PMDs are created in NUMA node 1.
+
+For further details refer to [DPDK Crypto Device Driver documentation](http://dpdk.org/doc/guides/cryptodevs/index.html)
+
+### Operational data
+
+The following CLI command displays the Cryptodev/Worker mapping:
+
+    show crypto device mapping [verbose]
+
+
+### nasm
+
+Building the DPDK Crypto Libraries requires the open source project nasm (The Netwide
+Assembler) to be installed. The recommended version of nasm is 2.12.02; the minimum supported
+version is 2.11.06. Use the following command to determine the current nasm version:
+
+    nasm -v
+
+CentOS 7.3 and earlier and Fedora 21 and earlier use unsupported versions
+of nasm. Use the following set of commands to build a supported version:
+
+    wget http://www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2
+    tar -xjvf nasm-2.12.02.tar.bz2
+    cd nasm-2.12.02/
+    ./configure
+    make
+    sudo make install
diff --git a/extras/deprecated/dpdk-ipsec/esp_decrypt.c b/extras/deprecated/dpdk-ipsec/esp_decrypt.c
new file mode 100644
index 00000000000..9a782abeb94
--- /dev/null
+++ b/extras/deprecated/dpdk-ipsec/esp_decrypt.c
@@ -0,0 +1,739 @@
+/*
+ * esp_decrypt.c : IPSec ESP Decrypt node using DPDK Cryptodev
+ *
+ * Copyright (c) 2017 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define foreach_esp_decrypt_next \ +_(DROP, "error-drop") \ +_(IP4_INPUT, "ip4-input-no-checksum") \ +_(IP6_INPUT, "ip6-input") + +#define _(v, s) ESP_DECRYPT_NEXT_##v, +typedef enum +{ + foreach_esp_decrypt_next +#undef _ + ESP_DECRYPT_N_NEXT, +} esp_decrypt_next_t; + +#define foreach_esp_decrypt_error \ + _(RX_PKTS, "ESP pkts received") \ + _(DECRYPTION_FAILED, "ESP decryption failed") \ + _(REPLAY, "SA replayed packet") \ + _(NOT_IP, "Not IP packet (dropped)") \ + _(ENQ_FAIL, "Enqueue decrypt failed (queue full)") \ + _(DISCARD, "Not enough crypto operations") \ + _(BAD_LEN, "Invalid ciphertext length") \ + _(SESSION, "Failed to get crypto session") \ + _(NOSUP, "Cipher/Auth not supported") + + +typedef enum +{ +#define _(sym,str) ESP_DECRYPT_ERROR_##sym, + foreach_esp_decrypt_error +#undef _ + ESP_DECRYPT_N_ERROR, +} esp_decrypt_error_t; + +static char *esp_decrypt_error_strings[] = { +#define _(sym,string) string, + foreach_esp_decrypt_error +#undef _ +}; + +extern vlib_node_registration_t dpdk_esp4_decrypt_node; +extern vlib_node_registration_t dpdk_esp6_decrypt_node; + +typedef struct +{ + ipsec_crypto_alg_t crypto_alg; + ipsec_integ_alg_t integ_alg; + u8 packet_data[64]; +} esp_decrypt_trace_t; + +/* packet trace format function */ +static u8 * +format_esp_decrypt_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *); + u32 indent = format_get_indent (s); + + s = format (s, "cipher %U auth %U\n", + format_ipsec_crypto_alg, t->crypto_alg, + format_ipsec_integ_alg, t->integ_alg); + s = format (s, "%U%U", + format_white_space, indent, format_esp_header, t->packet_data); + return s; +} + +always_inline uword +dpdk_esp_decrypt_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip6) +{ + u32 n_left_from, *from, *to_next, next_index, thread_index; + u32 thread_idx = vlib_get_thread_index (); + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_resource_t *res = 0; + ipsec_sa_t *sa0 = 0; + crypto_alg_t *cipher_alg = 0, *auth_alg = 0; + struct rte_cryptodev_sym_session *session = 0; + u32 ret, last_sa_index = ~0; + u8 numa = rte_socket_id (); + u8 is_aead = 0; + crypto_worker_main_t *cwm = + vec_elt_at_index (dcm->workers_main, thread_idx); + struct rte_crypto_op **ops = cwm->ops; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + thread_index = vm->thread_index; + + ret = crypto_alloc_ops (numa, ops, n_left_from); + if (ret) + { + if (is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_DISCARD, n_left_from); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_DISCARD, n_left_from); + /* Discard whole frame */ + vlib_buffer_free (vm, from, n_left_from); + return n_left_from; + } + + next_index = ESP_DECRYPT_NEXT_DROP; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + clib_error_t *error; + u32 bi0, sa_index0, iv_size; + u8 trunc_size; + vlib_buffer_t *b0; + esp_header_t *esp0; + struct rte_mbuf *mb0; + struct rte_crypto_op *op; + u16 res_idx; + + bi0 = from[0]; + from += 1; + n_left_from -= 1; + + b0 = vlib_get_buffer (vm, 
bi0); + mb0 = rte_mbuf_from_vlib_buffer (b0); + esp0 = vlib_buffer_get_current (b0); + + /* ih0/ih6_0 */ + CLIB_PREFETCH (esp0, sizeof (esp0[0]) + 16, LOAD); + /* mb0 */ + CLIB_PREFETCH (mb0, CLIB_CACHE_LINE_BYTES, STORE); + + op = ops[0]; + ops += 1; + ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED); + + dpdk_op_priv_t *priv = crypto_op_get_priv (op); + /* store bi in op private */ + priv->bi = bi0; + priv->encrypt = 0; + + u16 op_len = + sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]); + CLIB_PREFETCH (op, op_len, STORE); + + sa_index0 = vnet_buffer (b0)->ipsec.sad_index; + vlib_prefetch_combined_counter (&ipsec_sa_counters, + thread_index, sa_index0); + + if (sa_index0 != last_sa_index) + { + sa0 = ipsec_sa_get (sa_index0); + + cipher_alg = + vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg); + auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg); + + is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD); + if (is_aead) + auth_alg = cipher_alg; + + res_idx = get_resource (cwm, sa0); + + if (PREDICT_FALSE (res_idx == (u16) ~ 0)) + { + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_NOSUP, 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_NOSUP, 1); + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + res = vec_elt_at_index (dcm->resource, res_idx); + + error = crypto_get_session (&session, sa_index0, res, cwm, 0); + if (PREDICT_FALSE (error || !session)) + { + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_SESSION, + 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_SESSION, + 1); + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + last_sa_index = sa_index0; + } + + /* anti-replay check */ + if (ipsec_sa_anti_replay_check + (sa0, clib_host_to_net_u32 (esp0->seq))) + { + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_REPLAY, 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_REPLAY, 1); + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + if (is_ip6) + priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT6_POST; + else + { + priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT4_POST; + b0->flags |= VNET_BUFFER_F_IS_IP4; + } + + /* FIXME multi-seg */ + vlib_increment_combined_counter + (&ipsec_sa_counters, thread_index, sa_index0, + 1, b0->current_length); + + res->ops[res->n_ops] = op; + res->bi[res->n_ops] = bi0; + res->n_ops += 1; + + /* Convert vlib buffer to mbuf */ + mb0->data_len = b0->current_length; + mb0->pkt_len = b0->current_length; + mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data; + + trunc_size = auth_alg->trunc_size; + iv_size = cipher_alg->iv_len; + + /* Outer IP header has already been stripped */ + u16 payload_len = + b0->current_length - sizeof (esp_header_t) - iv_size - trunc_size; + + ASSERT (payload_len >= 4); + + if (payload_len & (cipher_alg->boundary - 1)) + { + if (is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_BAD_LEN, 1); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_BAD_LEN, 1); + res->n_ops -= 1; + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + u32 cipher_off, cipher_len; + u32 auth_len = 0; + u8 *aad = NULL; + + u8 *iv = (u8 *) (esp0 + 1); 
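+          /* The IV received on the wire sits immediately after the ESP
+           * header. For AES-CBC the full 16-byte IV is copied into the
+           * per-op counter block below; for CTR/GCM the 8-byte IV is
+           * combined with the SA salt to form the initial counter block. */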
+ + dpdk_gcm_cnt_blk *icb = &priv->cb; + + cipher_off = sizeof (esp_header_t) + iv_size; + cipher_len = payload_len; + + u8 *digest = vlib_buffer_get_tail (b0) - trunc_size; + u64 digest_paddr = mb0->buf_iova + digest - ((u8 *) mb0->buf_addr); + + if (!is_aead && cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC) + clib_memcpy_fast (icb, iv, 16); + else /* CTR/GCM */ + { + u32 *_iv = (u32 *) iv; + + crypto_set_icb (icb, sa0->salt, _iv[0], _iv[1]); + } + + if (is_aead) + { + aad = priv->aad; + u32 *_aad = (u32 *) aad; + clib_memcpy_fast (aad, esp0, 8); + + /* _aad[3] should always be 0 */ + if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0))) + { + _aad[2] = _aad[1]; + _aad[1] = clib_host_to_net_u32 (sa0->seq_hi); + } + else + _aad[2] = 0; + } + else + { + auth_len = sizeof (esp_header_t) + iv_size + payload_len; + + if (ipsec_sa_is_set_USE_ESN (sa0)) + { + clib_memcpy_fast (priv->icv, digest, trunc_size); + u32 *_digest = (u32 *) digest; + _digest[0] = clib_host_to_net_u32 (sa0->seq_hi); + auth_len += sizeof (sa0->seq_hi); + + digest = priv->icv; + digest_paddr = + op->phys_addr + (uintptr_t) priv->icv - (uintptr_t) op; + } + } + + crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len, + 0, auth_len, aad, digest, digest_paddr); + trace: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_decrypt_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; + clib_memcpy_fast (tr->packet_data, vlib_buffer_get_current (b0), + sizeof (esp_header_t)); + } + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + if (is_ip6) + { + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + + crypto_enqueue_ops (vm, cwm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ ); + } + else + { + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + + crypto_enqueue_ops (vm, cwm, dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ ); + } + + crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops); + + return from_frame->n_vectors; +} + +VLIB_NODE_FN (dpdk_esp4_decrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_decrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp4_decrypt_node) = { + .name = "dpdk-esp4-decrypt", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_error_strings), + .error_strings = esp_decrypt_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, + foreach_esp_decrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (dpdk_esp6_decrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_decrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp6_decrypt_node) = { + .name = "dpdk-esp6-decrypt", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_error_strings), + .error_strings = esp_decrypt_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, + 
foreach_esp_decrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/* + * Decrypt Post Node + */ + +#define foreach_esp_decrypt_post_error \ + _(PKTS, "ESP post pkts") + +typedef enum +{ +#define _(sym,str) ESP_DECRYPT_POST_ERROR_##sym, + foreach_esp_decrypt_post_error +#undef _ + ESP_DECRYPT_POST_N_ERROR, +} esp_decrypt_post_error_t; + +static char *esp_decrypt_post_error_strings[] = { +#define _(sym,string) string, + foreach_esp_decrypt_post_error +#undef _ +}; + +extern vlib_node_registration_t dpdk_esp4_decrypt_post_node; +extern vlib_node_registration_t dpdk_esp6_decrypt_post_node; + +static u8 * +format_esp_decrypt_post_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *); + u32 indent = format_get_indent (s); + + s = format (s, "cipher %U auth %U\n", + format_ipsec_crypto_alg, t->crypto_alg, + format_ipsec_integ_alg, t->integ_alg); + + ip4_header_t *ih4 = (ip4_header_t *) t->packet_data; + if ((ih4->ip_version_and_header_length & 0xF0) == 0x60) + s = + format (s, "%U%U", format_white_space, indent, format_ip6_header, ih4); + else + s = + format (s, "%U%U", format_white_space, indent, format_ip4_header, ih4); + + return s; +} + +always_inline uword +dpdk_esp_decrypt_post_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip6) +{ + u32 n_left_from, *from, *to_next = 0, next_index; + ipsec_sa_t *sa0; + u32 sa_index0 = ~0; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + esp_footer_t *f0; + u32 bi0, iv_size, next0; + vlib_buffer_t *b0 = 0; + ip4_header_t *ih4 = 0, *oh4 = 0; + ip6_header_t *ih6 = 0, *oh6 = 0; + crypto_alg_t *cipher_alg, *auth_alg; + esp_header_t *esp0; + u8 trunc_size, is_aead; + u16 udp_encap_adv = 0; + + next0 = ESP_DECRYPT_NEXT_DROP; + + bi0 = from[0]; + from += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + esp0 = vlib_buffer_get_current (b0); + + sa_index0 = vnet_buffer (b0)->ipsec.sad_index; + sa0 = ipsec_sa_get (sa_index0); + + to_next[0] = bi0; + to_next += 1; + + cipher_alg = vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg); + auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg); + is_aead = cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD; + if (is_aead) + auth_alg = cipher_alg; + + trunc_size = auth_alg->trunc_size; + + iv_size = cipher_alg->iv_len; + + ipsec_sa_anti_replay_advance (sa0, + clib_host_to_net_u32 (esp0->seq)); + + /* if UDP encapsulation is used adjust the address of the IP header */ + if (ipsec_sa_is_set_UDP_ENCAP (sa0) + && (b0->flags & VNET_BUFFER_F_IS_IP4)) + { + udp_encap_adv = sizeof (udp_header_t); + } + + if (b0->flags & VNET_BUFFER_F_IS_IP4) + ih4 = (ip4_header_t *) + ((u8 *) esp0 - udp_encap_adv - sizeof (ip4_header_t)); + else + ih4 = (ip4_header_t *) ((u8 *) esp0 - sizeof (ip6_header_t)); + + vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size); + + b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; + f0 = (esp_footer_t *) (vlib_buffer_get_tail (b0) - trunc_size - 2); + b0->current_length -= (f0->pad_length + trunc_size + 2); +#if 0 + /* check padding */ + const u8 
*padding = vlib_buffer_get_tail (b0); + if (PREDICT_FALSE (memcmp (padding, pad_data, f0->pad_length))) + { + clib_warning ("bad padding"); + vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + goto trace; + } +#endif + if (ipsec_sa_is_set_IS_TUNNEL (sa0)) + { + if (f0->next_header == IP_PROTOCOL_IP_IN_IP) + next0 = ESP_DECRYPT_NEXT_IP4_INPUT; + else if (f0->next_header == IP_PROTOCOL_IPV6) + next0 = ESP_DECRYPT_NEXT_IP6_INPUT; + else + { + clib_warning ("next header: 0x%x", f0->next_header); + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + goto trace; + } + } + else /* transport mode */ + { + if ((ih4->ip_version_and_header_length & 0xF0) == 0x40) + { + u16 ih4_len = ip4_header_bytes (ih4); + vlib_buffer_advance (b0, -ih4_len); + next0 = ESP_DECRYPT_NEXT_IP4_INPUT; + + oh4 = vlib_buffer_get_current (b0); + memmove (oh4, ih4, ih4_len); + oh4->protocol = f0->next_header; + oh4->length = clib_host_to_net_u16 (b0->current_length); + oh4->checksum = ip4_header_checksum (oh4); + } + else if ((ih4->ip_version_and_header_length & 0xF0) == 0x60) + { + ih6 = (ip6_header_t *) ih4; + vlib_buffer_advance (b0, -sizeof (ip6_header_t)); + oh6 = vlib_buffer_get_current (b0); + memmove (oh6, ih6, sizeof (ip6_header_t)); + + next0 = ESP_DECRYPT_NEXT_IP6_INPUT; + oh6->protocol = f0->next_header; + u16 len = b0->current_length - sizeof (ip6_header_t); + oh6->payload_length = clib_host_to_net_u16 (len); + } + else + { + clib_warning ("next header: 0x%x", f0->next_header); + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + goto trace; + } + } + + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + + trace: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_decrypt_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; + ih4 = vlib_buffer_get_current (b0); + clib_memcpy_fast (tr->packet_data, ih4, sizeof (ip6_header_t)); + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, bi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + if (is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_post_node.index, + ESP_DECRYPT_POST_ERROR_PKTS, + from_frame->n_vectors); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_post_node.index, + ESP_DECRYPT_POST_ERROR_PKTS, + from_frame->n_vectors); + + return from_frame->n_vectors; +} + +VLIB_NODE_FN (dpdk_esp4_decrypt_post_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_decrypt_post_inline (vm, node, from_frame, 0 /*is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp4_decrypt_post_node) = { + .name = "dpdk-esp4-decrypt-post", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_post_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings), + .error_strings = esp_decrypt_post_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, + foreach_esp_decrypt_next +#undef _ + }, 
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (dpdk_esp6_decrypt_post_node) (vlib_main_t * vm,
+                                            vlib_node_runtime_t * node,
+                                            vlib_frame_t * from_frame)
+{
+  return dpdk_esp_decrypt_post_inline (vm, node, from_frame, 1 /*is_ip6 */ );
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (dpdk_esp6_decrypt_post_node) = {
+  .name = "dpdk-esp6-decrypt-post",
+  .vector_size = sizeof (u32),
+  .format_trace = format_esp_decrypt_post_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings),
+  .error_strings = esp_decrypt_post_error_strings,
+
+  .n_next_nodes = ESP_DECRYPT_N_NEXT,
+  .next_nodes = {
+#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
+    foreach_esp_decrypt_next
+#undef _
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/extras/deprecated/dpdk-ipsec/esp_encrypt.c b/extras/deprecated/dpdk-ipsec/esp_encrypt.c
new file mode 100644
index 00000000000..ce1b5795995
--- /dev/null
+++ b/extras/deprecated/dpdk-ipsec/esp_encrypt.c
@@ -0,0 +1,709 @@
+/*
+ * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev
+ *
+ * Copyright (c) 2017 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define foreach_esp_encrypt_next \
+_(DROP, "error-drop") \
+_(IP4_LOOKUP, "ip4-lookup") \
+_(IP6_LOOKUP, "ip6-lookup") \
+_(INTERFACE_OUTPUT, "interface-output")
+
+#define _(v, s) ESP_ENCRYPT_NEXT_##v,
+typedef enum
+{
+  foreach_esp_encrypt_next
+#undef _
+    ESP_ENCRYPT_N_NEXT,
+} esp_encrypt_next_t;
+
+#define foreach_esp_encrypt_error \
+ _(RX_PKTS, "ESP pkts received") \
+ _(SEQ_CYCLED, "Sequence number cycled") \
+ _(ENQ_FAIL, "Enqueue encrypt failed (queue full)") \
+ _(DISCARD, "Not enough crypto operations") \
+ _(SESSION, "Failed to get crypto session") \
+ _(NOSUP, "Cipher/Auth not supported")
+
+
+typedef enum
+{
+#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
+  foreach_esp_encrypt_error
+#undef _
+    ESP_ENCRYPT_N_ERROR,
+} esp_encrypt_error_t;
+
+static char *esp_encrypt_error_strings[] = {
+#define _(sym,string) string,
+  foreach_esp_encrypt_error
+#undef _
+};
+
+extern vlib_node_registration_t dpdk_esp4_encrypt_node;
+extern vlib_node_registration_t dpdk_esp6_encrypt_node;
+extern vlib_node_registration_t dpdk_esp4_encrypt_tun_node;
+extern vlib_node_registration_t dpdk_esp6_encrypt_tun_node;
+
+typedef struct
+{
+  ipsec_crypto_alg_t crypto_alg;
+  ipsec_integ_alg_t integ_alg;
+  u8 packet_data[64];
+} esp_encrypt_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_esp_encrypt_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
+  ip4_header_t *ih4 = (ip4_header_t *) t->packet_data;
+  u32 indent = format_get_indent (s),
offset; + + s = format (s, "cipher %U auth %U\n", + format_ipsec_crypto_alg, t->crypto_alg, + format_ipsec_integ_alg, t->integ_alg); + + if ((ih4->ip_version_and_header_length & 0xF0) == 0x60) + { + s = format (s, "%U%U", format_white_space, indent, + format_ip6_header, ih4); + offset = sizeof (ip6_header_t); + } + else + { + s = format (s, "%U%U", format_white_space, indent, + format_ip4_header, ih4); + offset = ip4_header_bytes (ih4); + } + + s = format (s, "\n%U%U", format_white_space, indent, + format_esp_header, t->packet_data + offset); + + return s; +} + +always_inline uword +dpdk_esp_encrypt_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip6, int is_tun) +{ + u32 n_left_from, *from, *to_next, next_index, thread_index; + ipsec_main_t *im = &ipsec_main; + vnet_main_t *vnm = vnet_get_main (); + u32 thread_idx = vlib_get_thread_index (); + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_resource_t *res = 0; + ipsec_sa_t *sa0 = 0; + crypto_alg_t *cipher_alg = 0, *auth_alg = 0; + struct rte_cryptodev_sym_session *session = 0; + u32 ret, last_sa_index = ~0; + u8 numa = rte_socket_id (); + u8 is_aead = 0; + crypto_worker_main_t *cwm = + vec_elt_at_index (dcm->workers_main, thread_idx); + struct rte_crypto_op **ops = cwm->ops; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + thread_index = vm->thread_index; + + ret = crypto_alloc_ops (numa, ops, n_left_from); + if (ret) + { + if (is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_DISCARD, n_left_from); + else + vlib_node_increment_counter (vm, dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_DISCARD, n_left_from); + /* Discard whole frame */ + vlib_buffer_free (vm, from, n_left_from); + return n_left_from; + } + + next_index = ESP_ENCRYPT_NEXT_DROP; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + clib_error_t *error; + u32 bi0, bi1; + vlib_buffer_t *b0, *b1; + u32 sa_index0; + ip4_and_esp_header_t *ih0, *oh0 = 0; + ip6_and_esp_header_t *ih6_0, *oh6_0 = 0; + ip4_and_udp_and_esp_header_t *ouh0 = 0; + esp_header_t *esp0; + esp_footer_t *f0; + u8 next_hdr_type; + u32 iv_size; + u16 orig_sz; + u8 trunc_size; + u16 rewrite_len; + u16 udp_encap_adv = 0; + struct rte_mbuf *mb0; + struct rte_crypto_op *op; + u16 res_idx; + + bi0 = from[0]; + from += 1; + n_left_from -= 1; + + b0 = vlib_get_buffer (vm, bi0); + ih0 = vlib_buffer_get_current (b0); + mb0 = rte_mbuf_from_vlib_buffer (b0); + + /* ih0/ih6_0 */ + CLIB_PREFETCH (ih0, sizeof (ih6_0[0]), LOAD); + /* f0 */ + CLIB_PREFETCH (vlib_buffer_get_tail (b0), 20, STORE); + /* mb0 */ + CLIB_PREFETCH (mb0, CLIB_CACHE_LINE_BYTES, STORE); + + if (n_left_from > 1) + { + bi1 = from[1]; + b1 = vlib_get_buffer (vm, bi1); + + CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (b1->data - CLIB_CACHE_LINE_BYTES, + CLIB_CACHE_LINE_BYTES, STORE); + } + + op = ops[0]; + ops += 1; + ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED); + + dpdk_op_priv_t *priv = crypto_op_get_priv (op); + /* store bi in op private */ + priv->bi = bi0; + priv->encrypt = 1; + + u16 op_len = + sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]); + CLIB_PREFETCH (op, op_len, STORE); + + if (is_tun) + { + /* we are on a ipsec tunnel's feature arc */ + vnet_buffer (b0)->ipsec.sad_index = + sa_index0 = ipsec_tun_protect_get_sa_out + (vnet_buffer 
(b0)->ip.adj_index[VLIB_TX]);
+	    }
+	  else
+	    sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
+
+	  if (sa_index0 != last_sa_index)
+	    {
+	      sa0 = ipsec_sa_get (sa_index0);
+
+	      cipher_alg =
+		vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg);
+	      auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg);
+
+	      is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD);
+
+	      if (is_aead)
+		auth_alg = cipher_alg;
+
+	      res_idx = get_resource (cwm, sa0);
+
+	      if (PREDICT_FALSE (res_idx == (u16) ~ 0))
+		{
+		  if (is_ip6)
+		    vlib_node_increment_counter (vm,
+						 dpdk_esp6_encrypt_node.index,
+						 ESP_ENCRYPT_ERROR_NOSUP, 1);
+		  else
+		    vlib_node_increment_counter (vm,
+						 dpdk_esp4_encrypt_node.index,
+						 ESP_ENCRYPT_ERROR_NOSUP, 1);
+		  to_next[0] = bi0;
+		  to_next += 1;
+		  n_left_to_next -= 1;
+		  goto trace;
+		}
+	      res = vec_elt_at_index (dcm->resource, res_idx);
+
+	      error = crypto_get_session (&session, sa_index0, res, cwm, 1);
+	      if (PREDICT_FALSE (error || !session))
+		{
+		  if (is_ip6)
+		    vlib_node_increment_counter (vm,
+						 dpdk_esp6_encrypt_node.index,
+						 ESP_ENCRYPT_ERROR_SESSION,
+						 1);
+		  else
+		    vlib_node_increment_counter (vm,
+						 dpdk_esp4_encrypt_node.index,
+						 ESP_ENCRYPT_ERROR_SESSION,
+						 1);
+		  to_next[0] = bi0;
+		  to_next += 1;
+		  n_left_to_next -= 1;
+		  goto trace;
+		}
+
+	      last_sa_index = sa_index0;
+	    }
+
+	  if (PREDICT_FALSE (esp_seq_advance (sa0)))
+	    {
+	      if (is_ip6)
+		vlib_node_increment_counter (vm,
+					     dpdk_esp6_encrypt_node.index,
+					     ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
+	      else
+		vlib_node_increment_counter (vm,
+					     dpdk_esp4_encrypt_node.index,
+					     ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
+	      //TODO: rekey SA
+	      to_next[0] = bi0;
+	      to_next += 1;
+	      n_left_to_next -= 1;
+	      goto trace;
+	    }
+
+	  orig_sz = b0->current_length;
+
+	  /* TODO multi-seg support - total_length_not_including_first_buffer */
+	  vlib_increment_combined_counter
+	    (&ipsec_sa_counters, thread_index, sa_index0,
+	     1, b0->current_length);
+
+	  /* Update tunnel interface tx counters */
+	  if (is_tun)
+	    vlib_increment_combined_counter
+	      (vnm->interface_main.combined_sw_if_counters +
+	       VNET_INTERFACE_COUNTER_TX,
+	       thread_index, vnet_buffer (b0)->sw_if_index[VLIB_TX],
+	       1, b0->current_length);
+
+	  res->ops[res->n_ops] = op;
+	  res->bi[res->n_ops] = bi0;
+	  res->n_ops += 1;
+
+	  dpdk_gcm_cnt_blk *icb = &priv->cb;
+
+	  crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi);
+
+	  iv_size = cipher_alg->iv_len;
+	  trunc_size = auth_alg->trunc_size;
+
+	  /* if UDP encapsulation is used, make room for the UDP header */
+	  if (ipsec_sa_is_set_UDP_ENCAP (sa0) && !is_ip6)
+	    udp_encap_adv = sizeof (udp_header_t);
+
+	  if (ipsec_sa_is_set_IS_TUNNEL (sa0))
+	    {
+	      rewrite_len = 0;
+	      if (!ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))	/* ip4 */
+		{
+		  /* in tunnel mode send it back to FIB */
+		  priv->next = DPDK_CRYPTO_INPUT_NEXT_IP4_LOOKUP;
+		  u8 adv = sizeof (ip4_header_t) + udp_encap_adv +
+		    sizeof (esp_header_t) + iv_size;
+		  vlib_buffer_advance (b0, -adv);
+		  oh0 = vlib_buffer_get_current (b0);
+		  ouh0 = vlib_buffer_get_current (b0);
+		  next_hdr_type = (is_ip6 ?
+ IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); + /* + * oh0->ip4.ip_version_and_header_length = 0x45; + * oh0->ip4.tos = ih0->ip4.tos; + * oh0->ip4.fragment_id = 0; + * oh0->ip4.flags_and_fragment_offset = 0; + */ + oh0->ip4.checksum_data_64[0] = + clib_host_to_net_u64 (0x45ULL << 56); + /* + * oh0->ip4.ttl = 254; + * oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; + */ + oh0->ip4.checksum_data_32[2] = + clib_host_to_net_u32 (0xfe320000); + + oh0->ip4.src_address.as_u32 = + sa0->tunnel.t_src.ip.ip4.as_u32; + oh0->ip4.dst_address.as_u32 = + sa0->tunnel.t_dst.ip.ip4.as_u32; + + if (ipsec_sa_is_set_UDP_ENCAP (sa0)) + { + oh0->ip4.protocol = IP_PROTOCOL_UDP; + esp0 = &ouh0->esp; + } + else + esp0 = &oh0->esp; + esp0->spi = clib_host_to_net_u32 (sa0->spi); + esp0->seq = clib_host_to_net_u32 (sa0->seq); + } + else + { + /* ip6 */ + /* in tunnel mode send it back to FIB */ + priv->next = DPDK_CRYPTO_INPUT_NEXT_IP6_LOOKUP; + + u8 adv = + sizeof (ip6_header_t) + sizeof (esp_header_t) + iv_size; + vlib_buffer_advance (b0, -adv); + ih6_0 = (ip6_and_esp_header_t *) ih0; + oh6_0 = vlib_buffer_get_current (b0); + + next_hdr_type = (is_ip6 ? + IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); + + oh6_0->ip6.ip_version_traffic_class_and_flow_label = + ih6_0->ip6.ip_version_traffic_class_and_flow_label; + + oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP; + oh6_0->ip6.hop_limit = 254; + oh6_0->ip6.src_address.as_u64[0] = + sa0->tunnel.t_src.ip.ip6.as_u64[0]; + oh6_0->ip6.src_address.as_u64[1] = + sa0->tunnel.t_src.ip.ip6.as_u64[1]; + oh6_0->ip6.dst_address.as_u64[0] = + sa0->tunnel.t_dst.ip.ip6.as_u64[0]; + oh6_0->ip6.dst_address.as_u64[1] = + sa0->tunnel.t_dst.ip.ip6.as_u64[1]; + esp0 = &oh6_0->esp; + oh6_0->esp.spi = clib_host_to_net_u32 (sa0->spi); + oh6_0->esp.seq = clib_host_to_net_u32 (sa0->seq); + } + + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + } + else /* transport mode */ + { + if (is_tun) + { + rewrite_len = 0; + priv->next = DPDK_CRYPTO_INPUT_NEXT_MIDCHAIN; + } + else + { + priv->next = DPDK_CRYPTO_INPUT_NEXT_INTERFACE_OUTPUT; + rewrite_len = vnet_buffer (b0)->ip.save_rewrite_length; + } + u16 adv = sizeof (esp_header_t) + iv_size + udp_encap_adv; + vlib_buffer_advance (b0, -adv - rewrite_len); + u8 *src = ((u8 *) ih0) - rewrite_len; + u8 *dst = vlib_buffer_get_current (b0); + oh0 = vlib_buffer_get_current (b0) + rewrite_len; + ouh0 = vlib_buffer_get_current (b0) + rewrite_len; + + if (is_ip6) + { + orig_sz -= sizeof (ip6_header_t); + ih6_0 = (ip6_and_esp_header_t *) ih0; + next_hdr_type = ih6_0->ip6.protocol; + memmove (dst, src, rewrite_len + sizeof (ip6_header_t)); + oh6_0 = (ip6_and_esp_header_t *) oh0; + oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP; + esp0 = &oh6_0->esp; + } + else /* ipv4 */ + { + u16 ip_size = ip4_header_bytes (&ih0->ip4); + orig_sz -= ip_size; + next_hdr_type = ih0->ip4.protocol; + memmove (dst, src, rewrite_len + ip_size); + oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; + esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size); + if (ipsec_sa_is_set_UDP_ENCAP (sa0)) + { + oh0->ip4.protocol = IP_PROTOCOL_UDP; + esp0 = (esp_header_t *) + (((u8 *) oh0) + ip_size + udp_encap_adv); + } + else + { + oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; + esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size); + } + } + esp0->spi = clib_host_to_net_u32 (sa0->spi); + esp0->seq = clib_host_to_net_u32 (sa0->seq); + } + + if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0) + { + ouh0->udp.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec); + ouh0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec); + 
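/* NAT-T (RFC 3948): ESP-in-UDP uses port 4500 (UDP_DST_PORT_ipsec) for both source and destination, and the UDP checksum is transmitted as zero, which the RFC permits */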
ouh0->udp.checksum = 0; + } + ASSERT (is_pow2 (cipher_alg->boundary)); + u16 mask = cipher_alg->boundary - 1; + u16 pad_payload_len = ((orig_sz + 2) + mask) & ~mask; + u8 pad_bytes = pad_payload_len - 2 - orig_sz; + + u8 *padding = + vlib_buffer_put_uninit (b0, pad_bytes + 2 + trunc_size); + + /* The extra pad bytes would be overwritten by the digest */ + if (pad_bytes) + clib_memcpy_fast (padding, pad_data, 16); + + f0 = (esp_footer_t *) (padding + pad_bytes); + f0->pad_length = pad_bytes; + f0->next_header = next_hdr_type; + + if (oh6_0) + { + u16 len = b0->current_length - sizeof (ip6_header_t); + oh6_0->ip6.payload_length = + clib_host_to_net_u16 (len - rewrite_len); + } + else if (oh0) + { + oh0->ip4.length = + clib_host_to_net_u16 (b0->current_length - rewrite_len); + oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4); + if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0) + { + ouh0->udp.length = + clib_host_to_net_u16 (clib_net_to_host_u16 + (ouh0->ip4.length) - + ip4_header_bytes (&ouh0->ip4)); + } + } + else /* should never happen */ + clib_warning ("No outer header found for ESP packet"); + + b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; + + /* mbuf packet starts at ESP header */ + mb0->data_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0); + mb0->pkt_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0); + mb0->data_off = ((void *) esp0) - mb0->buf_addr; + + u32 cipher_off, cipher_len, auth_len = 0; + u32 *aad = NULL; + + u8 *digest = vlib_buffer_get_tail (b0) - trunc_size; + u64 digest_paddr = mb0->buf_iova + digest - ((u8 *) mb0->buf_addr); + + if (!is_aead && (cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC || + cipher_alg->alg == RTE_CRYPTO_CIPHER_NULL)) + { + cipher_off = sizeof (esp_header_t); + cipher_len = iv_size + pad_payload_len; + } + else /* CTR/GCM */ + { + u32 *esp_iv = (u32 *) (esp0 + 1); + esp_iv[0] = sa0->seq; + esp_iv[1] = sa0->seq_hi; + + cipher_off = sizeof (esp_header_t) + iv_size; + cipher_len = pad_payload_len; + } + + if (is_aead) + { + aad = (u32 *) priv->aad; + aad[0] = esp0->spi; + + /* aad[3] should always be 0 */ + if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0))) + { + aad[1] = clib_host_to_net_u32 (sa0->seq_hi); + aad[2] = esp0->seq; + } + else + { + aad[1] = esp0->seq; + aad[2] = 0; + } + } + else + { + auth_len = + vlib_buffer_get_tail (b0) - ((u8 *) esp0) - trunc_size; + if (ipsec_sa_is_set_USE_ESN (sa0)) + { + u32 *_digest = (u32 *) digest; + _digest[0] = clib_host_to_net_u32 (sa0->seq_hi); + auth_len += 4; + } + } + + crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len, + 0, auth_len, (u8 *) aad, digest, digest_paddr); + + trace: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_encrypt_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; + u8 *p = vlib_buffer_get_current (b0); + if (!ipsec_sa_is_set_IS_TUNNEL (sa0) && !is_tun) + p += vnet_buffer (b0)->ip.save_rewrite_length; + clib_memcpy_fast (tr->packet_data, p, sizeof (tr->packet_data)); + } + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + if (is_ip6) + { + vlib_node_increment_counter (vm, + (is_tun ? + dpdk_esp6_encrypt_tun_node.index : + dpdk_esp6_encrypt_node.index), + ESP_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + + crypto_enqueue_ops (vm, cwm, dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ ); + } + else + { + vlib_node_increment_counter (vm, + (is_tun ? 
+ dpdk_esp4_encrypt_tun_node.index : + dpdk_esp4_encrypt_node.index), + ESP_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + + crypto_enqueue_ops (vm, cwm, dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ ); + } + + crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops); + + return from_frame->n_vectors; +} + +VLIB_NODE_FN (dpdk_esp4_encrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 0); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp4_encrypt_node) = { + .name = "dpdk-esp4-encrypt", + .flags = VLIB_NODE_FLAG_IS_OUTPUT, + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .n_errors = ARRAY_LEN (esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + .n_next_nodes = 1, + .next_nodes = + { + [ESP_ENCRYPT_NEXT_DROP] = "error-drop", + } +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (dpdk_esp6_encrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 0); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp6_encrypt_node) = { + .name = "dpdk-esp6-encrypt", + .flags = VLIB_NODE_FLAG_IS_OUTPUT, + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .n_errors = ARRAY_LEN (esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + .n_next_nodes = 1, + .next_nodes = + { + [ESP_ENCRYPT_NEXT_DROP] = "error-drop", + } +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (dpdk_esp4_encrypt_tun_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 1); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp4_encrypt_tun_node) = { + .name = "dpdk-esp4-encrypt-tun", + .flags = VLIB_NODE_FLAG_IS_OUTPUT, + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .n_errors = ARRAY_LEN (esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + .n_next_nodes = 1, + .next_nodes = + { + [ESP_ENCRYPT_NEXT_DROP] = "error-drop", + } +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (dpdk_esp6_encrypt_tun_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 1); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp6_encrypt_tun_node) = { + .name = "dpdk-esp6-encrypt-tun", + .flags = VLIB_NODE_FLAG_IS_OUTPUT, + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .n_errors = ARRAY_LEN (esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + .n_next_nodes = 1, + .next_nodes = + { + [ESP_ENCRYPT_NEXT_DROP] = "error-drop", + } +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/extras/deprecated/dpdk-ipsec/ipsec.c b/extras/deprecated/dpdk-ipsec/ipsec.c new file mode 100644 index 00000000000..e260ba7dcc4 --- /dev/null +++ b/extras/deprecated/dpdk-ipsec/ipsec.c @@ -0,0 +1,1087 @@ +/* + * Copyright (c) 2017 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +dpdk_crypto_main_t dpdk_crypto_main; + +#define EMPTY_STRUCT {0} +#define NUM_CRYPTO_MBUFS 16384 + +static void +algos_init (u32 n_mains) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *a; + + vec_validate_aligned (dcm->cipher_algs, IPSEC_CRYPTO_N_ALG - 1, 8); + + { +#define _(v,f,str) \ + dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].name = str; \ + dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].disabled = n_mains; + foreach_ipsec_crypto_alg +#undef _ + } + + /* Minimum boundary for ciphers is 4B, required by ESP */ + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_NONE]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_NULL; + a->boundary = 4; /* 1 */ + a->key_len = 0; + a->iv_len = 0; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_128]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CBC; + a->boundary = 16; + a->key_len = 16; + a->iv_len = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_192]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CBC; + a->boundary = 16; + a->key_len = 24; + a->iv_len = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_256]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CBC; + a->boundary = 16; + a->key_len = 32; + a->iv_len = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_128]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CTR; + a->boundary = 4; /* 1 */ + a->key_len = 16; + a->iv_len = 8; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_192]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CTR; + a->boundary = 4; /* 1 */ + a->key_len = 24; + a->iv_len = 8; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_256]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CTR; + a->boundary = 4; /* 1 */ + a->key_len = 32; + a->iv_len = 8; + +#define AES_GCM_TYPE RTE_CRYPTO_SYM_XFORM_AEAD +#define AES_GCM_ALG RTE_CRYPTO_AEAD_AES_GCM + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_128]; + a->type = AES_GCM_TYPE; + a->alg = AES_GCM_ALG; + a->boundary = 4; /* 1 */ + a->key_len = 16; + a->iv_len = 8; + a->trunc_size = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_192]; + a->type = AES_GCM_TYPE; + a->alg = AES_GCM_ALG; + a->boundary = 4; /* 1 */ + a->key_len = 24; + a->iv_len = 8; + a->trunc_size = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_256]; + a->type = AES_GCM_TYPE; + a->alg = AES_GCM_ALG; + a->boundary = 4; /* 1 */ + a->key_len = 32; + a->iv_len = 8; + a->trunc_size = 16; + + vec_validate (dcm->auth_algs, IPSEC_INTEG_N_ALG - 1); + + { +#define _(v,f,str) \ + dcm->auth_algs[IPSEC_INTEG_ALG_##f].name = str; \ + dcm->auth_algs[IPSEC_INTEG_ALG_##f].disabled = n_mains; + foreach_ipsec_integ_alg +#undef _ + } + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_NONE]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_NULL; + a->key_len = 0; + a->trunc_size = 0; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_MD5_96]; + a->type = 
RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_MD5_HMAC; + a->key_len = 16; + a->trunc_size = 12; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA1_96]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA1_HMAC; + a->key_len = 20; + a->trunc_size = 12; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_96]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC; + a->key_len = 32; + a->trunc_size = 12; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_128]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC; + a->key_len = 32; + a->trunc_size = 16; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_384_192]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA384_HMAC; + a->key_len = 48; + a->trunc_size = 24; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_512_256]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA512_HMAC; + a->key_len = 64; + a->trunc_size = 32; +} + +static u8 +cipher_alg_index (const crypto_alg_t * alg) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + return (alg - dcm->cipher_algs); +} + +static u8 +auth_alg_index (const crypto_alg_t * alg) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + return (alg - dcm->auth_algs); +} + +static crypto_alg_t * +cipher_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 key_len) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *alg; + + if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + return NULL; + + /* *INDENT-OFF* */ + vec_foreach (alg, dcm->cipher_algs) + { + if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) && + (alg->type == RTE_CRYPTO_SYM_XFORM_CIPHER) && + (cap->sym.cipher.algo == alg->alg) && + (alg->key_len == key_len)) + return alg; + if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) && + (alg->type == RTE_CRYPTO_SYM_XFORM_AEAD) && + (cap->sym.aead.algo == alg->alg) && + (alg->key_len == key_len)) + return alg; + } + /* *INDENT-ON* */ + + return NULL; +} + +static crypto_alg_t * +auth_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 trunc_size) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *alg; + + if ((cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) || + (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)) + return NULL; + + /* *INDENT-OFF* */ + vec_foreach (alg, dcm->auth_algs) + { + if ((cap->sym.auth.algo == alg->alg) && + (alg->trunc_size == trunc_size)) + return alg; + } + /* *INDENT-ON* */ + + return NULL; +} + +static void +crypto_set_aead_xform (struct rte_crypto_sym_xform *xform, + ipsec_sa_t * sa, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *c; + + c = vec_elt_at_index (dcm->cipher_algs, sa->crypto_alg); + + ASSERT (c->type == RTE_CRYPTO_SYM_XFORM_AEAD); + + xform->type = RTE_CRYPTO_SYM_XFORM_AEAD; + xform->aead.algo = c->alg; + xform->aead.key.data = sa->crypto_key.data; + xform->aead.key.length = c->key_len; + xform->aead.iv.offset = + crypto_op_get_priv_offset () + offsetof (dpdk_op_priv_t, cb); + xform->aead.iv.length = 12; + xform->aead.digest_length = c->trunc_size; + xform->aead.aad_length = ipsec_sa_is_set_USE_ESN (sa) ? 
12 : 8; + xform->next = NULL; + + if (is_outbound) + xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT; + else + xform->aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT; +} + +static void +crypto_set_cipher_xform (struct rte_crypto_sym_xform *xform, + ipsec_sa_t * sa, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *c; + + c = vec_elt_at_index (dcm->cipher_algs, sa->crypto_alg); + + ASSERT (c->type == RTE_CRYPTO_SYM_XFORM_CIPHER); + + xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + xform->cipher.algo = c->alg; + xform->cipher.key.data = sa->crypto_key.data; + xform->cipher.key.length = c->key_len; + xform->cipher.iv.offset = + crypto_op_get_priv_offset () + offsetof (dpdk_op_priv_t, cb); + xform->cipher.iv.length = c->iv_len; + xform->next = NULL; + + if (is_outbound) + xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; + else + xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; +} + +static void +crypto_set_auth_xform (struct rte_crypto_sym_xform *xform, + ipsec_sa_t * sa, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *a; + + a = vec_elt_at_index (dcm->auth_algs, sa->integ_alg); + + ASSERT (a->type == RTE_CRYPTO_SYM_XFORM_AUTH); + + xform->type = RTE_CRYPTO_SYM_XFORM_AUTH; + xform->auth.algo = a->alg; + xform->auth.key.data = sa->integ_key.data; + xform->auth.key.length = a->key_len; + xform->auth.digest_length = a->trunc_size; + xform->next = NULL; + + if (is_outbound) + xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; + else + xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; +} + +clib_error_t * +create_sym_session (struct rte_cryptodev_sym_session **session, + u32 sa_idx, + crypto_resource_t * res, + crypto_worker_main_t * cwm, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + ipsec_sa_t *sa; + struct rte_crypto_sym_xform cipher_xform = { 0 }; + struct rte_crypto_sym_xform auth_xform = { 0 }; + struct rte_crypto_sym_xform *xfs; + struct rte_cryptodev_sym_session **s; + clib_error_t *error = 0; + + sa = ipsec_sa_get (sa_idx); + + if ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) | + (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) | + (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)) + { + crypto_set_aead_xform (&cipher_xform, sa, is_outbound); + xfs = &cipher_xform; + } + else + { + crypto_set_cipher_xform (&cipher_xform, sa, is_outbound); + crypto_set_auth_xform (&auth_xform, sa, is_outbound); + + if (is_outbound) + { + cipher_xform.next = &auth_xform; + xfs = &cipher_xform; + } + else + { + auth_xform.next = &cipher_xform; + xfs = &auth_xform; + } + } + + data = vec_elt_at_index (dcm->data, res->numa); + clib_spinlock_lock_if_init (&data->lockp); + + /* + * DPDK_VER >= 1708: + * Multiple worker/threads share the session for an SA + * Single session per SA, initialized for each device driver + */ + s = (void *) hash_get (data->session_by_sa_index, sa_idx); + + if (!s) + { + session[0] = rte_cryptodev_sym_session_create (data->session_h); + if (!session[0]) + { + data->session_h_failed += 1; + error = clib_error_return (0, "failed to create session header"); + goto done; + } + hash_set (data->session_by_sa_index, sa_idx, session[0]); + } + else + session[0] = s[0]; + + struct rte_mempool **mp; + mp = vec_elt_at_index (data->session_drv, res->drv_id); + ASSERT (mp[0] != NULL); + + i32 ret = + rte_cryptodev_sym_session_init (res->dev_id, session[0], xfs, mp[0]); + if (ret) + { + data->session_drv_failed[res->drv_id] += 1; + error = clib_error_return (0, "failed to init session for drv %u", + res->drv_id); + 
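/* note: the session header created above stays in session_by_sa_index; it is reclaimed through the per-numa disposal list once the SA is deleted */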
goto done; + } + + add_session_by_drv_and_sa_idx (session[0], data, res->drv_id, sa_idx); + +done: + clib_spinlock_unlock_if_init (&data->lockp); + return error; +} + +static void __attribute__ ((unused)) clear_and_free_obj (void *obj) +{ + struct rte_mempool *mp = rte_mempool_from_obj (obj); + + clib_memset (obj, 0, mp->elt_size); + + rte_mempool_put (mp, obj); +} + +/* This is from rte_cryptodev_pmd.h */ +static inline void * +get_session_private_data (const struct rte_cryptodev_sym_session *sess, + uint8_t driver_id) +{ +#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) + return sess->sess_private_data[driver_id]; +#else + if (unlikely (sess->nb_drivers <= driver_id)) + return 0; + + return sess->sess_data[driver_id].data; +#endif +} + +/* This is from rte_cryptodev_pmd.h */ +static inline void +set_session_private_data (struct rte_cryptodev_sym_session *sess, + uint8_t driver_id, void *private_data) +{ +#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) + sess->sess_private_data[driver_id] = private_data; +#else + if (unlikely (sess->nb_drivers <= driver_id)) + return; + sess->sess_data[driver_id].data = private_data; +#endif +} + +static clib_error_t * +dpdk_crypto_session_disposal (crypto_session_disposal_t * v, u64 ts) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_session_disposal_t *s; + void *drv_session; + u32 drv_id; + i32 ret; + + /* *INDENT-OFF* */ + vec_foreach (s, v) + { + /* ordered vector by timestamp */ + if (!(s->ts + dcm->session_timeout < ts)) + break; + + vec_foreach_index (drv_id, dcm->drv) + { + drv_session = get_session_private_data (s->session, drv_id); + if (!drv_session) + continue; + + /* + * Custom clear to avoid finding a dev_id for drv_id: + * ret = rte_cryptodev_sym_session_clear (dev_id, drv_session); + * ASSERT (!ret); + */ + clear_and_free_obj (drv_session); + + set_session_private_data (s->session, drv_id, NULL); + } + + if (rte_mempool_from_obj(s->session)) + { + ret = rte_cryptodev_sym_session_free (s->session); + ASSERT (!ret); + } + } + /* *INDENT-ON* */ + + if (s < vec_end (v)) + vec_delete (v, s - v, 0); + else + vec_reset_length (v); + + return 0; +} + +static clib_error_t * +add_del_sa_session (u32 sa_index, u8 is_add) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + struct rte_cryptodev_sym_session *s; + uword *val; + u32 drv_id; + + if (is_add) + return 0; + + /* *INDENT-OFF* */ + vec_foreach (data, dcm->data) + { + clib_spinlock_lock_if_init (&data->lockp); + val = hash_get (data->session_by_sa_index, sa_index); + if (val) + { + s = (struct rte_cryptodev_sym_session *) val[0]; + vec_foreach_index (drv_id, dcm->drv) + { + val = (uword*) get_session_by_drv_and_sa_idx (data, drv_id, sa_index); + if (val) + add_session_by_drv_and_sa_idx(NULL, data, drv_id, sa_index); + } + + hash_unset (data->session_by_sa_index, sa_index); + + u64 ts = unix_time_now_nsec (); + dpdk_crypto_session_disposal (data->session_disposal, ts); + + crypto_session_disposal_t sd; + sd.ts = ts; + sd.session = s; + + vec_add1 (data->session_disposal, sd); + } + clib_spinlock_unlock_if_init (&data->lockp); + } + /* *INDENT-ON* */ + + return 0; +} + +static clib_error_t * +dpdk_ipsec_check_support (ipsec_sa_t * sa) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + if (sa->integ_alg == IPSEC_INTEG_ALG_NONE) + switch (sa->crypto_alg) + { + case IPSEC_CRYPTO_ALG_NONE: + case IPSEC_CRYPTO_ALG_AES_GCM_128: + case IPSEC_CRYPTO_ALG_AES_GCM_192: + case IPSEC_CRYPTO_ALG_AES_GCM_256: + break; + default: + return clib_error_return (0, "unsupported 
integ-alg %U crypto-alg %U", + format_ipsec_integ_alg, sa->integ_alg, + format_ipsec_crypto_alg, sa->crypto_alg); + } + + /* XXX do we need the NONE check? */ + if (sa->crypto_alg != IPSEC_CRYPTO_ALG_NONE && + dcm->cipher_algs[sa->crypto_alg].disabled) + return clib_error_return (0, "disabled crypto-alg %U", + format_ipsec_crypto_alg, sa->crypto_alg); + + /* XXX do we need the NONE check? */ + if (sa->integ_alg != IPSEC_INTEG_ALG_NONE && + dcm->auth_algs[sa->integ_alg].disabled) + return clib_error_return (0, "disabled integ-alg %U", + format_ipsec_integ_alg, sa->integ_alg); + return NULL; +} + +static void +crypto_parse_capabilities (crypto_dev_t * dev, + const struct rte_cryptodev_capabilities *cap, + u32 n_mains) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *alg; + u8 len, inc; + + for (; cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) + { + /* A single capability maps to multiple cipher/auth algorithms */ + switch (cap->sym.xform_type) + { + case RTE_CRYPTO_SYM_XFORM_AEAD: + case RTE_CRYPTO_SYM_XFORM_CIPHER: + inc = cap->sym.cipher.key_size.increment; + inc = inc ? inc : 1; + for (len = cap->sym.cipher.key_size.min; + len <= cap->sym.cipher.key_size.max; len += inc) + { + alg = cipher_cap_to_alg (cap, len); + if (!alg) + continue; + dev->cipher_support[cipher_alg_index (alg)] = 1; + alg->resources += vec_len (dev->free_resources); + /* At least enough resources to support one algo */ + dcm->enabled |= (alg->resources >= n_mains); + } + break; + case RTE_CRYPTO_SYM_XFORM_AUTH: + inc = cap->sym.auth.digest_size.increment; + inc = inc ? inc : 1; + for (len = cap->sym.auth.digest_size.min; + len <= cap->sym.auth.digest_size.max; len += inc) + { + alg = auth_cap_to_alg (cap, len); + if (!alg) + continue; + dev->auth_support[auth_alg_index (alg)] = 1; + alg->resources += vec_len (dev->free_resources); + /* At least enough resources to support one algo */ + dcm->enabled |= (alg->resources >= n_mains); + } + break; + default: + ; + } + } +} + +static clib_error_t * +crypto_dev_conf (u8 dev, u16 n_qp, u8 numa) +{ + struct rte_cryptodev_config dev_conf = { 0 }; + struct rte_cryptodev_qp_conf qp_conf = { 0 }; + i32 ret; + u16 qp; + char *error_str; + + dev_conf.socket_id = numa; + dev_conf.nb_queue_pairs = n_qp; + + error_str = "failed to configure crypto device %u"; + ret = rte_cryptodev_configure (dev, &dev_conf); + if (ret < 0) + return clib_error_return (0, error_str, dev); + + error_str = "failed to setup crypto device %u queue pair %u"; + qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC; + for (qp = 0; qp < n_qp; qp++) + { +#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) + ret = rte_cryptodev_queue_pair_setup (dev, qp, &qp_conf, numa, NULL); +#else + ret = rte_cryptodev_queue_pair_setup (dev, qp, &qp_conf, numa); +#endif + if (ret < 0) + return clib_error_return (0, error_str, dev, qp); + } + + error_str = "failed to start crypto device %u"; + if (rte_cryptodev_start (dev)) + return clib_error_return (0, error_str, dev); + + return 0; +} + +static void +crypto_scan_devs (u32 n_mains) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + struct rte_cryptodev *cryptodev; + struct rte_cryptodev_info info = { 0 }; + crypto_dev_t *dev; + crypto_resource_t *res; + clib_error_t *error; + u32 i; + u16 max_res_idx, res_idx, j; + u8 drv_id; + + vec_validate_init_empty (dcm->dev, rte_cryptodev_count () - 1, + (crypto_dev_t) EMPTY_STRUCT); + + for (i = 0; i < rte_cryptodev_count (); i++) + { + dev = vec_elt_at_index (dcm->dev, i); + + cryptodev = &rte_cryptodevs[i]; + 
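/* query the PMD's identity and capabilities; devices that cannot chain cipher and auth operations are skipped further down */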
rte_cryptodev_info_get (i, &info); + + dev->id = i; + dev->name = cryptodev->data->name; + dev->numa = rte_cryptodev_socket_id (i); + dev->features = info.feature_flags; + dev->max_qp = info.max_nb_queue_pairs; + drv_id = info.driver_id; + if (drv_id >= vec_len (dcm->drv)) + vec_validate_init_empty (dcm->drv, drv_id, + (crypto_drv_t) EMPTY_STRUCT); + vec_elt_at_index (dcm->drv, drv_id)->name = info.driver_name; + dev->drv_id = drv_id; + vec_add1 (vec_elt_at_index (dcm->drv, drv_id)->devs, i); + + if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) + continue; + + if ((error = crypto_dev_conf (i, dev->max_qp, dev->numa))) + { + clib_error_report (error); + continue; + } + + max_res_idx = dev->max_qp - 1; + + vec_validate (dev->free_resources, max_res_idx); + + res_idx = vec_len (dcm->resource); + vec_validate_init_empty_aligned (dcm->resource, res_idx + max_res_idx, + (crypto_resource_t) EMPTY_STRUCT, + CLIB_CACHE_LINE_BYTES); + + for (j = 0; j <= max_res_idx; j++) + { + vec_elt (dev->free_resources, max_res_idx - j) = res_idx + j; + res = &dcm->resource[res_idx + j]; + res->dev_id = i; + res->drv_id = drv_id; + res->qp_id = j; + res->numa = dev->numa; + res->thread_idx = (u16) ~ 0; + } + + crypto_parse_capabilities (dev, info.capabilities, n_mains); + } +} + +void +crypto_auto_placement (void) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_resource_t *res; + crypto_worker_main_t *cwm; + crypto_dev_t *dev; + u32 thread_idx, skip_master; + u16 res_idx, *idx; + u8 used; + u16 i; + + skip_master = vlib_num_workers () > 0; + + /* *INDENT-OFF* */ + vec_foreach (dev, dcm->dev) + { + vec_foreach_index (thread_idx, dcm->workers_main) + { + if (vec_len (dev->free_resources) == 0) + break; + + if (thread_idx < skip_master) + continue; + + /* Check thread is not already using the device */ + vec_foreach (idx, dev->used_resources) + if (dcm->resource[idx[0]].thread_idx == thread_idx) + continue; + + cwm = vec_elt_at_index (dcm->workers_main, thread_idx); + + used = 0; + res_idx = vec_pop (dev->free_resources); + + /* Set device only for supported algos */ + for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) + if (dev->cipher_support[i] && + cwm->cipher_resource_idx[i] == (u16) ~0) + { + dcm->cipher_algs[i].disabled--; + cwm->cipher_resource_idx[i] = res_idx; + used = 1; + } + + for (i = 0; i < IPSEC_INTEG_N_ALG; i++) + if (dev->auth_support[i] && + cwm->auth_resource_idx[i] == (u16) ~0) + { + dcm->auth_algs[i].disabled--; + cwm->auth_resource_idx[i] = res_idx; + used = 1; + } + + if (!used) + { + vec_add1 (dev->free_resources, res_idx); + continue; + } + + vec_add1 (dev->used_resources, res_idx); + + res = vec_elt_at_index (dcm->resource, res_idx); + + ASSERT (res->thread_idx == (u16) ~0); + res->thread_idx = thread_idx; + + /* Add device to vector of polling resources */ + vec_add1 (cwm->resource_idx, res_idx); + } + } + /* *INDENT-ON* */ +} + +static void +crypto_op_init (struct rte_mempool *mempool, + void *_arg __attribute__ ((unused)), + void *_obj, unsigned i __attribute__ ((unused))) +{ + struct rte_crypto_op *op = _obj; + + op->sess_type = RTE_CRYPTO_OP_WITH_SESSION; + op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + op->phys_addr = rte_mempool_virt2iova (_obj); + op->mempool = mempool; +} + +static clib_error_t * +crypto_create_crypto_op_pool (vlib_main_t * vm, u8 numa) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + dpdk_config_main_t *conf = &dpdk_config_main; + crypto_data_t *data; + u8 *pool_name; + u32 pool_priv_size = 
sizeof (struct rte_crypto_op_pool_private); + struct rte_crypto_op_pool_private *priv; + struct rte_mempool *mp; + + data = vec_elt_at_index (dcm->data, numa); + + /* Already allocated */ + if (data->crypto_op) + return NULL; + + pool_name = format (0, "crypto_pool_numa%u%c", numa, 0); + + if (conf->num_crypto_mbufs == 0) + conf->num_crypto_mbufs = NUM_CRYPTO_MBUFS; + + mp = rte_mempool_create ((char *) pool_name, conf->num_crypto_mbufs, + crypto_op_len (), 512, pool_priv_size, NULL, NULL, + crypto_op_init, NULL, numa, 0); + + vec_free (pool_name); + + if (!mp) + return clib_error_return (0, "failed to create crypto op mempool"); + + /* Initialize mempool private data */ + priv = rte_mempool_get_priv (mp); + priv->priv_size = pool_priv_size; + priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + + data->crypto_op = mp; + + return NULL; +} + +static clib_error_t * +crypto_create_session_h_pool (vlib_main_t * vm, u8 numa) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + u8 *pool_name; + struct rte_mempool *mp; + u32 elt_size; + + data = vec_elt_at_index (dcm->data, numa); + + if (data->session_h) + return NULL; + + pool_name = format (0, "session_h_pool_numa%u%c", numa, 0); + + + elt_size = rte_cryptodev_sym_get_header_session_size (); + +#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) + mp = rte_mempool_create ((char *) pool_name, DPDK_CRYPTO_NB_SESS_OBJS, + elt_size, 512, 0, NULL, NULL, NULL, NULL, numa, 0); +#else + /* XXX Experimental tag in DPDK 19.02 */ + mp = rte_cryptodev_sym_session_pool_create ((char *) pool_name, + DPDK_CRYPTO_NB_SESS_OBJS, + elt_size, 512, 0, numa); +#endif + vec_free (pool_name); + + if (!mp) + return clib_error_return (0, "failed to create crypto session mempool"); + + data->session_h = mp; + + return NULL; +} + +static clib_error_t * +crypto_create_session_drv_pool (vlib_main_t * vm, crypto_dev_t * dev) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + u8 *pool_name; + struct rte_mempool *mp; + u32 elt_size; + u8 numa = dev->numa; + + data = vec_elt_at_index (dcm->data, numa); + + vec_validate (data->session_drv, dev->drv_id); + vec_validate (data->session_drv_failed, dev->drv_id); + vec_validate_aligned (data->session_by_drv_id_and_sa_index, 32, + CLIB_CACHE_LINE_BYTES); + + if (data->session_drv[dev->drv_id]) + return NULL; + + pool_name = format (0, "session_drv%u_pool_numa%u%c", dev->drv_id, numa, 0); + + elt_size = rte_cryptodev_sym_get_private_session_size (dev->id); + mp = + rte_mempool_create ((char *) pool_name, DPDK_CRYPTO_NB_SESS_OBJS, + elt_size, 512, 0, NULL, NULL, NULL, NULL, numa, 0); + + vec_free (pool_name); + + if (!mp) + return clib_error_return (0, "failed to create session drv mempool"); + + data->session_drv[dev->drv_id] = mp; + clib_spinlock_init (&data->lockp); + + return NULL; +} + +static clib_error_t * +crypto_create_pools (vlib_main_t * vm) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + clib_error_t *error = NULL; + crypto_dev_t *dev; + + /* *INDENT-OFF* */ + vec_foreach (dev, dcm->dev) + { + vec_validate_aligned (dcm->data, dev->numa, CLIB_CACHE_LINE_BYTES); + + error = crypto_create_crypto_op_pool (vm, dev->numa); + if (error) + return error; + + error = crypto_create_session_h_pool (vm, dev->numa); + if (error) + return error; + + error = crypto_create_session_drv_pool (vm, dev); + if (error) + return error; + } + /* *INDENT-ON* */ + + return NULL; +} + +static void +crypto_disable (void) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + u8 i; + + 
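/* full tear-down: mark the engine disabled, then release every per-numa mempool before freeing the main vectors */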
dcm->enabled = 0; + + /* *INDENT-OFF* */ + vec_foreach (data, dcm->data) + { + rte_mempool_free (data->crypto_op); + rte_mempool_free (data->session_h); + + vec_foreach_index (i, data->session_drv) + rte_mempool_free (data->session_drv[i]); + + vec_free (data->session_drv); + clib_spinlock_free (&data->lockp); + } + /* *INDENT-ON* */ + + vec_free (dcm->data); + vec_free (dcm->workers_main); + vec_free (dcm->dev); + vec_free (dcm->resource); + vec_free (dcm->cipher_algs); + vec_free (dcm->auth_algs); +} + +static clib_error_t * +dpdk_ipsec_enable_disable (int is_enable) +{ + vlib_main_t *vm = vlib_get_main (); + vlib_thread_main_t *tm = vlib_get_thread_main (); + vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "dpdk-crypto-input"); + u32 skip_master = vlib_num_workers () > 0; + u32 n_mains = tm->n_vlib_mains; + u32 i; + + ASSERT (node); + for (i = skip_master; i < n_mains; i++) + vlib_node_set_state (vlib_mains[i], node->index, is_enable != 0 ? + VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED); + + return 0; +} + +static clib_error_t * +dpdk_ipsec_main_init (vlib_main_t * vm) +{ + ipsec_main_t *im = &ipsec_main; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); + crypto_worker_main_t *cwm; + clib_error_t *error = NULL; + u32 skip_master, n_mains; + + n_mains = tm->n_vlib_mains; + skip_master = vlib_num_workers () > 0; + + algos_init (n_mains - skip_master); + + crypto_scan_devs (n_mains - skip_master); + + if (!(dcm->enabled)) + { + vlib_log_warn (dpdk_main.log_default, + "not enough DPDK crypto resources"); + crypto_disable (); + return 0; + } + + dcm->session_timeout = 10e9; + + vec_validate_init_empty_aligned (dcm->workers_main, n_mains - 1, + (crypto_worker_main_t) EMPTY_STRUCT, + CLIB_CACHE_LINE_BYTES); + + /* *INDENT-OFF* */ + vec_foreach (cwm, dcm->workers_main) + { + vec_validate_init_empty_aligned (cwm->ops, VLIB_FRAME_SIZE - 1, 0, + CLIB_CACHE_LINE_BYTES); + clib_memset (cwm->cipher_resource_idx, ~0, + IPSEC_CRYPTO_N_ALG * sizeof(*cwm->cipher_resource_idx)); + clib_memset (cwm->auth_resource_idx, ~0, + IPSEC_INTEG_N_ALG * sizeof(*cwm->auth_resource_idx)); + } + /* *INDENT-ON* */ + + crypto_auto_placement (); + + error = crypto_create_pools (vm); + if (error) + { + clib_error_report (error); + crypto_disable (); + return 0; + } + + u32 idx = ipsec_register_esp_backend ( + vm, im, "dpdk backend", "dpdk-esp4-encrypt", "dpdk-esp4-encrypt-tun", + "dpdk-esp4-decrypt", "dpdk-esp4-decrypt", "dpdk-esp6-encrypt", + "dpdk-esp6-encrypt-tun", "dpdk-esp6-decrypt", "dpdk-esp6-decrypt", + "error-drop", dpdk_ipsec_check_support, add_del_sa_session, + dpdk_ipsec_enable_disable); + int rv; + if (im->esp_current_backend == ~0) + { + rv = ipsec_select_esp_backend (im, idx); + ASSERT (rv == 0); + } + return 0; +} + +VLIB_MAIN_LOOP_ENTER_FUNCTION (dpdk_ipsec_main_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/extras/deprecated/dpdk-ipsec/ipsec.h b/extras/deprecated/dpdk-ipsec/ipsec.h new file mode 100644 index 00000000000..368120e18fa --- /dev/null +++ b/extras/deprecated/dpdk-ipsec/ipsec.h @@ -0,0 +1,404 @@ +/* + * Copyright (c) 2017 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef __DPDK_IPSEC_H__ +#define __DPDK_IPSEC_H__ + +#include +#include +#include +#include + +#undef always_inline +#include +#include +#include + +#if CLIB_DEBUG > 0 +#define always_inline static inline +#else +#define always_inline static inline __attribute__ ((__always_inline__)) +#endif + +#define DPDK_CRYPTO_N_QUEUE_DESC 2048 +#define DPDK_CRYPTO_NB_SESS_OBJS 20000 + +#define foreach_dpdk_crypto_input_next \ + _(DROP, "error-drop") \ + _(IP4_LOOKUP, "ip4-lookup") \ + _(IP6_LOOKUP, "ip6-lookup") \ + _(INTERFACE_OUTPUT, "interface-output") \ + _(MIDCHAIN, "adj-midchain-tx") \ + _(DECRYPT4_POST, "dpdk-esp4-decrypt-post") \ + _(DECRYPT6_POST, "dpdk-esp6-decrypt-post") + +typedef enum +{ +#define _(f,s) DPDK_CRYPTO_INPUT_NEXT_##f, + foreach_dpdk_crypto_input_next +#undef _ + DPDK_CRYPTO_INPUT_N_NEXT, +} dpdk_crypto_input_next_t; + +#define MAX_QP_PER_LCORE 16 + +typedef struct +{ + u32 salt; + u32 iv[2]; + u32 cnt; +} dpdk_gcm_cnt_blk; + +typedef struct +{ + u32 next; + u32 bi; + u8 encrypt; + CLIB_ALIGN_MARK (mark0, 16); + dpdk_gcm_cnt_blk cb; + u8 aad[16]; + u8 icv[32]; /* XXX last 16B in next cache line */ +} dpdk_op_priv_t; + +typedef struct +{ + u16 *resource_idx; + struct rte_crypto_op **ops; + u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG]; + u16 auth_resource_idx[IPSEC_INTEG_N_ALG]; + CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); +} crypto_worker_main_t; + +typedef struct +{ + CLIB_ALIGN_MARK (pad, 8); /* align up to 8 bytes for 32bit builds */ + char *name; + enum rte_crypto_sym_xform_type type; + u32 alg; + u8 key_len; + u8 iv_len; + u8 trunc_size; + u8 boundary; + u8 disabled; + u8 resources; +} crypto_alg_t; + +typedef struct +{ + u16 *free_resources; + u16 *used_resources; + u8 cipher_support[IPSEC_CRYPTO_N_ALG]; + u8 auth_support[IPSEC_INTEG_N_ALG]; + u8 drv_id; + u8 numa; + u16 id; + const char *name; + u32 max_qp; + u64 features; +} crypto_dev_t; + +typedef struct +{ + const char *name; + u16 *devs; +} crypto_drv_t; + +typedef struct +{ + u16 thread_idx; + u8 remove; + u8 drv_id; + u8 dev_id; + u8 numa; + u16 qp_id; + u16 inflights[2]; + u16 n_ops; + u16 __unused; + struct rte_crypto_op *ops[VLIB_FRAME_SIZE]; + u32 bi[VLIB_FRAME_SIZE]; + CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); +} crypto_resource_t; + +typedef struct +{ + u64 ts; + struct rte_cryptodev_sym_session *session; +} crypto_session_disposal_t; + +typedef struct +{ + struct rte_cryptodev_sym_session *session; + u64 dev_mask; + CLIB_ALIGN_MARK (pad, 16); /* align up to 16 bytes for 32bit builds */ +} crypto_session_by_drv_t; + +typedef struct +{ + struct rte_mempool *crypto_op; + struct rte_mempool *session_h; + struct rte_mempool **session_drv; + crypto_session_disposal_t *session_disposal; + uword *session_by_sa_index; + u64 crypto_op_get_failed; + u64 session_h_failed; + u64 *session_drv_failed; + crypto_session_by_drv_t *session_by_drv_id_and_sa_index; + clib_spinlock_t lockp; + /* Required for vec_validate_aligned */ + CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); +} crypto_data_t; + +typedef struct +{ + crypto_worker_main_t *workers_main; + crypto_dev_t *dev; + crypto_resource_t *resource; + 
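/* cipher_algs and auth_algs are vectors indexed by ipsec_crypto_alg_t and ipsec_integ_alg_t respectively (populated in algos_init) */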
crypto_alg_t *cipher_algs; + crypto_alg_t *auth_algs; + crypto_data_t *data; + crypto_drv_t *drv; + u64 session_timeout; /* nsec */ + u8 enabled; +} dpdk_crypto_main_t; + +extern dpdk_crypto_main_t dpdk_crypto_main; + +static const u8 pad_data[] = + { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0 }; + +void crypto_auto_placement (void); + +clib_error_t *create_sym_session (struct rte_cryptodev_sym_session **session, + u32 sa_idx, crypto_resource_t * res, + crypto_worker_main_t * cwm, u8 is_outbound); + +static_always_inline u32 +crypto_op_len (void) +{ + const u32 align = 4; + u32 op_size = + sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op); + + return ((op_size + align - 1) & ~(align - 1)) + sizeof (dpdk_op_priv_t); +} + +static_always_inline u32 +crypto_op_get_priv_offset (void) +{ + const u32 align = 16; + u32 offset; + + offset = sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op); + offset = (offset + align - 1) & ~(align - 1); + + return offset; +} + +static_always_inline dpdk_op_priv_t * +crypto_op_get_priv (struct rte_crypto_op * op) +{ + return (dpdk_op_priv_t *) (((u8 *) op) + crypto_op_get_priv_offset ()); +} + + +static_always_inline void +add_session_by_drv_and_sa_idx (struct rte_cryptodev_sym_session *session, + crypto_data_t * data, u32 drv_id, u32 sa_idx) +{ + crypto_session_by_drv_t *sbd; + vec_validate_aligned (data->session_by_drv_id_and_sa_index, sa_idx, + CLIB_CACHE_LINE_BYTES); + sbd = vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx); + sbd->dev_mask |= 1L << drv_id; + sbd->session = session; +} + +static_always_inline struct rte_cryptodev_sym_session * +get_session_by_drv_and_sa_idx (crypto_data_t * data, u32 drv_id, u32 sa_idx) +{ + crypto_session_by_drv_t *sess_by_sa; + if (_vec_len (data->session_by_drv_id_and_sa_index) <= sa_idx) + return NULL; + sess_by_sa = + vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx); + return (sess_by_sa->dev_mask & (1L << drv_id)) ? 
sess_by_sa->session : NULL; +} + +static_always_inline clib_error_t * +crypto_get_session (struct rte_cryptodev_sym_session ** session, + u32 sa_idx, + crypto_resource_t * res, + crypto_worker_main_t * cwm, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + struct rte_cryptodev_sym_session *sess; + + data = vec_elt_at_index (dcm->data, res->numa); + sess = get_session_by_drv_and_sa_idx (data, res->drv_id, sa_idx); + + if (PREDICT_FALSE (!sess)) + return create_sym_session (session, sa_idx, res, cwm, is_outbound); + + session[0] = sess; + + return NULL; +} + +static_always_inline u16 +get_resource (crypto_worker_main_t * cwm, ipsec_sa_t * sa) +{ + u16 cipher_res = cwm->cipher_resource_idx[sa->crypto_alg]; + u16 auth_res = cwm->auth_resource_idx[sa->integ_alg]; + u8 is_aead; + + /* Not allowed to setup SA with no-aead-cipher/NULL or NULL/NULL */ + + is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) || + (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) || + (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)); + + if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE) + return auth_res; + + if (cipher_res == auth_res) + return cipher_res; + + if (is_aead) + return cipher_res; + + return (u16) ~ 0; +} + +static_always_inline i32 +crypto_alloc_ops (u8 numa, struct rte_crypto_op ** ops, u32 n) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data = vec_elt_at_index (dcm->data, numa); + i32 ret; + + ret = rte_mempool_get_bulk (data->crypto_op, (void **) ops, n); + + /* *INDENT-OFF* */ + data->crypto_op_get_failed += ! !ret; + /* *INDENT-ON* */ + + return ret; +} + +static_always_inline void +crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data = vec_elt_at_index (dcm->data, numa); + + if (!n) + return; + + rte_mempool_put_bulk (data->crypto_op, (void **) ops, n); +} + +static_always_inline void +crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm, + u32 node_index, u32 error, u8 numa, u8 encrypt) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_resource_t *res; + u16 *res_idx; + + /* *INDENT-OFF* */ + vec_foreach (res_idx, cwm->resource_idx) + { + u16 enq, n_ops; + res = vec_elt_at_index (dcm->resource, res_idx[0]); + + if (!res->n_ops) + continue; + + n_ops = (DPDK_CRYPTO_N_QUEUE_DESC / 2) - res->inflights[encrypt]; + n_ops = res->n_ops < n_ops ? 
res->n_ops : n_ops; + enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id, + res->ops, n_ops); + ASSERT (n_ops == enq); + res->inflights[encrypt] += enq; + + if (PREDICT_FALSE (enq < res->n_ops)) + { + crypto_free_ops (numa, &res->ops[enq], res->n_ops - enq); + vlib_buffer_free (vm, &res->bi[enq], res->n_ops - enq); + + vlib_node_increment_counter (vm, node_index, error, + res->n_ops - enq); + } + res->n_ops = 0; + } + /* *INDENT-ON* */ +} + +static_always_inline void +crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi) +{ + icb->salt = salt; + icb->iv[0] = seq; + icb->iv[1] = seq_hi; +} + +static_always_inline void +crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0, + struct rte_crypto_op *op, void *session, + u32 cipher_off, u32 cipher_len, + u32 auth_off, u32 auth_len, + u8 * aad, u8 * digest, u64 digest_paddr) +{ + struct rte_crypto_sym_op *sym_op; + + sym_op = (struct rte_crypto_sym_op *) (op + 1); + + sym_op->m_src = mb0; + sym_op->session = session; + + if (is_aead) + { + sym_op->aead.data.offset = cipher_off; + sym_op->aead.data.length = cipher_len; + + sym_op->aead.aad.data = aad; + sym_op->aead.aad.phys_addr = + op->phys_addr + (uintptr_t) aad - (uintptr_t) op; + + sym_op->aead.digest.data = digest; + sym_op->aead.digest.phys_addr = digest_paddr; + } + else + { + sym_op->cipher.data.offset = cipher_off; + sym_op->cipher.data.length = cipher_len; + + sym_op->auth.data.offset = auth_off; + sym_op->auth.data.length = auth_len; + + sym_op->auth.digest.data = digest; + sym_op->auth.digest.phys_addr = digest_paddr; + } +} + +#endif /* __DPDK_IPSEC_H__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/dpdk/CMakeLists.txt b/src/plugins/dpdk/CMakeLists.txt index 7db6b094be7..6ec48578852 100644 --- a/src/plugins/dpdk/CMakeLists.txt +++ b/src/plugins/dpdk/CMakeLists.txt @@ -134,24 +134,15 @@ add_vpp_plugin(dpdk device/format.c device/init.c device/node.c - ipsec/cli.c - ipsec/crypto_node.c - ipsec/esp_decrypt.c - ipsec/esp_encrypt.c - ipsec/ipsec.c cryptodev/${DPDK_CRYPTODEV_SOURCE}.c MULTIARCH_SOURCES buffer.c device/device.c device/node.c - ipsec/crypto_node.c - ipsec/esp_decrypt.c - ipsec/esp_encrypt.c INSTALL_HEADERS device/dpdk.h - ipsec/ipsec.h LINK_FLAGS "${DPDK_LINK_FLAGS}" diff --git a/src/plugins/dpdk/cryptodev/cryptodev.c b/src/plugins/dpdk/cryptodev/cryptodev.c index f51a5a527dc..d87a16c37a0 100644 --- a/src/plugins/dpdk/cryptodev/cryptodev.c +++ b/src/plugins/dpdk/cryptodev/cryptodev.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include @@ -108,7 +108,7 @@ typedef enum typedef struct { - struct rte_cryptodev_sym_session *keys[CRYPTODEV_N_OP_TYPES]; + struct rte_cryptodev_sym_session ***keys; } cryptodev_key_t; typedef struct @@ -120,6 +120,7 @@ typedef struct typedef struct { + CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); struct rte_mempool *cop_pool; struct rte_mempool *sess_pool; struct rte_mempool *sess_priv_pool; @@ -148,10 +149,10 @@ typedef struct cryptodev_main_t cryptodev_main; -static int +static_always_inline int prepare_aead_xform (struct rte_crypto_sym_xform *xform, - cryptodev_op_type_t op_type, - const vnet_crypto_key_t * key, u32 aad_len) + cryptodev_op_type_t op_type, const vnet_crypto_key_t *key, + u32 aad_len) { struct rte_crypto_aead_xform *aead_xform = &xform->aead; memset (xform, 0, sizeof (*xform)); @@ -176,10 +177,10 @@ prepare_aead_xform (struct rte_crypto_sym_xform *xform, return 0; } -static int 
+static_always_inline int prepare_linked_xform (struct rte_crypto_sym_xform *xforms, cryptodev_op_type_t op_type, - const vnet_crypto_key_t * key) + const vnet_crypto_key_t *key) { struct rte_crypto_sym_xform *xform_cipher, *xform_auth; vnet_crypto_key_t *key_cipher, *key_auth; @@ -240,57 +241,7 @@ prepare_linked_xform (struct rte_crypto_sym_xform *xforms, return 0; } -static int -cryptodev_session_create (vnet_crypto_key_t * const key, - struct rte_mempool *sess_priv_pool, - cryptodev_key_t * session_pair, u32 aad_len) -{ - struct rte_crypto_sym_xform xforms_enc[2] = { {0} }; - struct rte_crypto_sym_xform xforms_dec[2] = { {0} }; - cryptodev_main_t *cmt = &cryptodev_main; - cryptodev_inst_t *dev_inst; - struct rte_cryptodev *cdev; - int ret; - uint8_t dev_id = 0; - - if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) - ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key); - else - ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, - aad_len); - if (ret) - return 0; - - if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) - prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key); - else - prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len); - - vec_foreach (dev_inst, cmt->cryptodev_inst) - { - dev_id = dev_inst->dev_id; - cdev = rte_cryptodev_pmd_get_dev (dev_id); - - /* if the session is already configured for the driver type, avoid - configuring it again to increase the session data's refcnt */ - if (session_pair->keys[0]->sess_data[cdev->driver_id].data && - session_pair->keys[1]->sess_data[cdev->driver_id].data) - continue; - - ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0], - xforms_enc, sess_priv_pool); - ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1], - xforms_dec, sess_priv_pool); - if (ret < 0) - return ret; - } - session_pair->keys[0]->opaque_data = aad_len; - session_pair->keys[1]->opaque_data = aad_len; - - return 0; -} - -static void +static_always_inline void cryptodev_session_del (struct rte_cryptodev_sym_session *sess) { u32 n_devs, i; @@ -306,8 +257,8 @@ cryptodev_session_del (struct rte_cryptodev_sym_session *sess) rte_cryptodev_sym_session_free (sess); } -static int -cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key) +static_always_inline int +cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key) { vnet_crypto_alg_t alg; if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) @@ -324,77 +275,137 @@ cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key) return -1; } -static_always_inline void -cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop, - vnet_crypto_key_index_t idx, u32 aad_len) +static_always_inline int +cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx, + u32 aad_len) { cryptodev_main_t *cmt = &cryptodev_main; cryptodev_numa_data_t *numa_data; + cryptodev_inst_t *dev_inst; vnet_crypto_key_t *key = vnet_crypto_get_key (idx); struct rte_mempool *sess_pool, *sess_priv_pool; - cryptodev_key_t *ckey = 0; - int ret = 0; - - if (kop == VNET_CRYPTO_KEY_OP_DEL) - { - if (idx >= vec_len (cmt->keys)) - return; - - ckey = pool_elt_at_index (cmt->keys, idx); - cryptodev_session_del (ckey->keys[0]); - cryptodev_session_del (ckey->keys[1]); - ckey->keys[0] = 0; - ckey->keys[1] = 0; - pool_put (cmt->keys, ckey); - return; - } - else if (kop == VNET_CRYPTO_KEY_OP_MODIFY) - { - if (idx >= vec_len (cmt->keys)) - return; - - ckey = pool_elt_at_index (cmt->keys, idx); - - cryptodev_session_del (ckey->keys[0]); - cryptodev_session_del 
(ckey->keys[1]); - ckey->keys[0] = 0; - ckey->keys[1] = 0; - } - else /* create key */ - pool_get_zero (cmt->keys, ckey); - - /* do not create session for unsupported alg */ - if (cryptodev_check_supported_vnet_alg (key)) - return; + cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx); + struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } }; + struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } }; + struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 }; + u32 numa_node = vm->numa_node; + int ret; - numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node); + numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node); sess_pool = numa_data->sess_pool; sess_priv_pool = numa_data->sess_priv_pool; - ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool); - if (!ckey->keys[0]) + sessions[CRYPTODEV_OP_TYPE_ENCRYPT] = + rte_cryptodev_sym_session_create (sess_pool); + if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT]) { ret = -1; goto clear_key; } - ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool); - if (!ckey->keys[1]) + sessions[CRYPTODEV_OP_TYPE_DECRYPT] = + rte_cryptodev_sym_session_create (sess_pool); + if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT]) { ret = -1; goto clear_key; } - ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len); + if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) + ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key); + else + ret = + prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len); + if (ret) + return 0; + + if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) + prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key); + else + prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len); + + vec_foreach (dev_inst, cmt->cryptodev_inst) + { + u32 dev_id = dev_inst->dev_id; + struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id); + + /* if the session is already configured for the driver type, avoid + configuring it again to increase the session data's refcnt */ + if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT] + ->sess_data[cdev->driver_id] + .data && + sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[cdev->driver_id].data) + continue; + + ret = rte_cryptodev_sym_session_init ( + dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc, + sess_priv_pool); + ret = rte_cryptodev_sym_session_init ( + dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec, + sess_priv_pool); + if (ret < 0) + return ret; + } + + sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len; + sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len; + + CLIB_MEMORY_STORE_BARRIER (); + ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] = + sessions[CRYPTODEV_OP_TYPE_ENCRYPT]; + ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] = + sessions[CRYPTODEV_OP_TYPE_DECRYPT]; clear_key: if (ret != 0) { - cryptodev_session_del (ckey->keys[0]); - cryptodev_session_del (ckey->keys[1]); - memset (ckey, 0, sizeof (*ckey)); - pool_put (cmt->keys, ckey); + cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]); + cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]); + } + return ret; +} + +static_always_inline void +cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop, + vnet_crypto_key_index_t idx, u32 aad_len) +{ + cryptodev_main_t *cmt = &cryptodev_main; + vnet_crypto_key_t *key = vnet_crypto_get_key (idx); + cryptodev_key_t *ckey = 0; + u32 i; + + vec_validate (cmt->keys, idx); + ckey = vec_elt_at_index (cmt->keys, idx); + + if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == 
VNET_CRYPTO_KEY_OP_MODIFY) + { + if (idx >= vec_len (cmt->keys)) + return; + + vec_foreach_index (i, cmt->per_numa_data) + { + if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]) + { + cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]); + cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]); + + CLIB_MEMORY_STORE_BARRIER (); + ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0; + ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0; + } + } + return; } + + /* create key */ + + /* do not create session for unsupported alg */ + if (cryptodev_check_supported_vnet_alg (key)) + return; + + vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1); + vec_foreach_index (i, ckey->keys) + vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1); } /*static*/ void @@ -474,11 +485,11 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm, cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node; cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index; vnet_crypto_async_frame_elt_t *fe; + struct rte_cryptodev_sym_session *sess = 0; cryptodev_op_t **cop; u32 *bi; u32 n_enqueue, n_elts; - cryptodev_key_t *key; - u32 last_key_index; + u32 last_key_index = ~0; if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0)) return -1; @@ -505,9 +516,6 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm, cop[0]->frame = frame; cop[0]->n_elts = n_elts; - key = pool_elt_at_index (cmt->keys, fe->key_index); - last_key_index = fe->key_index; - while (n_elts) { vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]); @@ -525,8 +533,20 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm, } if (last_key_index != fe->key_index) { - key = pool_elt_at_index (cmt->keys, fe->key_index); + cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index); last_key_index = fe->key_index; + + if (key->keys[vm->numa_node][op_type] == 0) + { + if (PREDICT_FALSE ( + cryptodev_session_create (vm, last_key_index, 0) < 0)) + { + cryptodev_mark_frame_err_status ( + frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); + return -1; + } + } + sess = key->keys[vm->numa_node][op_type]; } sop->m_src = rte_mbuf_from_vlib_buffer (b); @@ -542,7 +562,7 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm, integ_offset = 0; crypto_offset = offset_diff; } - sop->session = key->keys[op_type]; + sop->session = sess; sop->cipher.data.offset = crypto_offset; sop->cipher.data.length = fe->crypto_total_length; sop->auth.data.offset = integ_offset; @@ -586,12 +606,11 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm, cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node; cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index; vnet_crypto_async_frame_elt_t *fe; + struct rte_cryptodev_sym_session *sess = 0; cryptodev_op_t **cop; u32 *bi; u32 n_enqueue = 0, n_elts; - cryptodev_key_t *key; - u32 last_key_index; - u8 sess_aad_len; + u32 last_key_index = ~0; if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0)) return -1; @@ -618,13 +637,6 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm, cop[0]->frame = frame; cop[0]->n_elts = n_elts; - key = pool_elt_at_index (cmt->keys, fe->key_index); - last_key_index = fe->key_index; - sess_aad_len = (u8) key->keys[op_type]->opaque_data; - if (PREDICT_FALSE (sess_aad_len != aad_len)) - cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY, - fe->key_index, aad_len); - while (n_elts) { vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]); @@ -640,14 +652,35 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm, } if (last_key_index != fe->key_index) { - key = pool_elt_at_index 
(cmt->keys, fe->key_index); - sess_aad_len = (u8) key->keys[op_type]->opaque_data; - if (PREDICT_FALSE (sess_aad_len != aad_len)) + cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index); + + last_key_index = fe->key_index; + if (key->keys[vm->numa_node][op_type] == 0) { - cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY, + if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index, + aad_len) < 0)) + { + cryptodev_mark_frame_err_status ( + frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); + return -1; + } + } + else if (PREDICT_FALSE ( + key->keys[vm->numa_node][op_type]->opaque_data != + aad_len)) + { + cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL, fe->key_index, aad_len); + if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index, + aad_len) < 0)) + { + cryptodev_mark_frame_err_status ( + frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); + return -1; + } } - last_key_index = fe->key_index; + + sess = key->keys[vm->numa_node][op_type]; } sop->m_src = rte_mbuf_from_vlib_buffer (b); @@ -662,7 +695,7 @@ crypto_offset = 0; } - sop->session = key->keys[op_type]; + sop->session = sess; sop->aead.aad.data = cop[0]->aad; sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET; sop->aead.data.length = fe->crypto_total_length; @@ -1072,12 +1105,7 @@ cryptodev_count_queue (u32 numa) for (i = 0; i < n_cryptodev; i++) { rte_cryptodev_info_get (i, &info); - if (rte_cryptodev_socket_id (i) != numa) - { - clib_warning ("DPDK crypto resource %s is in different numa node " - "as %u, ignored", info.device->name, numa); - continue; - } + /* only devices that support symmetric crypto are used */ if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO)) continue; @@ -1093,8 +1121,6 @@ cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id) struct rte_cryptodev_info info; struct rte_cryptodev *cdev; cryptodev_main_t *cmt = &cryptodev_main; - cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data, - vm->numa_node); u32 i; int ret; @@ -1116,7 +1142,7 @@ cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id) { struct rte_cryptodev_config cfg; - cfg.socket_id = vm->numa_node; + cfg.socket_id = info.device->numa_node; cfg.nb_queue_pairs = info.max_nb_queue_pairs; rte_cryptodev_configure (cryptodev_id, &cfg); @@ -1127,12 +1153,12 @@ int ret; - qp_cfg.mp_session = numa_data->sess_pool; - qp_cfg.mp_session_private = numa_data->sess_priv_pool; + qp_cfg.mp_session = 0; + qp_cfg.mp_session_private = 0; qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS; ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg, - vm->numa_node); + info.device->numa_node); if (ret) break; } @@ -1275,58 +1301,6 @@ dpdk_cryptodev_init (vlib_main_t * vm) /* A total of 4 times n_worker threads * frame size as crypto ops */ n_cop_elts = max_pow2 ((u64)n_workers * CRYPTODEV_NB_CRYPTO_OPS); - vec_validate (cmt->per_numa_data, vm->numa_node); - numa_data = vec_elt_at_index (cmt->per_numa_data, numa); - - /* create session pool for the numa node */ - name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0); - mp = rte_cryptodev_sym_session_pool_create ((char *) name, - CRYPTODEV_NB_SESSION, - 0, 0, 0, numa); - if (!mp) - { - error = clib_error_return (0, "Not enough memory for mp %s", name); - goto err_handling; - } - vec_free (name); - - numa_data->sess_pool = mp; - - /* create session private pool for the numa node */ - name = format (0, "cryptodev_sess_pool_%u%c", numa,
0); - mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0, - 0, NULL, NULL, NULL, NULL, numa, 0); - if (!mp) - { - error = clib_error_return (0, "Not enough memory for mp %s", name); - vec_free (name); - goto err_handling; - } - - vec_free (name); - - numa_data->sess_priv_pool = mp; - - /* create cryptodev op pool */ - name = format (0, "cryptodev_op_pool_%u%c", numa, 0); - - mp = rte_mempool_create ((char *) name, n_cop_elts, - sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2, - sizeof (struct rte_crypto_op_pool_private), NULL, - NULL, crypto_op_init, NULL, numa, 0); - if (!mp) - { - error = clib_error_return (0, "Not enough memory for mp %s", name); - vec_free (name); - goto err_handling; - } - - priv = rte_mempool_get_priv (mp); - priv->priv_size = sizeof (struct rte_crypto_op_pool_private); - priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; - vec_free (name); - numa_data->cop_pool = mp; - /* probe all cryptodev devices and get queue info */ if (cryptodev_probe (vm, n_workers) < 0) { @@ -1342,6 +1316,7 @@ dpdk_cryptodev_init (vlib_main_t * vm) for (i = skip_master; i < tm->n_vlib_mains; i++) { ptd = cmt->per_thread_data + i; + cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO); name = format (0, "frames_ring_%u%c", i, 0); ptd->ring = rte_ring_create((char *) name, CRYPTODEV_NB_CRYPTO_OPS, @@ -1354,11 +1329,67 @@ dpdk_cryptodev_init (vlib_main_t * vm) } vec_validate (ptd->cops, VNET_CRYPTO_FRAME_SIZE - 1); vec_free(name); + + numa = vlib_mains[i]->numa_node; + + vec_validate (cmt->per_numa_data, numa); + numa_data = vec_elt_at_index (cmt->per_numa_data, numa); + + if (numa_data->sess_pool) + continue; + + /* create session pool for the numa node */ + name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0); + mp = rte_cryptodev_sym_session_pool_create ( + (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa); + if (!mp) + { + error = clib_error_return (0, "Not enough memory for mp %s", name); + goto err_handling; + } + vec_free (name); + + numa_data->sess_pool = mp; + + /* create session private pool for the numa node */ + name = format (0, "cryptodev_sess_pool_%u%c", numa, 0); + mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0, + 0, NULL, NULL, NULL, NULL, numa, 0); + if (!mp) + { + error = clib_error_return (0, "Not enough memory for mp %s", name); + vec_free (name); + goto err_handling; + } + + vec_free (name); + + numa_data->sess_priv_pool = mp; + + /* create cryptodev op pool */ + name = format (0, "cryptodev_op_pool_%u%c", numa, 0); + + mp = rte_mempool_create ((char *) name, n_cop_elts, + sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2, + sizeof (struct rte_crypto_op_pool_private), + NULL, NULL, crypto_op_init, NULL, numa, 0); + if (!mp) + { + error = clib_error_return (0, "Not enough memory for mp %s", name); + vec_free (name); + goto err_handling; + } + + priv = rte_mempool_get_priv (mp); + priv->priv_size = sizeof (struct rte_crypto_op_pool_private); + priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + vec_free (name); + numa_data->cop_pool = mp; } /* register handler */ - eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79, - "DPDK Cryptodev Engine"); + eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100, + "DPDK Cryptodev Engine"); #define _(a, b, c, d, e, f) \ vnet_crypto_register_async_handler \ @@ -1388,6 +1419,12 @@ dpdk_cryptodev_init (vlib_main_t * vm) vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler); + /* this engine is only enabled when cryptodev device(s) are presented in + * startup.conf. 
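The relocation above is the heart of the NUMA fix: session and op pools are now created inside the per-thread loop, keyed by each worker's own node rather than the main thread's, and allocated only once per node. Condensed from the patch, the guard reads:

/* the first thread seen on a numa node allocates its pools; later
 * threads on the same node fall through and reuse them */
numa = vlib_mains[i]->numa_node;
vec_validate (cmt->per_numa_data, numa);
numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

if (numa_data->sess_pool)
  continue;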
Assume it is wanted to be used, turn on async mode here. + */ + vnet_crypto_request_async_mode (1); + ipsec_set_async_mode (1); + return 0; err_handling: diff --git a/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c b/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c index 0a33d25bd61..420bb89dc0a 100644 --- a/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c +++ b/src/plugins/dpdk/cryptodev/cryptodev_dp_api.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include @@ -84,14 +84,13 @@ typedef enum typedef struct { - union rte_cryptodev_session_ctx keys[CRYPTODEV_N_OP_TYPES]; + union rte_cryptodev_session_ctx **keys; } cryptodev_key_t; typedef struct { u32 dev_id; u32 q_id; - struct rte_crypto_raw_dp_ctx *raw_dp_ctx_buffer; char *desc; } cryptodev_inst_t; @@ -114,6 +113,7 @@ typedef struct u16 cryptodev_id; u16 cryptodev_q; u16 inflight; + union rte_cryptodev_session_ctx reset_sess; /* session data for reset ctx */ } cryptodev_engine_thread_t; typedef struct @@ -129,10 +129,10 @@ typedef struct cryptodev_main_t cryptodev_main; -static int +static_always_inline int prepare_aead_xform (struct rte_crypto_sym_xform *xform, - cryptodev_op_type_t op_type, - const vnet_crypto_key_t * key, u32 aad_len) + cryptodev_op_type_t op_type, const vnet_crypto_key_t *key, + u32 aad_len) { struct rte_crypto_aead_xform *aead_xform = &xform->aead; memset (xform, 0, sizeof (*xform)); @@ -157,10 +157,10 @@ prepare_aead_xform (struct rte_crypto_sym_xform *xform, return 0; } -static int +static_always_inline int prepare_linked_xform (struct rte_crypto_sym_xform *xforms, cryptodev_op_type_t op_type, - const vnet_crypto_key_t * key) + const vnet_crypto_key_t *key) { struct rte_crypto_sym_xform *xform_cipher, *xform_auth; vnet_crypto_key_t *key_cipher, *key_auth; @@ -221,18 +221,57 @@ prepare_linked_xform (struct rte_crypto_sym_xform *xforms, return 0; } -static int -cryptodev_session_create (vnet_crypto_key_t * const key, - struct rte_mempool *sess_priv_pool, - cryptodev_key_t * session_pair, u32 aad_len) +static_always_inline void +cryptodev_session_del (struct rte_cryptodev_sym_session *sess) +{ + u32 n_devs, i; + + if (sess == NULL) + return; + + n_devs = rte_cryptodev_count (); + + for (i = 0; i < n_devs; i++) + rte_cryptodev_sym_session_clear (i, sess); + + rte_cryptodev_sym_session_free (sess); +} + +static_always_inline int +cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx, + u32 aad_len) { - struct rte_crypto_sym_xform xforms_enc[2] = { {0} }; - struct rte_crypto_sym_xform xforms_dec[2] = { {0} }; cryptodev_main_t *cmt = &cryptodev_main; + cryptodev_numa_data_t *numa_data; cryptodev_inst_t *dev_inst; - struct rte_cryptodev *cdev; + vnet_crypto_key_t *key = vnet_crypto_get_key (idx); + struct rte_mempool *sess_pool, *sess_priv_pool; + cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx); + struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } }; + struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } }; + struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 }; + u32 numa_node = vm->numa_node; int ret; - uint8_t dev_id = 0; + + numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node); + sess_pool = numa_data->sess_pool; + sess_priv_pool = numa_data->sess_priv_pool; + + sessions[CRYPTODEV_OP_TYPE_ENCRYPT] = + rte_cryptodev_sym_session_create (sess_pool); + if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT]) + { + ret = -1; + goto clear_key; + } + + sessions[CRYPTODEV_OP_TYPE_DECRYPT] = + rte_cryptodev_sym_session_create (sess_pool); + if 
(!sessions[CRYPTODEV_OP_TYPE_DECRYPT]) + { + ret = -1; + goto clear_key; + } if (key->type == VNET_CRYPTO_KEY_TYPE_LINK) ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key); @@ -249,44 +288,39 @@ cryptodev_session_create (vnet_crypto_key_t * const key, vec_foreach (dev_inst, cmt->cryptodev_inst) { - dev_id = dev_inst->dev_id; - cdev = rte_cryptodev_pmd_get_dev (dev_id); + u32 dev_id = dev_inst->dev_id; + struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id); /* if the session is already configured for the driver type, avoid configuring it again to increase the session data's refcnt */ - if (session_pair->keys[0].crypto_sess->sess_data[cdev->driver_id].data && - session_pair->keys[1].crypto_sess->sess_data[cdev->driver_id].data) + if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->sess_data[cdev->driver_id].data && + sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[cdev->driver_id].data) continue; - ret = rte_cryptodev_sym_session_init (dev_id, - session_pair->keys[0].crypto_sess, - xforms_enc, sess_priv_pool); - ret = rte_cryptodev_sym_session_init (dev_id, - session_pair->keys[1].crypto_sess, - xforms_dec, sess_priv_pool); + ret = rte_cryptodev_sym_session_init ( + dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc, sess_priv_pool); + ret = rte_cryptodev_sym_session_init ( + dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec, sess_priv_pool); if (ret < 0) return ret; } - session_pair->keys[0].crypto_sess->opaque_data = aad_len; - session_pair->keys[1].crypto_sess->opaque_data = aad_len; - return 0; -} + sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len; + sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len; -static void -cryptodev_session_del (struct rte_cryptodev_sym_session *sess) -{ - u32 n_devs, i; - - if (sess == NULL) - return; - - n_devs = rte_cryptodev_count (); + CLIB_MEMORY_STORE_BARRIER (); + ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess = + sessions[CRYPTODEV_OP_TYPE_ENCRYPT]; + ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess = + sessions[CRYPTODEV_OP_TYPE_DECRYPT]; - for (i = 0; i < n_devs; i++) - rte_cryptodev_sym_session_clear (i, sess); - - rte_cryptodev_sym_session_free (sess); +clear_key: + if (ret != 0) + { + cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]); + cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]); + } + return ret; } static int @@ -312,72 +346,44 @@ cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop, vnet_crypto_key_index_t idx, u32 aad_len) { cryptodev_main_t *cmt = &cryptodev_main; - cryptodev_numa_data_t *numa_data; vnet_crypto_key_t *key = vnet_crypto_get_key (idx); - struct rte_mempool *sess_pool, *sess_priv_pool; cryptodev_key_t *ckey = 0; - int ret = 0; + u32 i; + + vec_validate (cmt->keys, idx); + ckey = vec_elt_at_index (cmt->keys, idx); - if (kop == VNET_CRYPTO_KEY_OP_DEL) + if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY) { if (idx >= vec_len (cmt->keys)) return; - ckey = pool_elt_at_index (cmt->keys, idx); - cryptodev_session_del (ckey->keys[0].crypto_sess); - cryptodev_session_del (ckey->keys[1].crypto_sess); - ckey->keys[0].crypto_sess = 0; - ckey->keys[1].crypto_sess = 0; - pool_put (cmt->keys, ckey); + vec_foreach_index (i, cmt->per_numa_data) + { + if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess) + { + cryptodev_session_del ( + ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess); + cryptodev_session_del ( + ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess); + + CLIB_MEMORY_STORE_BARRIER (); + 
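This teardown and the create path above share one publish discipline: every write that builds or clears a session completes before the per-numa pointer changes, so a datapath thread that loads a non-zero pointer always sees a fully initialized session. Folded into a single helper (the function name is ours; the logic follows the patch), the lookup both enqueue paths below perform is roughly:

static_always_inline struct rte_cryptodev_sym_session *
cryptodev_get_session (vlib_main_t *vm, u32 key_index,
                       cryptodev_op_type_t op_type, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, key_index);
  struct rte_cryptodev_sym_session *s =
    ckey->keys[vm->numa_node][op_type].crypto_sess;

  if (PREDICT_FALSE (s == 0))
    {
      /* first use of this key on this numa node: build it lazily */
      if (cryptodev_session_create (vm, key_index, aad_len) < 0)
        return 0;
    }
  else if (PREDICT_FALSE ((u8) s->opaque_data != aad_len))
    {
      /* cached AAD length differs (e.g. ESN toggled: ESP AAD is 8
       * bytes without ESN, 12 with): rebuild the key's sessions */
      cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL, key_index, aad_len);
      if (cryptodev_session_create (vm, key_index, aad_len) < 0)
        return 0;
    }

  return ckey->keys[vm->numa_node][op_type].crypto_sess;
}

A zero return is the failure path: the callers mark the whole frame VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR and reset the raw DP context.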
ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT].crypto_sess = 0; + ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT].crypto_sess = 0; + } + } return; } - else if (kop == VNET_CRYPTO_KEY_OP_MODIFY) - { - if (idx >= vec_len (cmt->keys)) - return; - - ckey = pool_elt_at_index (cmt->keys, idx); - cryptodev_session_del (ckey->keys[0].crypto_sess); - cryptodev_session_del (ckey->keys[1].crypto_sess); - ckey->keys[0].crypto_sess = 0; - ckey->keys[1].crypto_sess = 0; - } - else /* create key */ - pool_get_zero (cmt->keys, ckey); + /* create key */ /* do not create session for unsupported alg */ if (cryptodev_check_supported_vnet_alg (key)) return; - numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node); - sess_pool = numa_data->sess_pool; - sess_priv_pool = numa_data->sess_priv_pool; - - ckey->keys[0].crypto_sess = rte_cryptodev_sym_session_create (sess_pool); - if (!ckey->keys[0].crypto_sess) - { - ret = -1; - goto clear_key; - } - - ckey->keys[1].crypto_sess = rte_cryptodev_sym_session_create (sess_pool); - if (!ckey->keys[1].crypto_sess) - { - ret = -1; - goto clear_key; - } - - ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len); - -clear_key: - if (ret != 0) - { - cryptodev_session_del (ckey->keys[0].crypto_sess); - cryptodev_session_del (ckey->keys[1].crypto_sess); - memset (ckey, 0, sizeof (*ckey)); - pool_put (cmt->keys, ckey); - } + vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1); + vec_foreach_index (i, ckey->keys) + vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1); } /*static*/ void @@ -449,13 +455,12 @@ compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t * fe, i16 * min_ofs, return ofs.raw; } -/* Reset cryptodev dp context to previous queue pair state */ static_always_inline void -cryptodev_reset_ctx (u16 cdev_id, u16 qid, struct rte_crypto_raw_dp_ctx *ctx) +cryptodev_reset_ctx (cryptodev_engine_thread_t *cet) { - union rte_cryptodev_session_ctx session_ctx = {.crypto_sess = NULL }; - - rte_cryptodev_configure_raw_dp_ctx (cdev_id, qid, ctx, ~0, session_ctx, 0); + rte_cryptodev_configure_raw_dp_ctx (cet->cryptodev_id, cet->cryptodev_q, + cet->ctx, RTE_CRYPTO_OP_WITH_SESSION, + cet->reset_sess, 0); } static_always_inline int @@ -470,7 +475,6 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm, struct rte_crypto_va_iova_ptr iv_vec, digest_vec; vlib_buffer_t **b; u32 n_elts; - cryptodev_key_t *key; u32 last_key_index = ~0; i16 min_ofs; u32 max_end; @@ -506,20 +510,24 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm, if (PREDICT_FALSE (last_key_index != fe->key_index)) { - key = pool_elt_at_index (cmt->keys, fe->key_index); - last_key_index = fe->key_index; + cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index); - if (PREDICT_FALSE - (rte_cryptodev_configure_raw_dp_ctx - (cet->cryptodev_id, cet->cryptodev_q, cet->ctx, - RTE_CRYPTO_OP_WITH_SESSION, key->keys[op_type], 1) < 0)) + if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess == + 0)) { - cryptodev_mark_frame_err_status (frame, - VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); - cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, - cet->ctx); - return -1; + status = cryptodev_session_create (vm, fe->key_index, 0); + if (PREDICT_FALSE (status < 0)) + goto error_exit; } + + status = rte_cryptodev_configure_raw_dp_ctx ( + cet->cryptodev_id, cet->cryptodev_q, cet->ctx, + RTE_CRYPTO_OP_WITH_SESSION, key->keys[vm->numa_node][op_type], + /*is_update */ 1); + if (PREDICT_FALSE (status < 0)) + goto error_exit; + + last_key_index = fe->key_index; } cofs.raw = 
compute_ofs_linked_alg (fe, &min_ofs, &max_end); @@ -547,27 +555,15 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm, if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)) { vec[0].len = b[0]->current_data + b[0]->current_length - min_ofs; - if (cryptodev_frame_build_sgl - (vm, cmt->iova_mode, vec, &n_seg, b[0], - max_end - min_ofs - vec->len) < 0) - { - cryptodev_mark_frame_err_status (frame, - VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); - cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, - cet->ctx); - return -1; - } + if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0], + max_end - min_ofs - vec->len) < 0) + goto error_exit; } status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec, &digest_vec, 0, (void *) frame); - if (status < 0) - { - cryptodev_mark_frame_err_status (frame, - VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); - cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx); - return -1; - } + if (PREDICT_FALSE (status < 0)) + goto error_exit; b++; fe++; @@ -577,12 +573,18 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm, status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts); if (PREDICT_FALSE (status < 0)) { - cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx); + cryptodev_reset_ctx (cet); return -1; } cet->inflight += frame->n_elts; return 0; + +error_exit: + cryptodev_mark_frame_err_status (frame, + VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); + cryptodev_reset_ctx (cet); + return -1; } static_always_inline int @@ -595,15 +597,12 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm, vnet_crypto_async_frame_elt_t *fe; vlib_buffer_t **b; u32 n_elts; - cryptodev_key_t *key; - u32 last_key_index = ~0; union rte_crypto_sym_ofs cofs; struct rte_crypto_vec *vec; struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec; - u8 sess_aad_len = 0; + u32 last_key_index = ~0; int status; - n_elts = frame->n_elts; if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts)) @@ -631,28 +630,36 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm, vlib_prefetch_buffer_header (b[1], LOAD); } - if (last_key_index != fe->key_index) + if (PREDICT_FALSE (last_key_index != fe->key_index)) { - key = pool_elt_at_index (cmt->keys, fe->key_index); - sess_aad_len = (u8) key->keys[op_type].crypto_sess->opaque_data; - if (PREDICT_FALSE (sess_aad_len != aad_len)) + cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index); + + if (PREDICT_FALSE (key->keys[vm->numa_node][op_type].crypto_sess == + 0)) { - cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY, - fe->key_index, aad_len); + status = cryptodev_session_create (vm, fe->key_index, aad_len); + if (PREDICT_FALSE (status < 0)) + goto error_exit; } - last_key_index = fe->key_index; - if (PREDICT_FALSE - (rte_cryptodev_configure_raw_dp_ctx - (cet->cryptodev_id, cet->cryptodev_q, cet->ctx, - RTE_CRYPTO_OP_WITH_SESSION, key->keys[op_type], 1) < 0)) + if (PREDICT_FALSE ((u8) key->keys[vm->numa_node][op_type] + .crypto_sess->opaque_data != aad_len)) { - cryptodev_mark_frame_err_status (frame, - VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); - cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, - cet->ctx); - return -1; + cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL, + fe->key_index, aad_len); + status = cryptodev_session_create (vm, fe->key_index, aad_len); + if (PREDICT_FALSE (status < 0)) + goto error_exit; } + + status = rte_cryptodev_configure_raw_dp_ctx ( + cet->cryptodev_id, cet->cryptodev_q, cet->ctx, + RTE_CRYPTO_OP_WITH_SESSION, 
key->keys[vm->numa_node][op_type], + /*is_update */ 1); + if (PREDICT_FALSE (status < 0)) + goto error_exit; + + last_key_index = fe->key_index; } if (cmt->iova_mode == RTE_IOVA_VA) @@ -692,31 +699,21 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm, if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)) { - vec[0].len = b[0]->current_data + - b[0]->current_length - fe->crypto_start_offset; - if (cryptodev_frame_build_sgl - (vm, cmt->iova_mode, vec, &n_seg, b[0], - fe->crypto_total_length - vec[0].len) < 0) - { - cryptodev_mark_frame_err_status (frame, - VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); - cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, - cet->ctx); - return -1; - } + vec[0].len = b[0]->current_data + b[0]->current_length - + fe->crypto_start_offset; + status = + cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0], + fe->crypto_total_length - vec[0].len); + if (status < 0) + goto error_exit; } status = - rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, - &iv_vec, &digest_vec, &aad_vec, - (void *) frame); + rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec, + &digest_vec, &aad_vec, (void *) frame); if (PREDICT_FALSE (status < 0)) - { - cryptodev_mark_frame_err_status (frame, - VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); - cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx); - return -1; - } + goto error_exit; + fe++; b++; n_elts--; @@ -724,14 +721,17 @@ cryptodev_frame_gcm_enqueue (vlib_main_t * vm, status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts); if (PREDICT_FALSE (status < 0)) - { - cryptodev_reset_ctx (cet->cryptodev_id, cet->cryptodev_q, cet->ctx); - return -1; - } + goto error_exit; cet->inflight += frame->n_elts; return 0; + +error_exit: + cryptodev_mark_frame_err_status (frame, + VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR); + cryptodev_reset_ctx (cet); + return -1; } static u32 @@ -999,7 +999,7 @@ cryptodev_assign_resource (cryptodev_engine_thread_t * cet, cinst = vec_elt_at_index (cmt->cryptodev_inst, idx); cet->cryptodev_id = cinst->dev_id; cet->cryptodev_q = cinst->q_id; - cet->ctx = cinst->raw_dp_ctx_buffer; + cryptodev_reset_ctx (cet); clib_spinlock_unlock (&cmt->tlock); break; case CRYPTODEV_RESOURCE_ASSIGN_UPDATE: @@ -1024,7 +1024,7 @@ cryptodev_assign_resource (cryptodev_engine_thread_t * cet, cinst = cmt->cryptodev_inst + cryptodev_inst_index; cet->cryptodev_id = cinst->dev_id; cet->cryptodev_q = cinst->q_id; - cet->ctx = cinst->raw_dp_ctx_buffer; + cryptodev_reset_ctx (cet); clib_spinlock_unlock (&cmt->tlock); break; default: @@ -1214,12 +1214,6 @@ cryptodev_count_queue (u32 numa) for (i = 0; i < n_cryptodev; i++) { rte_cryptodev_info_get (i, &info); - if (rte_cryptodev_socket_id (i) != numa) - { - clib_warning ("DPDK crypto resource %s is in different numa node " - "as %u, ignored", info.device->name, numa); - continue; - } q_count += info.max_nb_queue_pairs; } @@ -1229,16 +1223,12 @@ cryptodev_count_queue (u32 numa) static int cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id) { + struct rte_cryptodev_config cfg; struct rte_cryptodev_info info; - struct rte_cryptodev *cdev; cryptodev_main_t *cmt = &cryptodev_main; - cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data, - vm->numa_node); - u32 dp_size = 0; u32 i; int ret; - cdev = rte_cryptodev_pmd_get_dev (cryptodev_id); rte_cryptodev_info_get (cryptodev_id, &info); if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) @@ -1246,46 +1236,41 @@ cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id) ret = 
check_cryptodev_alg_support (cryptodev_id); if (ret != 0) - return ret; + { + clib_warning ( + "Cryptodev: device %u does not support required algorithms", + cryptodev_id); + return ret; + } + cfg.socket_id = info.device->numa_node; + cfg.nb_queue_pairs = info.max_nb_queue_pairs; + rte_cryptodev_configure (cryptodev_id, &cfg); - /** If the device is already started, we reuse it, otherwise configure - * both the device and queue pair. - **/ - if (!cdev->data->dev_started) + for (i = 0; i < info.max_nb_queue_pairs; i++) { - struct rte_cryptodev_config cfg; + struct rte_cryptodev_qp_conf qp_cfg; - cfg.socket_id = vm->numa_node; - cfg.nb_queue_pairs = info.max_nb_queue_pairs; + qp_cfg.mp_session = 0; + qp_cfg.mp_session_private = 0; + qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS; - rte_cryptodev_configure (cryptodev_id, &cfg); - - for (i = 0; i < info.max_nb_queue_pairs; i++) + ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg, + info.device->numa_node); + if (ret) { - struct rte_cryptodev_qp_conf qp_cfg; - - qp_cfg.mp_session = numa_data->sess_pool; - qp_cfg.mp_session_private = numa_data->sess_priv_pool; - qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS; - - ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg, - vm->numa_node); - if (ret) - break; + clib_warning ("Cryptodev: Configure device %u queue %u failed %d", + cryptodev_id, i, ret); + break; } - if (i != info.max_nb_queue_pairs) - return -1; - - /* start the device */ - rte_cryptodev_start (i); } - ret = rte_cryptodev_get_raw_dp_ctx_size (cryptodev_id); - if (ret < 0) + if (i != info.max_nb_queue_pairs) return -1; - dp_size = ret; + + /* start the device */ + rte_cryptodev_start (cryptodev_id); for (i = 0; i < info.max_nb_queue_pairs; i++) { @@ -1294,9 +1279,6 @@ cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id) cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10); cdev_inst->dev_id = cryptodev_id; cdev_inst->q_id = i; - vec_validate_aligned (cdev_inst->raw_dp_ctx_buffer, dp_size, 8); - cryptodev_reset_ctx (cdev_inst->dev_id, cdev_inst->q_id, - cdev_inst->raw_dp_ctx_buffer); snprintf (cdev_inst->desc, strlen (info.device->name) + 9, "%s_q%u", info.device->name, i); @@ -1345,22 +1327,24 @@ cryptodev_probe (vlib_main_t *vm, u32 n_workers) return 0; } -static int -cryptodev_get_session_sz (vlib_main_t *vm, u32 n_workers) +static void +cryptodev_get_max_sz (u32 *max_sess_sz, u32 *max_dp_sz) { - u32 sess_data_sz = 0, i; - - if (rte_cryptodev_count () == 0) - return -1; + cryptodev_main_t *cmt = &cryptodev_main; + cryptodev_inst_t *cinst; + u32 max_sess = 0, max_dp = 0; - for (i = 0; i < rte_cryptodev_count (); i++) + vec_foreach (cinst, cmt->cryptodev_inst) { - u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i); + u32 sess_sz = rte_cryptodev_sym_get_private_session_size (cinst->dev_id); + u32 dp_sz = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id); - sess_data_sz = dev_sess_sz > sess_data_sz ? 
dev_sess_sz : sess_data_sz; + max_sess = clib_max (sess_sz, max_sess); + max_dp = clib_max (dp_sz, max_dp); } - return sess_data_sz; + *max_sess_sz = max_sess; + *max_dp_sz = max_dp; } static void @@ -1384,7 +1368,74 @@ dpdk_disable_cryptodev_engine (vlib_main_t * vm) rte_free (ptd->aad_buf); if (ptd->cached_frame) rte_ring_free (ptd->cached_frame); + if (ptd->reset_sess.crypto_sess) + { + struct rte_mempool *mp = + rte_mempool_from_obj ((void *) ptd->reset_sess.crypto_sess); + + rte_mempool_free (mp); + ptd->reset_sess.crypto_sess = 0; + } + } +} + +static clib_error_t * +create_reset_sess (cryptodev_engine_thread_t *ptd, u32 lcore, u32 numa, + u32 sess_sz) +{ + struct rte_crypto_sym_xform xform = { 0 }; + struct rte_crypto_aead_xform *aead_xform = &xform.aead; + struct rte_cryptodev_sym_session *sess; + struct rte_mempool *mp = 0; + u8 key[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + u8 *name = 0; + clib_error_t *error = 0; + + /* create session pool for the numa node */ + name = format (0, "vcryptodev_s_reset_%u_%u", numa, lcore); + mp = rte_cryptodev_sym_session_pool_create ((char *) name, 2, sess_sz, 0, 0, + numa); + if (!mp) + { + error = clib_error_return (0, "Not enough memory for mp %s", name); + goto error_exit; + } + vec_free (name); + + xform.type = RTE_CRYPTO_SYM_XFORM_AEAD; + aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM; + aead_xform->op = RTE_CRYPTO_AEAD_OP_ENCRYPT; + aead_xform->aad_length = 8; + aead_xform->digest_length = 16; + aead_xform->iv.offset = 0; + aead_xform->iv.length = 12; + aead_xform->key.data = key; + aead_xform->key.length = 16; + + sess = rte_cryptodev_sym_session_create (mp); + if (!sess) + { + error = clib_error_return (0, "failed to create session"); + goto error_exit; + } + + if (rte_cryptodev_sym_session_init (ptd->cryptodev_id, sess, &xform, mp) < 0) + { + error = clib_error_return (0, "failed to create session private"); + goto error_exit; } + + ptd->reset_sess.crypto_sess = sess; + + return 0; + +error_exit: + if (mp) + rte_mempool_free (mp); + if (name) + vec_free (name); + + return error; } clib_error_t * @@ -1398,7 +1449,7 @@ dpdk_cryptodev_init (vlib_main_t * vm) u32 skip_master = vlib_num_workers () > 0; u32 n_workers = tm->n_vlib_mains - skip_master; u32 numa = vm->numa_node; - i32 sess_sz; + u32 sess_sz, dp_sz; u32 eidx; u32 i; u8 *name = 0; @@ -1406,44 +1457,7 @@ dpdk_cryptodev_init (vlib_main_t * vm) cmt->iova_mode = rte_eal_iova_mode (); - sess_sz = cryptodev_get_session_sz(vm, n_workers); - if (sess_sz < 0) - { - error = clib_error_return (0, "Not enough cryptodevs"); - return error; - } - vec_validate (cmt->per_numa_data, vm->numa_node); - numa_data = vec_elt_at_index (cmt->per_numa_data, numa); - - /* create session pool for the numa node */ - name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0); - mp = rte_cryptodev_sym_session_pool_create ((char *) name, - CRYPTODEV_NB_SESSION, - 0, 0, 0, numa); - if (!mp) - { - error = clib_error_return (0, "Not enough memory for mp %s", name); - goto err_handling; - } - vec_free (name); - - numa_data->sess_pool = mp; - - /* create session private pool for the numa node */ - name = format (0, "cryptodev_sess_pool_%u%c", numa, 0); - mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0, - 0, NULL, NULL, NULL, NULL, numa, 0); - if (!mp) - { - error = clib_error_return (0, "Not enough memory for mp %s", name); - vec_free (name); - goto err_handling; - } - - vec_free (name); - - numa_data->sess_priv_pool = mp; /* probe all cryptodev devices and get queue info */ if 
(cryptodev_probe (vm, n_workers) < 0) @@ -1452,6 +1466,8 @@ dpdk_cryptodev_init (vlib_main_t * vm) goto err_handling; } + cryptodev_get_max_sz (&sess_sz, &dp_sz); + clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains); clib_spinlock_init (&cmt->tlock); @@ -1460,11 +1476,13 @@ dpdk_cryptodev_init (vlib_main_t * vm) for (i = skip_master; i < tm->n_vlib_mains; i++) { ptd = cmt->per_thread_data + i; - cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO); + numa = vlib_mains[i]->numa_node; + ptd->aad_buf = rte_zmalloc_socket (0, CRYPTODEV_NB_CRYPTO_OPS * CRYPTODEV_MAX_AAD_SIZE, CLIB_CACHE_LINE_BYTES, numa); + if (ptd->aad_buf == 0) { error = clib_error_return (0, "Failed to alloc aad buf"); @@ -1473,6 +1491,13 @@ dpdk_cryptodev_init (vlib_main_t * vm) ptd->aad_phy_addr = rte_malloc_virt2iova (ptd->aad_buf); + ptd->ctx = rte_zmalloc_socket (0, dp_sz, CLIB_CACHE_LINE_BYTES, numa); + if (!ptd->ctx) + { + error = clib_error_return (0, "Failed to alloc raw dp ctx"); + goto err_handling; + } + name = format (0, "cache_frame_ring_%u%u", numa, i); ptd->cached_frame = rte_ring_create ((char *)name, CRYPTODEV_DEQ_CACHE_SZ, numa, @@ -1480,15 +1505,58 @@ dpdk_cryptodev_init (vlib_main_t * vm) if (ptd->cached_frame == 0) { - error = clib_error_return (0, "Failed to frame ring"); + error = clib_error_return (0, "Failed to alloc frame ring"); goto err_handling; } vec_free (name); + + vec_validate (cmt->per_numa_data, numa); + numa_data = vec_elt_at_index (cmt->per_numa_data, numa); + + if (!numa_data->sess_pool) + { + /* create session pool for the numa node */ + name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0); + mp = rte_cryptodev_sym_session_pool_create ( + (char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa); + if (!mp) + { + error = + clib_error_return (0, "Not enough memory for mp %s", name); + goto err_handling; + } + vec_free (name); + + numa_data->sess_pool = mp; + + /* create session private pool for the numa node */ + name = format (0, "cryptodev_sess_pool_%u%c", numa, 0); + mp = + rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, + 0, 0, NULL, NULL, NULL, NULL, numa, 0); + if (!mp) + { + error = + clib_error_return (0, "Not enough memory for mp %s", name); + vec_free (name); + goto err_handling; + } + + vec_free (name); + + numa_data->sess_priv_pool = mp; + } + + error = create_reset_sess (ptd, i, numa, sess_sz); + if (error) + goto err_handling; + + cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO); } /* register handler */ - eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79, - "DPDK Cryptodev Engine"); + eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100, + "DPDK Cryptodev Engine"); #define _(a, b, c, d, e, f) \ vnet_crypto_register_async_handler \ @@ -1518,6 +1586,12 @@ dpdk_cryptodev_init (vlib_main_t * vm) vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler); + /* this engine is only enabled when cryptodev device(s) are presented in + * startup.conf. Assume it is wanted to be used, turn on async mode here. + */ + vnet_crypto_request_async_mode (1); + ipsec_set_async_mode (1); + return 0; err_handling: diff --git a/src/plugins/dpdk/ipsec/cli.c b/src/plugins/dpdk/ipsec/cli.c deleted file mode 100644 index 8fdda020a77..00000000000 --- a/src/plugins/dpdk/ipsec/cli.c +++ /dev/null @@ -1,674 +0,0 @@ -/* - * Copyright (c) 2017 Intel and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
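Both engine variants now finish initialization the same way as above: the registration priority rises from 79 to 100, so the cryptodev engine is preferred over lower-priority engines when handlers are selected, and async mode is switched on for the crypto subsystem and ipsec as a whole. For reference, each _() entry in the handler list expands to one registration of an enqueue/dequeue pair, roughly as below; the two handler identifiers are placeholders, the real symbols are generated by this file's macros:

vnet_crypto_register_async_handler (
  vm, eidx, VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD8_ENC,
  enqueue_handler /* per-op generated enqueue fn */,
  dequeue_handler /* the common frame dequeue fn */);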
- * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include - -static u8 * -format_crypto_resource (u8 * s, va_list * args) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - - u32 indent = va_arg (*args, u32); - u32 res_idx = va_arg (*args, u32); - - crypto_resource_t *res = vec_elt_at_index (dcm->resource, res_idx); - - - s = format (s, "%U thr_id %3d qp %2u dec_inflight %u, enc_inflights %u\n", - format_white_space, indent, (i16) res->thread_idx, - res->qp_id, res->inflights[0], res->inflights[1]); - - return s; -} - -static u8 * -format_crypto (u8 * s, va_list * args) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_dev_t *dev = va_arg (*args, crypto_dev_t *); - crypto_drv_t *drv = vec_elt_at_index (dcm->drv, dev->drv_id); - u64 feat, mask; - u32 i; - char *pre = " "; - - s = format (s, "%-25s%-20s%-10s\n", dev->name, drv->name, - rte_cryptodevs[dev->id].data->dev_started ? "up" : "down"); - s = format (s, " numa_node %u, max_queues %u\n", dev->numa, dev->max_qp); - - if (dev->features) - { - for (mask = 1; mask != 0; mask <<= 1) - { - feat = dev->features & mask; - if (feat) - { - s = - format (s, "%s%s", pre, - rte_cryptodev_get_feature_name (feat)); - pre = ", "; - } - } - s = format (s, "\n"); - } - - s = format (s, " Cipher:"); - pre = " "; - for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) - if (dev->cipher_support[i]) - { - s = format (s, "%s%s", pre, dcm->cipher_algs[i].name); - pre = ", "; - } - s = format (s, "\n"); - - s = format (s, " Auth:"); - pre = " "; - for (i = 0; i < IPSEC_INTEG_N_ALG; i++) - if (dev->auth_support[i]) - { - s = format (s, "%s%s", pre, dcm->auth_algs[i].name); - pre = ", "; - } - s = format (s, "\n"); - - struct rte_cryptodev_stats stats; - rte_cryptodev_stats_get (dev->id, &stats); - - s = - format (s, - " enqueue %-10lu dequeue %-10lu enqueue_err %-10lu dequeue_err %-10lu \n", - stats.enqueued_count, stats.dequeued_count, - stats.enqueue_err_count, stats.dequeue_err_count); - - u16 *res_idx; - s = format (s, " free_resources %u :", vec_len (dev->free_resources)); - - u32 indent = format_get_indent (s); - s = format (s, "\n"); - - /* *INDENT-OFF* */ - vec_foreach (res_idx, dev->free_resources) - s = format (s, "%U", format_crypto_resource, indent, res_idx[0]); - /* *INDENT-ON* */ - - s = format (s, " used_resources %u :", vec_len (dev->used_resources)); - indent = format_get_indent (s); - - s = format (s, "\n"); - - /* *INDENT-OFF* */ - vec_foreach (res_idx, dev->used_resources) - s = format (s, "%U", format_crypto_resource, indent, res_idx[0]); - /* *INDENT-ON* */ - - s = format (s, "\n"); - - return s; -} - - -static clib_error_t * -clear_crypto_stats_fn (vlib_main_t * vm, unformat_input_t * input, - vlib_cli_command_t * cmd) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_dev_t *dev; - - /* *INDENT-OFF* */ - vec_foreach (dev, dcm->dev) - rte_cryptodev_stats_reset (dev->id); - /* *INDENT-ON* */ - - return NULL; -} - -/*? - * This command is used to clear the DPDK Crypto device statistics. 
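With the plugin deprecated this CLI goes away; its effect was a single DPDK call per device, as the handler above shows, so the equivalent remains a short loop using only APIs already referenced in this file:

u8 dev_id, n_devs = rte_cryptodev_count ();

/* reset enqueue/dequeue and error counters on every cryptodev */
for (dev_id = 0; dev_id < n_devs; dev_id++)
  rte_cryptodev_stats_reset (dev_id);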
- * - * @cliexpar - * Example of how to clear the DPDK Crypto device statistics: - * @cliexsart{clear dpdk crypto devices statistics} - * vpp# clear dpdk crypto devices statistics - * @cliexend - * Example of clearing the DPDK Crypto device statistic data: - * @cliexend -?*/ -/* *INDENT-OFF* */ -VLIB_CLI_COMMAND (clear_dpdk_crypto_stats, static) = { - .path = "clear dpdk crypto devices statistics", - .short_help = "clear dpdk crypto devices statistics", - .function = clear_crypto_stats_fn, -}; -/* *INDENT-ON* */ - - -static clib_error_t * -show_dpdk_crypto_fn (vlib_main_t * vm, unformat_input_t * input, - vlib_cli_command_t * cmd) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_dev_t *dev; - - /* *INDENT-OFF* */ - vec_foreach (dev, dcm->dev) - vlib_cli_output (vm, "%U", format_crypto, dev); - /* *INDENT-ON* */ - - return NULL; -} - -/*? - * This command is used to display the DPDK Crypto device information. - * - * @cliexpar - * Example of how to display the DPDK Crypto device information: - * @cliexsart{show dpdk crypto devices} - * vpp# show dpdk crypto devices - * aesni_mb0 crypto_aesni_mb up - * numa_node 0, max_queues 4 - * SYMMETRIC_CRYPTO, SYM_OPERATION_CHAINING, CPU_AVX2, CPU_AESNI - * Cipher: aes-cbc-128, aes-cbc-192, aes-cbc-256, aes-ctr-128, aes-ctr-192, aes-ctr-256, aes-gcm-128, aes-gcm-192, aes-gcm-256 - * Auth: md5-96, sha1-96, sha-256-128, sha-384-192, sha-512-256 - * enqueue 2 dequeue 2 enqueue_err 0 dequeue_err 0 - * free_resources 3 : - * thr_id -1 qp 3 inflight 0 - * thr_id -1 qp 2 inflight 0 - * thr_id -1 qp 1 inflight 0 - * used_resources 1 : - * thr_id 1 qp 0 inflight 0 - * @cliexend - * Example of displaying the DPDK Crypto device data when enabled: - * @cliexend -?*/ -/* *INDENT-OFF* */ -VLIB_CLI_COMMAND (show_dpdk_crypto, static) = { - .path = "show dpdk crypto devices", - .short_help = "show dpdk crypto devices", - .function = show_dpdk_crypto_fn, -}; - -/* *INDENT-ON* */ -static u8 * -format_crypto_worker (u8 * s, va_list * args) -{ - u32 thread_idx = va_arg (*args, u32); - u8 verbose = (u8) va_arg (*args, u32); - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_worker_main_t *cwm; - crypto_resource_t *res; - u16 *res_idx; - char *pre, *ind; - u32 i; - - cwm = vec_elt_at_index (dcm->workers_main, thread_idx); - - s = format (s, "Thread %u (%v):\n", thread_idx, - vlib_worker_threads[thread_idx].name); - - /* *INDENT-OFF* */ - vec_foreach (res_idx, cwm->resource_idx) - { - ind = " "; - res = vec_elt_at_index (dcm->resource, res_idx[0]); - s = format (s, "%s%-20s dev-id %2u queue-pair %2u\n", - ind, vec_elt_at_index (dcm->dev, res->dev_id)->name, - res->dev_id, res->qp_id); - - ind = " "; - if (verbose) - { - s = format (s, "%sCipher:", ind); - pre = " "; - for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) - if (cwm->cipher_resource_idx[i] == res_idx[0]) - { - s = format (s, "%s%s", pre, dcm->cipher_algs[i].name); - pre = ", "; - } - s = format (s, "\n"); - - s = format (s, "%sAuth:", ind); - pre = " "; - for (i = 0; i < IPSEC_INTEG_N_ALG; i++) - if (cwm->auth_resource_idx[i] == res_idx[0]) - { - s = format (s, "%s%s", pre, dcm->auth_algs[i].name); - pre = ", "; - } - s = format (s, "\n"); - } - } - /* *INDENT-ON* */ - - return s; -} - -static clib_error_t * -common_crypto_placement_fn (vlib_main_t * vm, unformat_input_t * input, - vlib_cli_command_t * cmd, u8 verbose) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - clib_error_t *error = NULL; - u32 i; - u8 skip_master; - - if (!dcm->enabled) - { - vlib_cli_output (vm, "\nDPDK Cryptodev support is 
disabled\n"); - return error; - } - - skip_master = vlib_num_workers () > 0; - - /* *INDENT-OFF* */ - vec_foreach_index (i, dcm->workers_main) - { - if (i < skip_master) - continue; - - vlib_cli_output (vm, "%U\n", format_crypto_worker, i, verbose); - } - /* *INDENT-ON* */ - - return error; -} - -static clib_error_t * -show_dpdk_crypto_placement_fn (vlib_main_t * vm, unformat_input_t * input, - vlib_cli_command_t * cmd) -{ - return common_crypto_placement_fn (vm, input, cmd, 0); -} - -static clib_error_t * -show_dpdk_crypto_placement_v_fn (vlib_main_t * vm, unformat_input_t * input, - vlib_cli_command_t * cmd) -{ - return common_crypto_placement_fn (vm, input, cmd, 1); -} - -/*? - * This command is used to display the DPDK Crypto device placement. - * - * @cliexpar - * Example of displaying the DPDK Crypto device placement: - * @cliexstart{show dpdk crypto placement} - * vpp# show dpdk crypto placement - * Thread 1 (vpp_wk_0): - * cryptodev_aesni_mb_p dev-id 0 queue-pair 0 - * cryptodev_aesni_gcm_ dev-id 1 queue-pair 0 - * - * Thread 2 (vpp_wk_1): - * cryptodev_aesni_mb_p dev-id 0 queue-pair 1 - * cryptodev_aesni_gcm_ dev-id 1 queue-pair 1 - * @cliexend -?*/ -/* *INDENT-OFF* */ -VLIB_CLI_COMMAND (show_dpdk_crypto_placement, static) = { - .path = "show dpdk crypto placement", - .short_help = "show dpdk crypto placement", - .function = show_dpdk_crypto_placement_fn, -}; -/* *INDENT-ON* */ - -/*? - * This command is used to display the DPDK Crypto device placement - * with verbose output. - * - * @cliexpar - * Example of displaying the DPDK Crypto device placement verbose: - * @cliexstart{show dpdk crypto placement verbose} - * vpp# show dpdk crypto placement verbose - * Thread 1 (vpp_wk_0): - * cryptodev_aesni_mb_p dev-id 0 queue-pair 0 - * Cipher: aes-cbc-128, aes-cbc-192, aes-cbc-256, aes-ctr-128, aes-ctr-192, aes-ctr-256 - * Auth: md5-96, sha1-96, sha-256-128, sha-384-192, sha-512-256 - * cryptodev_aesni_gcm_ dev-id 1 queue-pair 0 - * Cipher: aes-gcm-128, aes-gcm-192, aes-gcm-256 - * Auth: - * - * Thread 2 (vpp_wk_1): - * cryptodev_aesni_mb_p dev-id 0 queue-pair 1 - * Cipher: aes-cbc-128, aes-cbc-192, aes-cbc-256, aes-ctr-128, aes-ctr-192, aes-ctr-256 - * Auth: md5-96, sha1-96, sha-256-128, sha-384-192, sha-512-256 - * cryptodev_aesni_gcm_ dev-id 1 queue-pair 1 - * Cipher: aes-gcm-128, aes-gcm-192, aes-gcm-256 - * Auth: - * - * @cliexend -?*/ -/* *INDENT-OFF* */ -VLIB_CLI_COMMAND (show_dpdk_crypto_placement_v, static) = { - .path = "show dpdk crypto placement verbose", - .short_help = "show dpdk crypto placement verbose", - .function = show_dpdk_crypto_placement_v_fn, -}; -/* *INDENT-ON* */ - -static clib_error_t * -set_dpdk_crypto_placement_fn (vlib_main_t * vm, - unformat_input_t * input, - vlib_cli_command_t * cmd) -{ - unformat_input_t _line_input, *line_input = &_line_input; - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_worker_main_t *cwm; - crypto_dev_t *dev; - u32 thread_idx, i; - u16 res_idx, *idx; - u8 dev_idx, auto_en = 0; - - if (!unformat_user (input, unformat_line_input, line_input)) - return clib_error_return (0, "invalid syntax"); - - while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) - { - if (unformat (line_input, "%u %u", &dev_idx, &thread_idx)) - ; - else if (unformat (line_input, "auto")) - auto_en = 1; - else - { - unformat_free (line_input); - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); - } - } - - unformat_free (line_input); - - if (auto_en) - { - crypto_auto_placement (); - return 0; - } - - /* 
TODO support device name */ - - if (!(dev_idx < vec_len (dcm->dev))) - return clib_error_return (0, "please specify valid device index"); - - if (thread_idx != (u32) ~ 0 && !(thread_idx < vec_len (dcm->workers_main))) - return clib_error_return (0, "invalid thread index"); - - dev = vec_elt_at_index (dcm->dev, dev_idx); - if (!(vec_len (dev->free_resources))) - return clib_error_return (0, "all device resources are being used"); - - /* Check thread is not already using the device */ - /* *INDENT-OFF* */ - vec_foreach (idx, dev->used_resources) - if (dcm->resource[idx[0]].thread_idx == thread_idx) - return clib_error_return (0, "thread %u already using device %u", - thread_idx, dev_idx); - /* *INDENT-ON* */ - - res_idx = vec_pop (dev->free_resources); - vec_add1 (dev->used_resources, res_idx); - - cwm = vec_elt_at_index (dcm->workers_main, thread_idx); - - ASSERT (dcm->resource[res_idx].thread_idx == (u16) ~ 0); - dcm->resource[res_idx].thread_idx = thread_idx; - - /* Add device to vector of polling resources */ - vec_add1 (cwm->resource_idx, res_idx); - - /* Set device as default for all supported algos */ - for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) - if (dev->cipher_support[i]) - { - if (cwm->cipher_resource_idx[i] == (u16) ~ 0) - dcm->cipher_algs[i].disabled--; - cwm->cipher_resource_idx[i] = res_idx; - } - - for (i = 0; i < IPSEC_INTEG_N_ALG; i++) - if (dev->auth_support[i]) - { - if (cwm->auth_resource_idx[i] == (u16) ~ 0) - dcm->auth_algs[i].disabled--; - cwm->auth_resource_idx[i] = res_idx; - } - - /* Check if any unused resource */ - - u8 used = 0; - /* *INDENT-OFF* */ - vec_foreach (idx, cwm->resource_idx) - { - if (idx[0] == res_idx) - continue; - - for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) - used |= cwm->cipher_resource_idx[i] == idx[0]; - - for (i = 0; i < IPSEC_INTEG_N_ALG; i++) - used |= cwm->auth_resource_idx[i] == idx[0]; - - vec_elt_at_index (dcm->resource, idx[0])->remove = !used; - } - /* *INDENT-ON* */ - - return 0; -} - -/* *INDENT-OFF* */ -VLIB_CLI_COMMAND (set_dpdk_crypto_placement, static) = { - .path = "set dpdk crypto placement", - .short_help = "set dpdk crypto placement ( | auto)", - .function = set_dpdk_crypto_placement_fn, -}; -/* *INDENT-ON* */ - -/* - * The thread will not enqueue more operations to the device but will poll - * from it until there are no more inflight operations. 
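Illustratively, the drain described here amounts to the following, where res, res_idx, vm, cwm and node are assumed from the surrounding worker context, and dpdk_crypto_dequeue (defined further below) completes the dequeued ops and decrements the inflight counters:

/* stop placement first, so no new ops target this resource */
dpdk_crypto_clear_resource (res_idx);

/* keep polling until both directions are empty */
while (res->inflights[0] + res->inflights[1] > 0)
  dpdk_crypto_dequeue (vm, cwm, node, res);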
-*/ -static void -dpdk_crypto_clear_resource (u16 res_idx) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_resource_t *res = vec_elt_at_index (dcm->resource, res_idx); - crypto_worker_main_t *cwm = &dcm->workers_main[res->thread_idx]; - u32 i; - - for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) - if (cwm->cipher_resource_idx[i] == res_idx) - { - cwm->cipher_resource_idx[i] = (u16) ~ 0; - dcm->cipher_algs[i].disabled++; - } - - for (i = 0; i < IPSEC_INTEG_N_ALG; i++) - if (cwm->auth_resource_idx[i] == res_idx) - { - cwm->auth_resource_idx[i] = (u16) ~ 0; - dcm->auth_algs[i].disabled++; - } - - /* Fully remove device on crypto_node once there are no inflights */ - res->remove = 1; -} - -static clib_error_t * -clear_dpdk_crypto_placement_fn (vlib_main_t * vm, - unformat_input_t * - input, vlib_cli_command_t * cmd) -{ - unformat_input_t _line_input, *line_input = &_line_input; - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_dev_t *dev; - u32 thread_idx = (u32) ~ 0; - u16 *res_idx; - u8 dev_idx = (u8) ~ 0; - u8 free_all = 0; - - if (!unformat_user (input, unformat_line_input, line_input)) - return clib_error_return (0, "invalid syntax"); - - while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) - { - if (unformat (line_input, "%u %u", &dev_idx, &thread_idx)) - ; - else if (unformat (line_input, "%u", &dev_idx)) - free_all = 1; - else - { - unformat_free (line_input); - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); - } - } - - unformat_free (line_input); - - if (!(dev_idx < vec_len (dcm->dev))) - return clib_error_return (0, "invalid device index"); - - dev = vec_elt_at_index (dcm->dev, dev_idx); - - /* Clear all resources placements */ - if (free_all) - { - /* *INDENT-OFF* */ - vec_foreach (res_idx, dev->used_resources) - dpdk_crypto_clear_resource (res_idx[0]); - /* *INDENT-ON* */ - - return 0; - } - - if (!(thread_idx < vec_len (dcm->workers_main))) - return clib_error_return (0, "invalid thread index"); - - /* Clear placement of device for given thread index */ - /* *INDENT-OFF* */ - vec_foreach (res_idx, dev->used_resources) - if (dcm->resource[res_idx[0]].thread_idx == thread_idx) - break; - /* *INDENT-ON* */ - - if (!(res_idx < vec_end (dev->used_resources))) - return clib_error_return (0, "thread %u is not using device %u", - thread_idx, dev_idx); - - dpdk_crypto_clear_resource (res_idx[0]); - - return 0; -} - -/* *INDENT-OFF* */ -VLIB_CLI_COMMAND (clear_dpdk_crypto_placement, static) = { - .path = "clear dpdk crypto placement", - .short_help = "clear dpdk crypto placement []", - .function = clear_dpdk_crypto_placement_fn, -}; -/* *INDENT-ON* */ - -u8 * -format_dpdk_mempool (u8 * s, va_list * args) -{ - struct rte_mempool *mp = va_arg (*args, struct rte_mempool *); - u32 indent = format_get_indent (s); - u32 count = rte_mempool_avail_count (mp); - - s = format (s, "%s\n%Uavailable %7d, allocated %7d total %7d\n", - mp->name, format_white_space, indent + 2, - count, mp->size - count, mp->size); - s = format (s, "%Uphys_addr %p, flags %08x, nb_mem_chunks %u\n", - format_white_space, indent + 2, - mp->mz->iova, mp->flags, mp->nb_mem_chunks); - s = format (s, "%Uelt_size %4u, header_size %3u, trailer_size %u\n", - format_white_space, indent + 2, - mp->elt_size, mp->header_size, mp->trailer_size); - s = format (s, "%Uprivate_data_size %3u, total_elt_size %u\n", - format_white_space, indent + 2, - mp->private_data_size, - mp->elt_size + mp->header_size + mp->trailer_size); - return s; -} - -static clib_error_t * 
-static clib_error_t *
-show_dpdk_crypto_pools_fn (vlib_main_t * vm,
-                           unformat_input_t * input,
-                           vlib_cli_command_t * cmd)
-{
-  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
-  crypto_data_t *data;
-
-  /* *INDENT-OFF* */
-  vec_foreach (data, dcm->data)
-    {
-      if (data->crypto_op)
-        vlib_cli_output (vm, "%U\n", format_dpdk_mempool, data->crypto_op);
-      if (data->session_h)
-        vlib_cli_output (vm, "%U\n", format_dpdk_mempool, data->session_h);
-
-      struct rte_mempool **mp;
-      vec_foreach (mp, data->session_drv)
-        if (mp[0])
-          vlib_cli_output (vm, "%U\n", format_dpdk_mempool, mp[0]);
-    }
-  /* *INDENT-ON* */
-
-  return NULL;
-}
-
-/*?
- * This command is used to display the DPDK Crypto pools information.
- *
- * @cliexpar
- * Example of how to display the DPDK Crypto pools information:
- * @cliexstart{show dpdk crypto pools}
- * vpp# show dpdk crypto pools
- * crypto_pool_numa1
- * available   15872, allocated     512 total   16384
- * phys_addr 0xf3d2086c0, flags 00000010, nb_mem_chunks 1
- * elt_size  160, header_size  64, trailer_size 96
- * private_data_size  64, total_elt_size 320
- *
- * session_h_pool_numa1
- * available   19998, allocated       2 total   20000
- * phys_addr 0xf3c9c4380, flags 00000010, nb_mem_chunks 1
- * elt_size   40, header_size  64, trailer_size 88
- * private_data_size   0, total_elt_size 192
- *
- * session_drv0_pool_numa1
- * available   19998, allocated       2 total   20000
- * phys_addr 0xf3ad42d80, flags 00000010, nb_mem_chunks 1
- * elt_size  512, header_size  64, trailer_size 0
- * private_data_size   0, total_elt_size 576
- * @cliexend
-?*/
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (show_dpdk_crypto_pools, static) = {
-    .path = "show dpdk crypto pools",
-    .short_help = "show dpdk crypto pools",
-    .function = show_dpdk_crypto_pools_fn,
-};
-/* *INDENT-ON* */
-
-/* TODO Allow the user to define the number of sessions supported */
-/* TODO Allow the user to define the descriptor queue size */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/plugins/dpdk/ipsec/crypto_node.c b/src/plugins/dpdk/ipsec/crypto_node.c
deleted file mode 100644
index 893848c05b6..00000000000
--- a/src/plugins/dpdk/ipsec/crypto_node.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- *------------------------------------------------------------------
- * crypto_node.c - DPDK Cryptodev input node
- *
- * Copyright (c) 2017 Intel and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *------------------------------------------------------------------ - */ - -#include -#include -#include -#include - -#include -#include -#include -#include - -#define foreach_dpdk_crypto_input_error \ - _(DQ_COPS, "Crypto ops dequeued") \ - _(AUTH_FAILED, "Crypto verification failed") \ - _(STATUS, "Crypto operation failed") - -typedef enum -{ -#define _(f,s) DPDK_CRYPTO_INPUT_ERROR_##f, - foreach_dpdk_crypto_input_error -#undef _ - DPDK_CRYPTO_INPUT_N_ERROR, -} dpdk_crypto_input_error_t; - -static char *dpdk_crypto_input_error_strings[] = { -#define _(n, s) s, - foreach_dpdk_crypto_input_error -#undef _ -}; - -extern vlib_node_registration_t dpdk_crypto_input_node; - -typedef struct -{ - /* dev id of this cryptodev */ - u16 dev_id; - u16 next_index; -} dpdk_crypto_input_trace_t; - -static u8 * -format_dpdk_crypto_input_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - dpdk_crypto_input_trace_t *t = va_arg (*args, dpdk_crypto_input_trace_t *); - - s = format (s, "cryptodev-id %d next-index %d", t->dev_id, t->next_index); - - return s; -} - -static_always_inline void -dpdk_crypto_input_check_op (vlib_main_t * vm, vlib_node_runtime_t * node, - struct rte_crypto_op *op0, u16 * next) -{ - if (PREDICT_FALSE (op0->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) - { - next[0] = DPDK_CRYPTO_INPUT_NEXT_DROP; - vlib_node_increment_counter (vm, - node->node_index, - DPDK_CRYPTO_INPUT_ERROR_STATUS, 1); - /* if auth failed */ - if (op0->status == RTE_CRYPTO_OP_STATUS_AUTH_FAILED) - vlib_node_increment_counter (vm, - node->node_index, - DPDK_CRYPTO_INPUT_ERROR_AUTH_FAILED, 1); - } -} - -always_inline void -dpdk_crypto_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node, - u8 dev_id, u32 * bis, u16 * nexts, u32 n_deq) -{ - u32 n_left, n_trace; - - if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node)))) - { - n_left = n_deq; - - while (n_trace && n_left) - { - vlib_buffer_t *b0; - u16 next; - u32 bi; - - bi = bis[0]; - next = nexts[0]; - - b0 = vlib_get_buffer (vm, bi); - - if (PREDICT_TRUE - (vlib_trace_buffer (vm, node, next, b0, /* follow_chain */ 0))) - { - dpdk_crypto_input_trace_t *tr = - vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->dev_id = dev_id; - tr->next_index = next; - n_trace--; - } - - n_left--; - nexts++; - bis++; - } - vlib_set_trace_count (vm, node, n_trace); - } -} - -static_always_inline u32 -dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm, - vlib_node_runtime_t * node, crypto_resource_t * res) -{ - u8 numa = rte_socket_id (); - u32 n_ops, total_n_deq, n_deq[2]; - u32 bis[VLIB_FRAME_SIZE], *bi; - u16 nexts[VLIB_FRAME_SIZE], *next; - struct rte_crypto_op **ops; - - n_deq[0] = 0; - n_deq[1] = 0; - bi = bis; - next = nexts; - ops = cwm->ops; - - n_ops = total_n_deq = rte_cryptodev_dequeue_burst (res->dev_id, - res->qp_id, - ops, VLIB_FRAME_SIZE); - /* no op dequeued, do not proceed */ - if (n_ops == 0) - return 0; - - while (n_ops >= 4) - { - struct rte_crypto_op *op0, *op1, *op2, *op3; - - /* Prefetch next iteration. 
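- * While the current four ops are processed, the next four (ops[4..7])
- * and their op-private areas are pulled into cache so the loads below
- * do not stall; the scalar while-loop that follows drains the
- * remaining (fewer than four) ops.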
*/ - if (n_ops >= 8) - { - CLIB_PREFETCH (ops[4], CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (ops[5], CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (ops[6], CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (ops[7], CLIB_CACHE_LINE_BYTES, LOAD); - - CLIB_PREFETCH (crypto_op_get_priv (ops[4]), - CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (crypto_op_get_priv (ops[5]), - CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (crypto_op_get_priv (ops[6]), - CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (crypto_op_get_priv (ops[7]), - CLIB_CACHE_LINE_BYTES, LOAD); - } - - op0 = ops[0]; - op1 = ops[1]; - op2 = ops[2]; - op3 = ops[3]; - - next[0] = crypto_op_get_priv (op0)->next; - next[1] = crypto_op_get_priv (op1)->next; - next[2] = crypto_op_get_priv (op2)->next; - next[3] = crypto_op_get_priv (op3)->next; - - bi[0] = crypto_op_get_priv (op0)->bi; - bi[1] = crypto_op_get_priv (op1)->bi; - bi[2] = crypto_op_get_priv (op2)->bi; - bi[3] = crypto_op_get_priv (op3)->bi; - - n_deq[crypto_op_get_priv (op0)->encrypt] += 1; - n_deq[crypto_op_get_priv (op1)->encrypt] += 1; - n_deq[crypto_op_get_priv (op2)->encrypt] += 1; - n_deq[crypto_op_get_priv (op3)->encrypt] += 1; - - dpdk_crypto_input_check_op (vm, node, op0, next + 0); - dpdk_crypto_input_check_op (vm, node, op1, next + 1); - dpdk_crypto_input_check_op (vm, node, op2, next + 2); - dpdk_crypto_input_check_op (vm, node, op3, next + 3); - - op0->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - op1->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - op2->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - op3->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - - /* next */ - next += 4; - n_ops -= 4; - ops += 4; - bi += 4; - } - while (n_ops > 0) - { - struct rte_crypto_op *op0; - - op0 = ops[0]; - - next[0] = crypto_op_get_priv (op0)->next; - bi[0] = crypto_op_get_priv (op0)->bi; - - n_deq[crypto_op_get_priv (op0)->encrypt] += 1; - - dpdk_crypto_input_check_op (vm, node, op0, next + 0); - - op0->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - - /* next */ - next += 1; - n_ops -= 1; - ops += 1; - bi += 1; - } - - vlib_node_increment_counter (vm, node->node_index, - DPDK_CRYPTO_INPUT_ERROR_DQ_COPS, total_n_deq); - - res->inflights[0] -= n_deq[0]; - res->inflights[1] -= n_deq[1]; - - vlib_buffer_enqueue_to_next (vm, node, bis, nexts, total_n_deq); - - dpdk_crypto_input_trace (vm, node, res->dev_id, bis, nexts, total_n_deq); - - crypto_free_ops (numa, cwm->ops, total_n_deq); - - return total_n_deq; -} - -static_always_inline uword -dpdk_crypto_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, - vlib_frame_t * frame) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_worker_main_t *cwm = &dcm->workers_main[vm->thread_index]; - crypto_resource_t *res; - u32 n_deq = 0; - u16 *remove = NULL, *res_idx; - word i; - - /* *INDENT-OFF* */ - vec_foreach (res_idx, cwm->resource_idx) - { - res = vec_elt_at_index (dcm->resource, res_idx[0]); - u32 inflights = res->inflights[0] + res->inflights[1]; - - if (inflights) - n_deq += dpdk_crypto_dequeue (vm, cwm, node, res); - - inflights = res->inflights[0] + res->inflights[1]; - if (PREDICT_FALSE (res->remove && !(inflights))) - vec_add1 (remove, res_idx[0]); - } - /* *INDENT-ON* */ - - /* TODO removal on master thread? 
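- * As written, the owning worker recycles a flagged resource itself once
- * its inflight counters reach zero: the entry is dropped from this
- * thread's resource_idx vector and moved from the device's used list
- * back to its free list. The TODO records the open question of whether
- * this hand-back should instead happen on the main thread.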
*/ - if (PREDICT_FALSE (remove != NULL)) - { - /* *INDENT-OFF* */ - vec_foreach (res_idx, remove) - { - i = vec_search (cwm->resource_idx, res_idx[0]); - vec_del1 (cwm->resource_idx, i); - - res = vec_elt_at_index (dcm->resource, res_idx[0]); - res->thread_idx = (u16) ~0; - res->remove = 0; - - i = vec_search (dcm->dev[res->dev_id].used_resources, res_idx[0]); - ASSERT (i != (u16) ~0); - vec_del1 (dcm->dev[res->dev_id].used_resources, i); - vec_add1 (dcm->dev[res->dev_id].free_resources, res_idx[0]); - } - /* *INDENT-ON* */ - - vec_free (remove); - } - - return n_deq; -} - -VLIB_NODE_FN (dpdk_crypto_input_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - return dpdk_crypto_input_inline (vm, node, from_frame); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_crypto_input_node) = -{ - .name = "dpdk-crypto-input", - .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED, - .format_trace = format_dpdk_crypto_input_trace, - .type = VLIB_NODE_TYPE_INPUT, - .state = VLIB_NODE_STATE_DISABLED, - .n_errors = DPDK_CRYPTO_INPUT_N_ERROR, - .error_strings = dpdk_crypto_input_error_strings, - .n_next_nodes = DPDK_CRYPTO_INPUT_N_NEXT, - .next_nodes = - { -#define _(s,n) [DPDK_CRYPTO_INPUT_NEXT_##s] = n, - foreach_dpdk_crypto_input_next -#undef _ - }, -}; -/* *INDENT-ON* */ - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/plugins/dpdk/ipsec/dir.dox b/src/plugins/dpdk/ipsec/dir.dox deleted file mode 100644 index 05504541abb..00000000000 --- a/src/plugins/dpdk/ipsec/dir.dox +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2016 Intel and/or its affiliates. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* Doxygen directory documentation */ - -/** -@dir src/plugins/dpdk/ipsec -@brief IPSec ESP encrypt/decrypt using DPDK Cryptodev API. - -This directory contains the source code for the DPDK Crypto abstraction layer. - -*/ -/*? %%clicmd:group_label DPDK Crypto %% ?*/ -/*? %%syscfg:group_label DPDK Crypto %% ?*/ diff --git a/src/plugins/dpdk/ipsec/dpdk_crypto_ipsec_doc.md b/src/plugins/dpdk/ipsec/dpdk_crypto_ipsec_doc.md deleted file mode 100644 index 8cf51f07c03..00000000000 --- a/src/plugins/dpdk/ipsec/dpdk_crypto_ipsec_doc.md +++ /dev/null @@ -1,87 +0,0 @@ -# VPP IPSec implementation using DPDK Cryptodev API {#dpdk_crypto_ipsec_doc} - -This document is meant to contain all related information about implementation and usability. - - -## VPP IPsec with DPDK Cryptodev - -DPDK Cryptodev is an asynchronous crypto API that supports both Hardware and Software implementations (for more details refer to [DPDK Cryptography Device Library documentation](http://dpdk.org/doc/guides/prog_guide/cryptodev_lib.html)). - -When there are enough Cryptodev resources for all workers, the node graph is reconfigured by adding and changing the default next nodes. - -The following nodes are added: -* dpdk-crypto-input : polling input node, dequeuing from crypto devices. 
-* dpdk-esp-encrypt : internal node.
-* dpdk-esp-decrypt : internal node.
-* dpdk-esp-encrypt-post : internal node.
-* dpdk-esp-decrypt-post : internal node.
-
-Set new default next nodes:
-* for esp encryption: esp-encrypt -> dpdk-esp-encrypt
-* for esp decryption: esp-decrypt -> dpdk-esp-decrypt
-
-
-### How to enable VPP IPSec with DPDK Cryptodev support
-
-When building DPDK with VPP, Cryptodev support is always enabled.
-
-Additionally, on x86_64 platforms, DPDK is built with SW crypto support.
-
-
-### Crypto Resources allocation
-
-VPP allocates crypto resources based on a best effort approach:
-* first allocate Hardware crypto resources, then Software.
-* if there are not enough crypto resources for all workers, the graph node is not modified and the default VPP IPsec implementation based on OpenSSL is used. The following message is displayed:
-
-      0: dpdk_ipsec_init: not enough Cryptodevs, default to OpenSSL IPsec
-
-
-### Configuration example
-
-To enable DPDK Cryptodev, the user just needs to provide cryptodevs in the startup.conf.
-
-Below is an example startup.conf; it is not meant to be a default configuration:
-
-```
-dpdk {
-  dev 0000:81:00.0
-  dev 0000:81:00.1
-  dev 0000:85:01.0
-  dev 0000:85:01.1
-  vdev crypto_aesni_mb0,socket_id=1
-  vdev crypto_aesni_mb1,socket_id=1
-}
-```
-
-In the above configuration:
-* 0000:81:00.0 and 0000:81:00.1 are Ethernet device BDFs.
-* 0000:85:01.0 and 0000:85:01.1 are Crypto device BDFs and they require the same driver binding as DPDK Ethernet devices but they do not support any extra configuration options.
-* Two AESNI-MB Software (Virtual) Cryptodev PMDs are created in NUMA node 1.
-
-For further details refer to the [DPDK Crypto Device Driver documentation](http://dpdk.org/doc/guides/cryptodevs/index.html)
-
-### Operational data
-
-The following CLI command displays the Cryptodev/Worker mapping:
-
-    show crypto device mapping [verbose]
-
-
-### nasm
-
-Building the DPDK Crypto Libraries requires the open source project nasm (The Netwide
-Assembler) to be installed. The recommended version of nasm is 2.12.02. The minimum
-supported version of nasm is 2.11.06. Use the following command to determine the
-current nasm version:
-
-    nasm -v
-
-CentOS 7.3 and earlier and Fedora 21 and earlier use unsupported versions
-of nasm. Use the following set of commands to build a supported version:
-
-    wget http://www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2
-    tar -xjvf nasm-2.12.02.tar.bz2
-    cd nasm-2.12.02/
-    ./configure
-    make
-    sudo make install
diff --git a/src/plugins/dpdk/ipsec/esp_decrypt.c b/src/plugins/dpdk/ipsec/esp_decrypt.c
deleted file mode 100644
index 9a782abeb94..00000000000
--- a/src/plugins/dpdk/ipsec/esp_decrypt.c
+++ /dev/null
@@ -1,739 +0,0 @@
-/*
- * esp_decrypt.c : IPSec ESP Decrypt node using DPDK Cryptodev
- *
- * Copyright (c) 2017 Intel and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#define foreach_esp_decrypt_next \ -_(DROP, "error-drop") \ -_(IP4_INPUT, "ip4-input-no-checksum") \ -_(IP6_INPUT, "ip6-input") - -#define _(v, s) ESP_DECRYPT_NEXT_##v, -typedef enum -{ - foreach_esp_decrypt_next -#undef _ - ESP_DECRYPT_N_NEXT, -} esp_decrypt_next_t; - -#define foreach_esp_decrypt_error \ - _(RX_PKTS, "ESP pkts received") \ - _(DECRYPTION_FAILED, "ESP decryption failed") \ - _(REPLAY, "SA replayed packet") \ - _(NOT_IP, "Not IP packet (dropped)") \ - _(ENQ_FAIL, "Enqueue decrypt failed (queue full)") \ - _(DISCARD, "Not enough crypto operations") \ - _(BAD_LEN, "Invalid ciphertext length") \ - _(SESSION, "Failed to get crypto session") \ - _(NOSUP, "Cipher/Auth not supported") - - -typedef enum -{ -#define _(sym,str) ESP_DECRYPT_ERROR_##sym, - foreach_esp_decrypt_error -#undef _ - ESP_DECRYPT_N_ERROR, -} esp_decrypt_error_t; - -static char *esp_decrypt_error_strings[] = { -#define _(sym,string) string, - foreach_esp_decrypt_error -#undef _ -}; - -extern vlib_node_registration_t dpdk_esp4_decrypt_node; -extern vlib_node_registration_t dpdk_esp6_decrypt_node; - -typedef struct -{ - ipsec_crypto_alg_t crypto_alg; - ipsec_integ_alg_t integ_alg; - u8 packet_data[64]; -} esp_decrypt_trace_t; - -/* packet trace format function */ -static u8 * -format_esp_decrypt_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *); - u32 indent = format_get_indent (s); - - s = format (s, "cipher %U auth %U\n", - format_ipsec_crypto_alg, t->crypto_alg, - format_ipsec_integ_alg, t->integ_alg); - s = format (s, "%U%U", - format_white_space, indent, format_esp_header, t->packet_data); - return s; -} - -always_inline uword -dpdk_esp_decrypt_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame, int is_ip6) -{ - u32 n_left_from, *from, *to_next, next_index, thread_index; - u32 thread_idx = vlib_get_thread_index (); - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_resource_t *res = 0; - ipsec_sa_t *sa0 = 0; - crypto_alg_t *cipher_alg = 0, *auth_alg = 0; - struct rte_cryptodev_sym_session *session = 0; - u32 ret, last_sa_index = ~0; - u8 numa = rte_socket_id (); - u8 is_aead = 0; - crypto_worker_main_t *cwm = - vec_elt_at_index (dcm->workers_main, thread_idx); - struct rte_crypto_op **ops = cwm->ops; - - from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - thread_index = vm->thread_index; - - ret = crypto_alloc_ops (numa, ops, n_left_from); - if (ret) - { - if (is_ip6) - vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, - ESP_DECRYPT_ERROR_DISCARD, n_left_from); - else - vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, - ESP_DECRYPT_ERROR_DISCARD, n_left_from); - /* Discard whole frame */ - vlib_buffer_free (vm, from, n_left_from); - return n_left_from; - } - - next_index = ESP_DECRYPT_NEXT_DROP; - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - - while (n_left_from > 0 && n_left_to_next > 0) - { - clib_error_t *error; - u32 bi0, sa_index0, iv_size; - u8 trunc_size; - vlib_buffer_t *b0; - esp_header_t *esp0; - struct rte_mbuf *mb0; - struct rte_crypto_op *op; - u16 res_idx; - - bi0 = from[0]; - from += 1; - n_left_from -= 1; - - b0 = vlib_get_buffer (vm, 
bi0); - mb0 = rte_mbuf_from_vlib_buffer (b0); - esp0 = vlib_buffer_get_current (b0); - - /* ih0/ih6_0 */ - CLIB_PREFETCH (esp0, sizeof (esp0[0]) + 16, LOAD); - /* mb0 */ - CLIB_PREFETCH (mb0, CLIB_CACHE_LINE_BYTES, STORE); - - op = ops[0]; - ops += 1; - ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED); - - dpdk_op_priv_t *priv = crypto_op_get_priv (op); - /* store bi in op private */ - priv->bi = bi0; - priv->encrypt = 0; - - u16 op_len = - sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]); - CLIB_PREFETCH (op, op_len, STORE); - - sa_index0 = vnet_buffer (b0)->ipsec.sad_index; - vlib_prefetch_combined_counter (&ipsec_sa_counters, - thread_index, sa_index0); - - if (sa_index0 != last_sa_index) - { - sa0 = ipsec_sa_get (sa_index0); - - cipher_alg = - vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg); - auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg); - - is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD); - if (is_aead) - auth_alg = cipher_alg; - - res_idx = get_resource (cwm, sa0); - - if (PREDICT_FALSE (res_idx == (u16) ~ 0)) - { - if (is_ip6) - vlib_node_increment_counter (vm, - dpdk_esp6_decrypt_node.index, - ESP_DECRYPT_ERROR_NOSUP, 1); - else - vlib_node_increment_counter (vm, - dpdk_esp4_decrypt_node.index, - ESP_DECRYPT_ERROR_NOSUP, 1); - to_next[0] = bi0; - to_next += 1; - n_left_to_next -= 1; - goto trace; - } - res = vec_elt_at_index (dcm->resource, res_idx); - - error = crypto_get_session (&session, sa_index0, res, cwm, 0); - if (PREDICT_FALSE (error || !session)) - { - if (is_ip6) - vlib_node_increment_counter (vm, - dpdk_esp6_decrypt_node.index, - ESP_DECRYPT_ERROR_SESSION, - 1); - else - vlib_node_increment_counter (vm, - dpdk_esp4_decrypt_node.index, - ESP_DECRYPT_ERROR_SESSION, - 1); - to_next[0] = bi0; - to_next += 1; - n_left_to_next -= 1; - goto trace; - } - - last_sa_index = sa_index0; - } - - /* anti-replay check */ - if (ipsec_sa_anti_replay_check - (sa0, clib_host_to_net_u32 (esp0->seq))) - { - if (is_ip6) - vlib_node_increment_counter (vm, - dpdk_esp6_decrypt_node.index, - ESP_DECRYPT_ERROR_REPLAY, 1); - else - vlib_node_increment_counter (vm, - dpdk_esp4_decrypt_node.index, - ESP_DECRYPT_ERROR_REPLAY, 1); - to_next[0] = bi0; - to_next += 1; - n_left_to_next -= 1; - goto trace; - } - - if (is_ip6) - priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT6_POST; - else - { - priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT4_POST; - b0->flags |= VNET_BUFFER_F_IS_IP4; - } - - /* FIXME multi-seg */ - vlib_increment_combined_counter - (&ipsec_sa_counters, thread_index, sa_index0, - 1, b0->current_length); - - res->ops[res->n_ops] = op; - res->bi[res->n_ops] = bi0; - res->n_ops += 1; - - /* Convert vlib buffer to mbuf */ - mb0->data_len = b0->current_length; - mb0->pkt_len = b0->current_length; - mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data; - - trunc_size = auth_alg->trunc_size; - iv_size = cipher_alg->iv_len; - - /* Outer IP header has already been stripped */ - u16 payload_len = - b0->current_length - sizeof (esp_header_t) - iv_size - trunc_size; - - ASSERT (payload_len >= 4); - - if (payload_len & (cipher_alg->boundary - 1)) - { - if (is_ip6) - vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, - ESP_DECRYPT_ERROR_BAD_LEN, 1); - else - vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, - ESP_DECRYPT_ERROR_BAD_LEN, 1); - res->n_ops -= 1; - to_next[0] = bi0; - to_next += 1; - n_left_to_next -= 1; - goto trace; - } - - u32 cipher_off, cipher_len; - u32 auth_len = 0; - u8 *aad = NULL; - - u8 *iv = (u8 *) (esp0 + 1); 
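-	  /*
-	   * Summary of the IV handling below: AES-CBC copies the full
-	   * 16-byte wire IV into the op's counter block, while CTR/GCM
-	   * rebuild the block from the SA salt plus the 8-byte wire IV;
-	   * for AEAD the AAD scratch area additionally receives the SPI
-	   * and sequence number (and seq_hi when ESN is enabled).
-	   */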
- - dpdk_gcm_cnt_blk *icb = &priv->cb; - - cipher_off = sizeof (esp_header_t) + iv_size; - cipher_len = payload_len; - - u8 *digest = vlib_buffer_get_tail (b0) - trunc_size; - u64 digest_paddr = mb0->buf_iova + digest - ((u8 *) mb0->buf_addr); - - if (!is_aead && cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC) - clib_memcpy_fast (icb, iv, 16); - else /* CTR/GCM */ - { - u32 *_iv = (u32 *) iv; - - crypto_set_icb (icb, sa0->salt, _iv[0], _iv[1]); - } - - if (is_aead) - { - aad = priv->aad; - u32 *_aad = (u32 *) aad; - clib_memcpy_fast (aad, esp0, 8); - - /* _aad[3] should always be 0 */ - if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0))) - { - _aad[2] = _aad[1]; - _aad[1] = clib_host_to_net_u32 (sa0->seq_hi); - } - else - _aad[2] = 0; - } - else - { - auth_len = sizeof (esp_header_t) + iv_size + payload_len; - - if (ipsec_sa_is_set_USE_ESN (sa0)) - { - clib_memcpy_fast (priv->icv, digest, trunc_size); - u32 *_digest = (u32 *) digest; - _digest[0] = clib_host_to_net_u32 (sa0->seq_hi); - auth_len += sizeof (sa0->seq_hi); - - digest = priv->icv; - digest_paddr = - op->phys_addr + (uintptr_t) priv->icv - (uintptr_t) op; - } - } - - crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len, - 0, auth_len, aad, digest, digest_paddr); - trace: - if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) - { - esp_decrypt_trace_t *tr = - vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->crypto_alg = sa0->crypto_alg; - tr->integ_alg = sa0->integ_alg; - clib_memcpy_fast (tr->packet_data, vlib_buffer_get_current (b0), - sizeof (esp_header_t)); - } - } - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - - if (is_ip6) - { - vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, - ESP_DECRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); - - crypto_enqueue_ops (vm, cwm, dpdk_esp6_decrypt_node.index, - ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ ); - } - else - { - vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, - ESP_DECRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); - - crypto_enqueue_ops (vm, cwm, dpdk_esp4_decrypt_node.index, - ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ ); - } - - crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops); - - return from_frame->n_vectors; -} - -VLIB_NODE_FN (dpdk_esp4_decrypt_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - return dpdk_esp_decrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ ); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_esp4_decrypt_node) = { - .name = "dpdk-esp4-decrypt", - .vector_size = sizeof (u32), - .format_trace = format_esp_decrypt_trace, - .type = VLIB_NODE_TYPE_INTERNAL, - - .n_errors = ARRAY_LEN(esp_decrypt_error_strings), - .error_strings = esp_decrypt_error_strings, - - .n_next_nodes = ESP_DECRYPT_N_NEXT, - .next_nodes = { -#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, - foreach_esp_decrypt_next -#undef _ - }, -}; -/* *INDENT-ON* */ - -VLIB_NODE_FN (dpdk_esp6_decrypt_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - return dpdk_esp_decrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ ); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_esp6_decrypt_node) = { - .name = "dpdk-esp6-decrypt", - .vector_size = sizeof (u32), - .format_trace = format_esp_decrypt_trace, - .type = VLIB_NODE_TYPE_INTERNAL, - - .n_errors = ARRAY_LEN(esp_decrypt_error_strings), - .error_strings = esp_decrypt_error_strings, - - .n_next_nodes = ESP_DECRYPT_N_NEXT, - .next_nodes = { -#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, - 
foreach_esp_decrypt_next -#undef _ - }, -}; -/* *INDENT-ON* */ - -/* - * Decrypt Post Node - */ - -#define foreach_esp_decrypt_post_error \ - _(PKTS, "ESP post pkts") - -typedef enum -{ -#define _(sym,str) ESP_DECRYPT_POST_ERROR_##sym, - foreach_esp_decrypt_post_error -#undef _ - ESP_DECRYPT_POST_N_ERROR, -} esp_decrypt_post_error_t; - -static char *esp_decrypt_post_error_strings[] = { -#define _(sym,string) string, - foreach_esp_decrypt_post_error -#undef _ -}; - -extern vlib_node_registration_t dpdk_esp4_decrypt_post_node; -extern vlib_node_registration_t dpdk_esp6_decrypt_post_node; - -static u8 * -format_esp_decrypt_post_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *); - u32 indent = format_get_indent (s); - - s = format (s, "cipher %U auth %U\n", - format_ipsec_crypto_alg, t->crypto_alg, - format_ipsec_integ_alg, t->integ_alg); - - ip4_header_t *ih4 = (ip4_header_t *) t->packet_data; - if ((ih4->ip_version_and_header_length & 0xF0) == 0x60) - s = - format (s, "%U%U", format_white_space, indent, format_ip6_header, ih4); - else - s = - format (s, "%U%U", format_white_space, indent, format_ip4_header, ih4); - - return s; -} - -always_inline uword -dpdk_esp_decrypt_post_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame, int is_ip6) -{ - u32 n_left_from, *from, *to_next = 0, next_index; - ipsec_sa_t *sa0; - u32 sa_index0 = ~0; - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - - from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - - next_index = node->cached_next_index; - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - - while (n_left_from > 0 && n_left_to_next > 0) - { - esp_footer_t *f0; - u32 bi0, iv_size, next0; - vlib_buffer_t *b0 = 0; - ip4_header_t *ih4 = 0, *oh4 = 0; - ip6_header_t *ih6 = 0, *oh6 = 0; - crypto_alg_t *cipher_alg, *auth_alg; - esp_header_t *esp0; - u8 trunc_size, is_aead; - u16 udp_encap_adv = 0; - - next0 = ESP_DECRYPT_NEXT_DROP; - - bi0 = from[0]; - from += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - esp0 = vlib_buffer_get_current (b0); - - sa_index0 = vnet_buffer (b0)->ipsec.sad_index; - sa0 = ipsec_sa_get (sa_index0); - - to_next[0] = bi0; - to_next += 1; - - cipher_alg = vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg); - auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg); - is_aead = cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD; - if (is_aead) - auth_alg = cipher_alg; - - trunc_size = auth_alg->trunc_size; - - iv_size = cipher_alg->iv_len; - - ipsec_sa_anti_replay_advance (sa0, - clib_host_to_net_u32 (esp0->seq)); - - /* if UDP encapsulation is used adjust the address of the IP header */ - if (ipsec_sa_is_set_UDP_ENCAP (sa0) - && (b0->flags & VNET_BUFFER_F_IS_IP4)) - { - udp_encap_adv = sizeof (udp_header_t); - } - - if (b0->flags & VNET_BUFFER_F_IS_IP4) - ih4 = (ip4_header_t *) - ((u8 *) esp0 - udp_encap_adv - sizeof (ip4_header_t)); - else - ih4 = (ip4_header_t *) ((u8 *) esp0 - sizeof (ip6_header_t)); - - vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size); - - b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; - f0 = (esp_footer_t *) (vlib_buffer_get_tail (b0) - trunc_size - 2); - b0->current_length -= (f0->pad_length + trunc_size + 2); -#if 0 - /* check padding */ - const u8 
*padding = vlib_buffer_get_tail (b0); - if (PREDICT_FALSE (memcmp (padding, pad_data, f0->pad_length))) - { - clib_warning ("bad padding"); - vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, - ESP_DECRYPT_ERROR_DECRYPTION_FAILED, - 1); - goto trace; - } -#endif - if (ipsec_sa_is_set_IS_TUNNEL (sa0)) - { - if (f0->next_header == IP_PROTOCOL_IP_IN_IP) - next0 = ESP_DECRYPT_NEXT_IP4_INPUT; - else if (f0->next_header == IP_PROTOCOL_IPV6) - next0 = ESP_DECRYPT_NEXT_IP6_INPUT; - else - { - clib_warning ("next header: 0x%x", f0->next_header); - if (is_ip6) - vlib_node_increment_counter (vm, - dpdk_esp6_decrypt_node.index, - ESP_DECRYPT_ERROR_DECRYPTION_FAILED, - 1); - else - vlib_node_increment_counter (vm, - dpdk_esp4_decrypt_node.index, - ESP_DECRYPT_ERROR_DECRYPTION_FAILED, - 1); - goto trace; - } - } - else /* transport mode */ - { - if ((ih4->ip_version_and_header_length & 0xF0) == 0x40) - { - u16 ih4_len = ip4_header_bytes (ih4); - vlib_buffer_advance (b0, -ih4_len); - next0 = ESP_DECRYPT_NEXT_IP4_INPUT; - - oh4 = vlib_buffer_get_current (b0); - memmove (oh4, ih4, ih4_len); - oh4->protocol = f0->next_header; - oh4->length = clib_host_to_net_u16 (b0->current_length); - oh4->checksum = ip4_header_checksum (oh4); - } - else if ((ih4->ip_version_and_header_length & 0xF0) == 0x60) - { - ih6 = (ip6_header_t *) ih4; - vlib_buffer_advance (b0, -sizeof (ip6_header_t)); - oh6 = vlib_buffer_get_current (b0); - memmove (oh6, ih6, sizeof (ip6_header_t)); - - next0 = ESP_DECRYPT_NEXT_IP6_INPUT; - oh6->protocol = f0->next_header; - u16 len = b0->current_length - sizeof (ip6_header_t); - oh6->payload_length = clib_host_to_net_u16 (len); - } - else - { - clib_warning ("next header: 0x%x", f0->next_header); - if (is_ip6) - vlib_node_increment_counter (vm, - dpdk_esp6_decrypt_node.index, - ESP_DECRYPT_ERROR_DECRYPTION_FAILED, - 1); - else - vlib_node_increment_counter (vm, - dpdk_esp4_decrypt_node.index, - ESP_DECRYPT_ERROR_DECRYPTION_FAILED, - 1); - goto trace; - } - } - - vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; - - trace: - if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) - { - esp_decrypt_trace_t *tr = - vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->crypto_alg = sa0->crypto_alg; - tr->integ_alg = sa0->integ_alg; - ih4 = vlib_buffer_get_current (b0); - clib_memcpy_fast (tr->packet_data, ih4, sizeof (ip6_header_t)); - } - - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, bi0, - next0); - } - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - - if (is_ip6) - vlib_node_increment_counter (vm, dpdk_esp6_decrypt_post_node.index, - ESP_DECRYPT_POST_ERROR_PKTS, - from_frame->n_vectors); - else - vlib_node_increment_counter (vm, dpdk_esp4_decrypt_post_node.index, - ESP_DECRYPT_POST_ERROR_PKTS, - from_frame->n_vectors); - - return from_frame->n_vectors; -} - -VLIB_NODE_FN (dpdk_esp4_decrypt_post_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - return dpdk_esp_decrypt_post_inline (vm, node, from_frame, 0 /*is_ip6 */ ); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_esp4_decrypt_post_node) = { - .name = "dpdk-esp4-decrypt-post", - .vector_size = sizeof (u32), - .format_trace = format_esp_decrypt_post_trace, - .type = VLIB_NODE_TYPE_INTERNAL, - - .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings), - .error_strings = esp_decrypt_post_error_strings, - - .n_next_nodes = ESP_DECRYPT_N_NEXT, - .next_nodes = { -#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, - foreach_esp_decrypt_next -#undef _ - }, 
-};
-/* *INDENT-ON* */
-
-VLIB_NODE_FN (dpdk_esp6_decrypt_post_node) (vlib_main_t * vm,
-                                            vlib_node_runtime_t * node,
-                                            vlib_frame_t * from_frame)
-{
-  return dpdk_esp_decrypt_post_inline (vm, node, from_frame, 1 /* is_ip6 */ );
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (dpdk_esp6_decrypt_post_node) = {
-  .name = "dpdk-esp6-decrypt-post",
-  .vector_size = sizeof (u32),
-  .format_trace = format_esp_decrypt_post_trace,
-  .type = VLIB_NODE_TYPE_INTERNAL,
-
-  .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings),
-  .error_strings = esp_decrypt_post_error_strings,
-
-  .n_next_nodes = ESP_DECRYPT_N_NEXT,
-  .next_nodes = {
-#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
-    foreach_esp_decrypt_next
-#undef _
-  },
-};
-/* *INDENT-ON* */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/plugins/dpdk/ipsec/esp_encrypt.c b/src/plugins/dpdk/ipsec/esp_encrypt.c
deleted file mode 100644
index 157c93f417e..00000000000
--- a/src/plugins/dpdk/ipsec/esp_encrypt.c
+++ /dev/null
@@ -1,710 +0,0 @@
-/*
- * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev
- *
- * Copyright (c) 2017 Intel and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define foreach_esp_encrypt_next \
-_(DROP, "error-drop") \
-_(IP4_LOOKUP, "ip4-lookup") \
-_(IP6_LOOKUP, "ip6-lookup") \
-_(INTERFACE_OUTPUT, "interface-output")
-
-#define _(v, s) ESP_ENCRYPT_NEXT_##v,
-typedef enum
-{
-  foreach_esp_encrypt_next
-#undef _
-    ESP_ENCRYPT_N_NEXT,
-} esp_encrypt_next_t;
-
-#define foreach_esp_encrypt_error \
- _(RX_PKTS, "ESP pkts received") \
- _(SEQ_CYCLED, "Sequence number cycled") \
- _(ENQ_FAIL, "Enqueue encrypt failed (queue full)") \
- _(DISCARD, "Not enough crypto operations") \
- _(SESSION, "Failed to get crypto session") \
- _(NOSUP, "Cipher/Auth not supported")
-
-
-typedef enum
-{
-#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
-  foreach_esp_encrypt_error
-#undef _
-    ESP_ENCRYPT_N_ERROR,
-} esp_encrypt_error_t;
-
-static char *esp_encrypt_error_strings[] = {
-#define _(sym,string) string,
-  foreach_esp_encrypt_error
-#undef _
-};
-
-extern vlib_node_registration_t dpdk_esp4_encrypt_node;
-extern vlib_node_registration_t dpdk_esp6_encrypt_node;
-extern vlib_node_registration_t dpdk_esp4_encrypt_tun_node;
-extern vlib_node_registration_t dpdk_esp6_encrypt_tun_node;
-
-typedef struct
-{
-  ipsec_crypto_alg_t crypto_alg;
-  ipsec_integ_alg_t integ_alg;
-  u8 packet_data[64];
-} esp_encrypt_trace_t;
-
-/* packet trace format function */
-static u8 *
-format_esp_encrypt_trace (u8 * s, va_list * args)
-{
-  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
-  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
-  ip4_header_t *ih4 = (ip4_header_t *) t->packet_data;
-  u32 indent = format_get_indent (s), offset;
-
-  s =
format (s, "cipher %U auth %U\n", - format_ipsec_crypto_alg, t->crypto_alg, - format_ipsec_integ_alg, t->integ_alg); - - if ((ih4->ip_version_and_header_length & 0xF0) == 0x60) - { - s = format (s, "%U%U", format_white_space, indent, - format_ip6_header, ih4); - offset = sizeof (ip6_header_t); - } - else - { - s = format (s, "%U%U", format_white_space, indent, - format_ip4_header, ih4); - offset = ip4_header_bytes (ih4); - } - - s = format (s, "\n%U%U", format_white_space, indent, - format_esp_header, t->packet_data + offset); - - return s; -} - -always_inline uword -dpdk_esp_encrypt_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame, int is_ip6, int is_tun) -{ - u32 n_left_from, *from, *to_next, next_index, thread_index; - ipsec_main_t *im = &ipsec_main; - vnet_main_t *vnm = im->vnet_main; - vnet_interface_main_t *vim = &vnm->interface_main; - u32 thread_idx = vlib_get_thread_index (); - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_resource_t *res = 0; - ipsec_sa_t *sa0 = 0; - crypto_alg_t *cipher_alg = 0, *auth_alg = 0; - struct rte_cryptodev_sym_session *session = 0; - u32 ret, last_sa_index = ~0; - u8 numa = rte_socket_id (); - u8 is_aead = 0; - crypto_worker_main_t *cwm = - vec_elt_at_index (dcm->workers_main, thread_idx); - struct rte_crypto_op **ops = cwm->ops; - - from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - thread_index = vm->thread_index; - - ret = crypto_alloc_ops (numa, ops, n_left_from); - if (ret) - { - if (is_ip6) - vlib_node_increment_counter (vm, dpdk_esp6_encrypt_node.index, - ESP_ENCRYPT_ERROR_DISCARD, n_left_from); - else - vlib_node_increment_counter (vm, dpdk_esp4_encrypt_node.index, - ESP_ENCRYPT_ERROR_DISCARD, n_left_from); - /* Discard whole frame */ - vlib_buffer_free (vm, from, n_left_from); - return n_left_from; - } - - next_index = ESP_ENCRYPT_NEXT_DROP; - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - - while (n_left_from > 0 && n_left_to_next > 0) - { - clib_error_t *error; - u32 bi0, bi1; - vlib_buffer_t *b0, *b1; - u32 sa_index0; - ip4_and_esp_header_t *ih0, *oh0 = 0; - ip6_and_esp_header_t *ih6_0, *oh6_0 = 0; - ip4_and_udp_and_esp_header_t *ouh0 = 0; - esp_header_t *esp0; - esp_footer_t *f0; - u8 next_hdr_type; - u32 iv_size; - u16 orig_sz; - u8 trunc_size; - u16 rewrite_len; - u16 udp_encap_adv = 0; - struct rte_mbuf *mb0; - struct rte_crypto_op *op; - u16 res_idx; - - bi0 = from[0]; - from += 1; - n_left_from -= 1; - - b0 = vlib_get_buffer (vm, bi0); - ih0 = vlib_buffer_get_current (b0); - mb0 = rte_mbuf_from_vlib_buffer (b0); - - /* ih0/ih6_0 */ - CLIB_PREFETCH (ih0, sizeof (ih6_0[0]), LOAD); - /* f0 */ - CLIB_PREFETCH (vlib_buffer_get_tail (b0), 20, STORE); - /* mb0 */ - CLIB_PREFETCH (mb0, CLIB_CACHE_LINE_BYTES, STORE); - - if (n_left_from > 1) - { - bi1 = from[1]; - b1 = vlib_get_buffer (vm, bi1); - - CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD); - CLIB_PREFETCH (b1->data - CLIB_CACHE_LINE_BYTES, - CLIB_CACHE_LINE_BYTES, STORE); - } - - op = ops[0]; - ops += 1; - ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED); - - dpdk_op_priv_t *priv = crypto_op_get_priv (op); - /* store bi in op private */ - priv->bi = bi0; - priv->encrypt = 1; - - u16 op_len = - sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]); - CLIB_PREFETCH (op, op_len, STORE); - - if (is_tun) - { - /* we are on a ipsec tunnel's feature arc */ - vnet_buffer (b0)->ipsec.sad_index = - sa_index0 = 
ipsec_tun_protect_get_sa_out - (vnet_buffer (b0)->ip.adj_index[VLIB_TX]); - } - else - sa_index0 = vnet_buffer (b0)->ipsec.sad_index; - - if (sa_index0 != last_sa_index) - { - sa0 = ipsec_sa_get (sa_index0); - - cipher_alg = - vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg); - auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg); - - is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD); - - if (is_aead) - auth_alg = cipher_alg; - - res_idx = get_resource (cwm, sa0); - - if (PREDICT_FALSE (res_idx == (u16) ~ 0)) - { - if (is_ip6) - vlib_node_increment_counter (vm, - dpdk_esp6_encrypt_node.index, - ESP_ENCRYPT_ERROR_NOSUP, 1); - else - vlib_node_increment_counter (vm, - dpdk_esp4_encrypt_node.index, - ESP_ENCRYPT_ERROR_NOSUP, 1); - to_next[0] = bi0; - to_next += 1; - n_left_to_next -= 1; - goto trace; - } - res = vec_elt_at_index (dcm->resource, res_idx); - - error = crypto_get_session (&session, sa_index0, res, cwm, 1); - if (PREDICT_FALSE (error || !session)) - { - if (is_ip6) - vlib_node_increment_counter (vm, - dpdk_esp6_encrypt_node.index, - ESP_ENCRYPT_ERROR_SESSION, - 1); - else - vlib_node_increment_counter (vm, - dpdk_esp4_encrypt_node.index, - ESP_ENCRYPT_ERROR_SESSION, - 1); - to_next[0] = bi0; - to_next += 1; - n_left_to_next -= 1; - goto trace; - } - - last_sa_index = sa_index0; - } - - if (PREDICT_FALSE (esp_seq_advance (sa0))) - { - if (is_ip6) - vlib_node_increment_counter (vm, - dpdk_esp6_encrypt_node.index, - ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); - else - vlib_node_increment_counter (vm, - dpdk_esp4_encrypt_node.index, - ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); - //TODO: rekey SA - to_next[0] = bi0; - to_next += 1; - n_left_to_next -= 1; - goto trace; - } - - orig_sz = b0->current_length; - - /* TODO multi-seg support - total_length_not_including_first_buffer */ - vlib_increment_combined_counter - (&ipsec_sa_counters, thread_index, sa_index0, - 1, b0->current_length); - - /* Update tunnel interface tx counters */ - if (is_tun) - vlib_increment_combined_counter - (vim->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - thread_index, vnet_buffer (b0)->sw_if_index[VLIB_TX], - 1, b0->current_length); - - res->ops[res->n_ops] = op; - res->bi[res->n_ops] = bi0; - res->n_ops += 1; - - dpdk_gcm_cnt_blk *icb = &priv->cb; - - crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi); - - iv_size = cipher_alg->iv_len; - trunc_size = auth_alg->trunc_size; - - /* if UDP encapsulation is used adjust the address of the IP header */ - if (ipsec_sa_is_set_UDP_ENCAP (sa0) && !is_ip6) - udp_encap_adv = sizeof (udp_header_t); - - if (ipsec_sa_is_set_IS_TUNNEL (sa0)) - { - rewrite_len = 0; - if (!ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)) /* ip4 */ - { - /* in tunnel mode send it back to FIB */ - priv->next = DPDK_CRYPTO_INPUT_NEXT_IP4_LOOKUP; - u8 adv = sizeof (ip4_header_t) + udp_encap_adv + - sizeof (esp_header_t) + iv_size; - vlib_buffer_advance (b0, -adv); - oh0 = vlib_buffer_get_current (b0); - ouh0 = vlib_buffer_get_current (b0); - next_hdr_type = (is_ip6 ? 
- IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); - /* - * oh0->ip4.ip_version_and_header_length = 0x45; - * oh0->ip4.tos = ih0->ip4.tos; - * oh0->ip4.fragment_id = 0; - * oh0->ip4.flags_and_fragment_offset = 0; - */ - oh0->ip4.checksum_data_64[0] = - clib_host_to_net_u64 (0x45ULL << 56); - /* - * oh0->ip4.ttl = 254; - * oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; - */ - oh0->ip4.checksum_data_32[2] = - clib_host_to_net_u32 (0xfe320000); - - oh0->ip4.src_address.as_u32 = - sa0->tunnel.t_src.ip.ip4.as_u32; - oh0->ip4.dst_address.as_u32 = - sa0->tunnel.t_dst.ip.ip4.as_u32; - - if (ipsec_sa_is_set_UDP_ENCAP (sa0)) - { - oh0->ip4.protocol = IP_PROTOCOL_UDP; - esp0 = &ouh0->esp; - } - else - esp0 = &oh0->esp; - esp0->spi = clib_host_to_net_u32 (sa0->spi); - esp0->seq = clib_host_to_net_u32 (sa0->seq); - } - else - { - /* ip6 */ - /* in tunnel mode send it back to FIB */ - priv->next = DPDK_CRYPTO_INPUT_NEXT_IP6_LOOKUP; - - u8 adv = - sizeof (ip6_header_t) + sizeof (esp_header_t) + iv_size; - vlib_buffer_advance (b0, -adv); - ih6_0 = (ip6_and_esp_header_t *) ih0; - oh6_0 = vlib_buffer_get_current (b0); - - next_hdr_type = (is_ip6 ? - IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); - - oh6_0->ip6.ip_version_traffic_class_and_flow_label = - ih6_0->ip6.ip_version_traffic_class_and_flow_label; - - oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP; - oh6_0->ip6.hop_limit = 254; - oh6_0->ip6.src_address.as_u64[0] = - sa0->tunnel.t_src.ip.ip6.as_u64[0]; - oh6_0->ip6.src_address.as_u64[1] = - sa0->tunnel.t_src.ip.ip6.as_u64[1]; - oh6_0->ip6.dst_address.as_u64[0] = - sa0->tunnel.t_dst.ip.ip6.as_u64[0]; - oh6_0->ip6.dst_address.as_u64[1] = - sa0->tunnel.t_dst.ip.ip6.as_u64[1]; - esp0 = &oh6_0->esp; - oh6_0->esp.spi = clib_host_to_net_u32 (sa0->spi); - oh6_0->esp.seq = clib_host_to_net_u32 (sa0->seq); - } - - vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; - } - else /* transport mode */ - { - if (is_tun) - { - rewrite_len = 0; - priv->next = DPDK_CRYPTO_INPUT_NEXT_MIDCHAIN; - } - else - { - priv->next = DPDK_CRYPTO_INPUT_NEXT_INTERFACE_OUTPUT; - rewrite_len = vnet_buffer (b0)->ip.save_rewrite_length; - } - u16 adv = sizeof (esp_header_t) + iv_size + udp_encap_adv; - vlib_buffer_advance (b0, -adv - rewrite_len); - u8 *src = ((u8 *) ih0) - rewrite_len; - u8 *dst = vlib_buffer_get_current (b0); - oh0 = vlib_buffer_get_current (b0) + rewrite_len; - ouh0 = vlib_buffer_get_current (b0) + rewrite_len; - - if (is_ip6) - { - orig_sz -= sizeof (ip6_header_t); - ih6_0 = (ip6_and_esp_header_t *) ih0; - next_hdr_type = ih6_0->ip6.protocol; - memmove (dst, src, rewrite_len + sizeof (ip6_header_t)); - oh6_0 = (ip6_and_esp_header_t *) oh0; - oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP; - esp0 = &oh6_0->esp; - } - else /* ipv4 */ - { - u16 ip_size = ip4_header_bytes (&ih0->ip4); - orig_sz -= ip_size; - next_hdr_type = ih0->ip4.protocol; - memmove (dst, src, rewrite_len + ip_size); - oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; - esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size); - if (ipsec_sa_is_set_UDP_ENCAP (sa0)) - { - oh0->ip4.protocol = IP_PROTOCOL_UDP; - esp0 = (esp_header_t *) - (((u8 *) oh0) + ip_size + udp_encap_adv); - } - else - { - oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; - esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size); - } - } - esp0->spi = clib_host_to_net_u32 (sa0->spi); - esp0->seq = clib_host_to_net_u32 (sa0->seq); - } - - if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0) - { - ouh0->udp.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec); - ouh0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec); - 
ouh0->udp.checksum = 0; - } - ASSERT (is_pow2 (cipher_alg->boundary)); - u16 mask = cipher_alg->boundary - 1; - u16 pad_payload_len = ((orig_sz + 2) + mask) & ~mask; - u8 pad_bytes = pad_payload_len - 2 - orig_sz; - - u8 *padding = - vlib_buffer_put_uninit (b0, pad_bytes + 2 + trunc_size); - - /* The extra pad bytes would be overwritten by the digest */ - if (pad_bytes) - clib_memcpy_fast (padding, pad_data, 16); - - f0 = (esp_footer_t *) (padding + pad_bytes); - f0->pad_length = pad_bytes; - f0->next_header = next_hdr_type; - - if (oh6_0) - { - u16 len = b0->current_length - sizeof (ip6_header_t); - oh6_0->ip6.payload_length = - clib_host_to_net_u16 (len - rewrite_len); - } - else if (oh0) - { - oh0->ip4.length = - clib_host_to_net_u16 (b0->current_length - rewrite_len); - oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4); - if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0) - { - ouh0->udp.length = - clib_host_to_net_u16 (clib_net_to_host_u16 - (ouh0->ip4.length) - - ip4_header_bytes (&ouh0->ip4)); - } - } - else /* should never happen */ - clib_warning ("No outer header found for ESP packet"); - - b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; - - /* mbuf packet starts at ESP header */ - mb0->data_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0); - mb0->pkt_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0); - mb0->data_off = ((void *) esp0) - mb0->buf_addr; - - u32 cipher_off, cipher_len, auth_len = 0; - u32 *aad = NULL; - - u8 *digest = vlib_buffer_get_tail (b0) - trunc_size; - u64 digest_paddr = mb0->buf_iova + digest - ((u8 *) mb0->buf_addr); - - if (!is_aead && (cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC || - cipher_alg->alg == RTE_CRYPTO_CIPHER_NULL)) - { - cipher_off = sizeof (esp_header_t); - cipher_len = iv_size + pad_payload_len; - } - else /* CTR/GCM */ - { - u32 *esp_iv = (u32 *) (esp0 + 1); - esp_iv[0] = sa0->seq; - esp_iv[1] = sa0->seq_hi; - - cipher_off = sizeof (esp_header_t) + iv_size; - cipher_len = pad_payload_len; - } - - if (is_aead) - { - aad = (u32 *) priv->aad; - aad[0] = esp0->spi; - - /* aad[3] should always be 0 */ - if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0))) - { - aad[1] = clib_host_to_net_u32 (sa0->seq_hi); - aad[2] = esp0->seq; - } - else - { - aad[1] = esp0->seq; - aad[2] = 0; - } - } - else - { - auth_len = - vlib_buffer_get_tail (b0) - ((u8 *) esp0) - trunc_size; - if (ipsec_sa_is_set_USE_ESN (sa0)) - { - u32 *_digest = (u32 *) digest; - _digest[0] = clib_host_to_net_u32 (sa0->seq_hi); - auth_len += 4; - } - } - - crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len, - 0, auth_len, (u8 *) aad, digest, digest_paddr); - - trace: - if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) - { - esp_encrypt_trace_t *tr = - vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->crypto_alg = sa0->crypto_alg; - tr->integ_alg = sa0->integ_alg; - u8 *p = vlib_buffer_get_current (b0); - if (!ipsec_sa_is_set_IS_TUNNEL (sa0) && !is_tun) - p += vnet_buffer (b0)->ip.save_rewrite_length; - clib_memcpy_fast (tr->packet_data, p, sizeof (tr->packet_data)); - } - } - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - if (is_ip6) - { - vlib_node_increment_counter (vm, - (is_tun ? - dpdk_esp6_encrypt_tun_node.index : - dpdk_esp6_encrypt_node.index), - ESP_ENCRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); - - crypto_enqueue_ops (vm, cwm, dpdk_esp6_encrypt_node.index, - ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ ); - } - else - { - vlib_node_increment_counter (vm, - (is_tun ? 
- dpdk_esp4_encrypt_tun_node.index : - dpdk_esp4_encrypt_node.index), - ESP_ENCRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); - - crypto_enqueue_ops (vm, cwm, dpdk_esp4_encrypt_node.index, - ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ ); - } - - crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops); - - return from_frame->n_vectors; -} - -VLIB_NODE_FN (dpdk_esp4_encrypt_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 0); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_esp4_encrypt_node) = { - .name = "dpdk-esp4-encrypt", - .flags = VLIB_NODE_FLAG_IS_OUTPUT, - .vector_size = sizeof (u32), - .format_trace = format_esp_encrypt_trace, - .n_errors = ARRAY_LEN (esp_encrypt_error_strings), - .error_strings = esp_encrypt_error_strings, - .n_next_nodes = 1, - .next_nodes = - { - [ESP_ENCRYPT_NEXT_DROP] = "error-drop", - } -}; -/* *INDENT-ON* */ - -VLIB_NODE_FN (dpdk_esp6_encrypt_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 0); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_esp6_encrypt_node) = { - .name = "dpdk-esp6-encrypt", - .flags = VLIB_NODE_FLAG_IS_OUTPUT, - .vector_size = sizeof (u32), - .format_trace = format_esp_encrypt_trace, - .n_errors = ARRAY_LEN (esp_encrypt_error_strings), - .error_strings = esp_encrypt_error_strings, - .n_next_nodes = 1, - .next_nodes = - { - [ESP_ENCRYPT_NEXT_DROP] = "error-drop", - } -}; -/* *INDENT-ON* */ - -VLIB_NODE_FN (dpdk_esp4_encrypt_tun_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 1); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_esp4_encrypt_tun_node) = { - .name = "dpdk-esp4-encrypt-tun", - .flags = VLIB_NODE_FLAG_IS_OUTPUT, - .vector_size = sizeof (u32), - .format_trace = format_esp_encrypt_trace, - .n_errors = ARRAY_LEN (esp_encrypt_error_strings), - .error_strings = esp_encrypt_error_strings, - .n_next_nodes = 1, - .next_nodes = - { - [ESP_ENCRYPT_NEXT_DROP] = "error-drop", - } -}; -/* *INDENT-ON* */ - -VLIB_NODE_FN (dpdk_esp6_encrypt_tun_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) -{ - return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 1); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_esp6_encrypt_tun_node) = { - .name = "dpdk-esp6-encrypt-tun", - .flags = VLIB_NODE_FLAG_IS_OUTPUT, - .vector_size = sizeof (u32), - .format_trace = format_esp_encrypt_trace, - .n_errors = ARRAY_LEN (esp_encrypt_error_strings), - .error_strings = esp_encrypt_error_strings, - .n_next_nodes = 1, - .next_nodes = - { - [ESP_ENCRYPT_NEXT_DROP] = "error-drop", - } -}; -/* *INDENT-ON* */ - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/plugins/dpdk/ipsec/ipsec.c b/src/plugins/dpdk/ipsec/ipsec.c deleted file mode 100644 index e260ba7dcc4..00000000000 --- a/src/plugins/dpdk/ipsec/ipsec.c +++ /dev/null @@ -1,1087 +0,0 @@ -/* - * Copyright (c) 2017 Intel and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -dpdk_crypto_main_t dpdk_crypto_main; - -#define EMPTY_STRUCT {0} -#define NUM_CRYPTO_MBUFS 16384 - -static void -algos_init (u32 n_mains) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_alg_t *a; - - vec_validate_aligned (dcm->cipher_algs, IPSEC_CRYPTO_N_ALG - 1, 8); - - { -#define _(v,f,str) \ - dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].name = str; \ - dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].disabled = n_mains; - foreach_ipsec_crypto_alg -#undef _ - } - - /* Minimum boundary for ciphers is 4B, required by ESP */ - a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_NONE]; - a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - a->alg = RTE_CRYPTO_CIPHER_NULL; - a->boundary = 4; /* 1 */ - a->key_len = 0; - a->iv_len = 0; - - a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_128]; - a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - a->alg = RTE_CRYPTO_CIPHER_AES_CBC; - a->boundary = 16; - a->key_len = 16; - a->iv_len = 16; - - a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_192]; - a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - a->alg = RTE_CRYPTO_CIPHER_AES_CBC; - a->boundary = 16; - a->key_len = 24; - a->iv_len = 16; - - a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_256]; - a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - a->alg = RTE_CRYPTO_CIPHER_AES_CBC; - a->boundary = 16; - a->key_len = 32; - a->iv_len = 16; - - a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_128]; - a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - a->alg = RTE_CRYPTO_CIPHER_AES_CTR; - a->boundary = 4; /* 1 */ - a->key_len = 16; - a->iv_len = 8; - - a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_192]; - a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - a->alg = RTE_CRYPTO_CIPHER_AES_CTR; - a->boundary = 4; /* 1 */ - a->key_len = 24; - a->iv_len = 8; - - a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_256]; - a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - a->alg = RTE_CRYPTO_CIPHER_AES_CTR; - a->boundary = 4; /* 1 */ - a->key_len = 32; - a->iv_len = 8; - -#define AES_GCM_TYPE RTE_CRYPTO_SYM_XFORM_AEAD -#define AES_GCM_ALG RTE_CRYPTO_AEAD_AES_GCM - - a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_128]; - a->type = AES_GCM_TYPE; - a->alg = AES_GCM_ALG; - a->boundary = 4; /* 1 */ - a->key_len = 16; - a->iv_len = 8; - a->trunc_size = 16; - - a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_192]; - a->type = AES_GCM_TYPE; - a->alg = AES_GCM_ALG; - a->boundary = 4; /* 1 */ - a->key_len = 24; - a->iv_len = 8; - a->trunc_size = 16; - - a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_256]; - a->type = AES_GCM_TYPE; - a->alg = AES_GCM_ALG; - a->boundary = 4; /* 1 */ - a->key_len = 32; - a->iv_len = 8; - a->trunc_size = 16; - - vec_validate (dcm->auth_algs, IPSEC_INTEG_N_ALG - 1); - - { -#define _(v,f,str) \ - dcm->auth_algs[IPSEC_INTEG_ALG_##f].name = str; \ - dcm->auth_algs[IPSEC_INTEG_ALG_##f].disabled = n_mains; - foreach_ipsec_integ_alg -#undef _ - } - - a = &dcm->auth_algs[IPSEC_INTEG_ALG_NONE]; - a->type = RTE_CRYPTO_SYM_XFORM_AUTH; - a->alg = RTE_CRYPTO_AUTH_NULL; - a->key_len = 0; - a->trunc_size = 0; - - a = &dcm->auth_algs[IPSEC_INTEG_ALG_MD5_96]; - a->type = 
RTE_CRYPTO_SYM_XFORM_AUTH; - a->alg = RTE_CRYPTO_AUTH_MD5_HMAC; - a->key_len = 16; - a->trunc_size = 12; - - a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA1_96]; - a->type = RTE_CRYPTO_SYM_XFORM_AUTH; - a->alg = RTE_CRYPTO_AUTH_SHA1_HMAC; - a->key_len = 20; - a->trunc_size = 12; - - a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_96]; - a->type = RTE_CRYPTO_SYM_XFORM_AUTH; - a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC; - a->key_len = 32; - a->trunc_size = 12; - - a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_128]; - a->type = RTE_CRYPTO_SYM_XFORM_AUTH; - a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC; - a->key_len = 32; - a->trunc_size = 16; - - a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_384_192]; - a->type = RTE_CRYPTO_SYM_XFORM_AUTH; - a->alg = RTE_CRYPTO_AUTH_SHA384_HMAC; - a->key_len = 48; - a->trunc_size = 24; - - a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_512_256]; - a->type = RTE_CRYPTO_SYM_XFORM_AUTH; - a->alg = RTE_CRYPTO_AUTH_SHA512_HMAC; - a->key_len = 64; - a->trunc_size = 32; -} - -static u8 -cipher_alg_index (const crypto_alg_t * alg) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - - return (alg - dcm->cipher_algs); -} - -static u8 -auth_alg_index (const crypto_alg_t * alg) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - - return (alg - dcm->auth_algs); -} - -static crypto_alg_t * -cipher_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 key_len) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_alg_t *alg; - - if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) - return NULL; - - /* *INDENT-OFF* */ - vec_foreach (alg, dcm->cipher_algs) - { - if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) && - (alg->type == RTE_CRYPTO_SYM_XFORM_CIPHER) && - (cap->sym.cipher.algo == alg->alg) && - (alg->key_len == key_len)) - return alg; - if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) && - (alg->type == RTE_CRYPTO_SYM_XFORM_AEAD) && - (cap->sym.aead.algo == alg->alg) && - (alg->key_len == key_len)) - return alg; - } - /* *INDENT-ON* */ - - return NULL; -} - -static crypto_alg_t * -auth_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 trunc_size) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_alg_t *alg; - - if ((cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) || - (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)) - return NULL; - - /* *INDENT-OFF* */ - vec_foreach (alg, dcm->auth_algs) - { - if ((cap->sym.auth.algo == alg->alg) && - (alg->trunc_size == trunc_size)) - return alg; - } - /* *INDENT-ON* */ - - return NULL; -} - -static void -crypto_set_aead_xform (struct rte_crypto_sym_xform *xform, - ipsec_sa_t * sa, u8 is_outbound) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_alg_t *c; - - c = vec_elt_at_index (dcm->cipher_algs, sa->crypto_alg); - - ASSERT (c->type == RTE_CRYPTO_SYM_XFORM_AEAD); - - xform->type = RTE_CRYPTO_SYM_XFORM_AEAD; - xform->aead.algo = c->alg; - xform->aead.key.data = sa->crypto_key.data; - xform->aead.key.length = c->key_len; - xform->aead.iv.offset = - crypto_op_get_priv_offset () + offsetof (dpdk_op_priv_t, cb); - xform->aead.iv.length = 12; - xform->aead.digest_length = c->trunc_size; - xform->aead.aad_length = ipsec_sa_is_set_USE_ESN (sa) ? 
12 : 8; - xform->next = NULL; - - if (is_outbound) - xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT; - else - xform->aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT; -} - -static void -crypto_set_cipher_xform (struct rte_crypto_sym_xform *xform, - ipsec_sa_t * sa, u8 is_outbound) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_alg_t *c; - - c = vec_elt_at_index (dcm->cipher_algs, sa->crypto_alg); - - ASSERT (c->type == RTE_CRYPTO_SYM_XFORM_CIPHER); - - xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - xform->cipher.algo = c->alg; - xform->cipher.key.data = sa->crypto_key.data; - xform->cipher.key.length = c->key_len; - xform->cipher.iv.offset = - crypto_op_get_priv_offset () + offsetof (dpdk_op_priv_t, cb); - xform->cipher.iv.length = c->iv_len; - xform->next = NULL; - - if (is_outbound) - xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; - else - xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; -} - -static void -crypto_set_auth_xform (struct rte_crypto_sym_xform *xform, - ipsec_sa_t * sa, u8 is_outbound) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_alg_t *a; - - a = vec_elt_at_index (dcm->auth_algs, sa->integ_alg); - - ASSERT (a->type == RTE_CRYPTO_SYM_XFORM_AUTH); - - xform->type = RTE_CRYPTO_SYM_XFORM_AUTH; - xform->auth.algo = a->alg; - xform->auth.key.data = sa->integ_key.data; - xform->auth.key.length = a->key_len; - xform->auth.digest_length = a->trunc_size; - xform->next = NULL; - - if (is_outbound) - xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; - else - xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; -} - -clib_error_t * -create_sym_session (struct rte_cryptodev_sym_session **session, - u32 sa_idx, - crypto_resource_t * res, - crypto_worker_main_t * cwm, u8 is_outbound) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_data_t *data; - ipsec_sa_t *sa; - struct rte_crypto_sym_xform cipher_xform = { 0 }; - struct rte_crypto_sym_xform auth_xform = { 0 }; - struct rte_crypto_sym_xform *xfs; - struct rte_cryptodev_sym_session **s; - clib_error_t *error = 0; - - sa = ipsec_sa_get (sa_idx); - - if ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) | - (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) | - (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)) - { - crypto_set_aead_xform (&cipher_xform, sa, is_outbound); - xfs = &cipher_xform; - } - else - { - crypto_set_cipher_xform (&cipher_xform, sa, is_outbound); - crypto_set_auth_xform (&auth_xform, sa, is_outbound); - - if (is_outbound) - { - cipher_xform.next = &auth_xform; - xfs = &cipher_xform; - } - else - { - auth_xform.next = &cipher_xform; - xfs = &auth_xform; - } - } - - data = vec_elt_at_index (dcm->data, res->numa); - clib_spinlock_lock_if_init (&data->lockp); - - /* - * DPDK_VER >= 1708: - * Multiple worker/threads share the session for an SA - * Single session per SA, initialized for each device driver - */ - s = (void *) hash_get (data->session_by_sa_index, sa_idx); - - if (!s) - { - session[0] = rte_cryptodev_sym_session_create (data->session_h); - if (!session[0]) - { - data->session_h_failed += 1; - error = clib_error_return (0, "failed to create session header"); - goto done; - } - hash_set (data->session_by_sa_index, sa_idx, session[0]); - } - else - session[0] = s[0]; - - struct rte_mempool **mp; - mp = vec_elt_at_index (data->session_drv, res->drv_id); - ASSERT (mp[0] != NULL); - - i32 ret = - rte_cryptodev_sym_session_init (res->dev_id, session[0], xfs, mp[0]); - if (ret) - { - data->session_drv_failed[res->drv_id] += 1; - error = clib_error_return (0, "failed to init session for drv %u", - res->drv_id); - 
goto done; - } - - add_session_by_drv_and_sa_idx (session[0], data, res->drv_id, sa_idx); - -done: - clib_spinlock_unlock_if_init (&data->lockp); - return error; -} - -static void __attribute__ ((unused)) clear_and_free_obj (void *obj) -{ - struct rte_mempool *mp = rte_mempool_from_obj (obj); - - clib_memset (obj, 0, mp->elt_size); - - rte_mempool_put (mp, obj); -} - -/* This is from rte_cryptodev_pmd.h */ -static inline void * -get_session_private_data (const struct rte_cryptodev_sym_session *sess, - uint8_t driver_id) -{ -#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) - return sess->sess_private_data[driver_id]; -#else - if (unlikely (sess->nb_drivers <= driver_id)) - return 0; - - return sess->sess_data[driver_id].data; -#endif -} - -/* This is from rte_cryptodev_pmd.h */ -static inline void -set_session_private_data (struct rte_cryptodev_sym_session *sess, - uint8_t driver_id, void *private_data) -{ -#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) - sess->sess_private_data[driver_id] = private_data; -#else - if (unlikely (sess->nb_drivers <= driver_id)) - return; - sess->sess_data[driver_id].data = private_data; -#endif -} - -static clib_error_t * -dpdk_crypto_session_disposal (crypto_session_disposal_t * v, u64 ts) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_session_disposal_t *s; - void *drv_session; - u32 drv_id; - i32 ret; - - /* *INDENT-OFF* */ - vec_foreach (s, v) - { - /* ordered vector by timestamp */ - if (!(s->ts + dcm->session_timeout < ts)) - break; - - vec_foreach_index (drv_id, dcm->drv) - { - drv_session = get_session_private_data (s->session, drv_id); - if (!drv_session) - continue; - - /* - * Custom clear to avoid finding a dev_id for drv_id: - * ret = rte_cryptodev_sym_session_clear (dev_id, drv_session); - * ASSERT (!ret); - */ - clear_and_free_obj (drv_session); - - set_session_private_data (s->session, drv_id, NULL); - } - - if (rte_mempool_from_obj(s->session)) - { - ret = rte_cryptodev_sym_session_free (s->session); - ASSERT (!ret); - } - } - /* *INDENT-ON* */ - - if (s < vec_end (v)) - vec_delete (v, s - v, 0); - else - vec_reset_length (v); - - return 0; -} - -static clib_error_t * -add_del_sa_session (u32 sa_index, u8 is_add) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_data_t *data; - struct rte_cryptodev_sym_session *s; - uword *val; - u32 drv_id; - - if (is_add) - return 0; - - /* *INDENT-OFF* */ - vec_foreach (data, dcm->data) - { - clib_spinlock_lock_if_init (&data->lockp); - val = hash_get (data->session_by_sa_index, sa_index); - if (val) - { - s = (struct rte_cryptodev_sym_session *) val[0]; - vec_foreach_index (drv_id, dcm->drv) - { - val = (uword*) get_session_by_drv_and_sa_idx (data, drv_id, sa_index); - if (val) - add_session_by_drv_and_sa_idx(NULL, data, drv_id, sa_index); - } - - hash_unset (data->session_by_sa_index, sa_index); - - u64 ts = unix_time_now_nsec (); - dpdk_crypto_session_disposal (data->session_disposal, ts); - - crypto_session_disposal_t sd; - sd.ts = ts; - sd.session = s; - - vec_add1 (data->session_disposal, sd); - } - clib_spinlock_unlock_if_init (&data->lockp); - } - /* *INDENT-ON* */ - - return 0; -} - -static clib_error_t * -dpdk_ipsec_check_support (ipsec_sa_t * sa) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - - if (sa->integ_alg == IPSEC_INTEG_ALG_NONE) - switch (sa->crypto_alg) - { - case IPSEC_CRYPTO_ALG_NONE: - case IPSEC_CRYPTO_ALG_AES_GCM_128: - case IPSEC_CRYPTO_ALG_AES_GCM_192: - case IPSEC_CRYPTO_ALG_AES_GCM_256: - break; - default: - return clib_error_return (0, "unsupported 
integ-alg %U crypto-alg %U", - format_ipsec_integ_alg, sa->integ_alg, - format_ipsec_crypto_alg, sa->crypto_alg); - } - - /* XXX do we need the NONE check? */ - if (sa->crypto_alg != IPSEC_CRYPTO_ALG_NONE && - dcm->cipher_algs[sa->crypto_alg].disabled) - return clib_error_return (0, "disabled crypto-alg %U", - format_ipsec_crypto_alg, sa->crypto_alg); - - /* XXX do we need the NONE check? */ - if (sa->integ_alg != IPSEC_INTEG_ALG_NONE && - dcm->auth_algs[sa->integ_alg].disabled) - return clib_error_return (0, "disabled integ-alg %U", - format_ipsec_integ_alg, sa->integ_alg); - return NULL; -} - -static void -crypto_parse_capabilities (crypto_dev_t * dev, - const struct rte_cryptodev_capabilities *cap, - u32 n_mains) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_alg_t *alg; - u8 len, inc; - - for (; cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) - { - /* A single capability maps to multiple cipher/auth algorithms */ - switch (cap->sym.xform_type) - { - case RTE_CRYPTO_SYM_XFORM_AEAD: - case RTE_CRYPTO_SYM_XFORM_CIPHER: - inc = cap->sym.cipher.key_size.increment; - inc = inc ? inc : 1; - for (len = cap->sym.cipher.key_size.min; - len <= cap->sym.cipher.key_size.max; len += inc) - { - alg = cipher_cap_to_alg (cap, len); - if (!alg) - continue; - dev->cipher_support[cipher_alg_index (alg)] = 1; - alg->resources += vec_len (dev->free_resources); - /* At least enough resources to support one algo */ - dcm->enabled |= (alg->resources >= n_mains); - } - break; - case RTE_CRYPTO_SYM_XFORM_AUTH: - inc = cap->sym.auth.digest_size.increment; - inc = inc ? inc : 1; - for (len = cap->sym.auth.digest_size.min; - len <= cap->sym.auth.digest_size.max; len += inc) - { - alg = auth_cap_to_alg (cap, len); - if (!alg) - continue; - dev->auth_support[auth_alg_index (alg)] = 1; - alg->resources += vec_len (dev->free_resources); - /* At least enough resources to support one algo */ - dcm->enabled |= (alg->resources >= n_mains); - } - break; - default: - ; - } - } -} - -static clib_error_t * -crypto_dev_conf (u8 dev, u16 n_qp, u8 numa) -{ - struct rte_cryptodev_config dev_conf = { 0 }; - struct rte_cryptodev_qp_conf qp_conf = { 0 }; - i32 ret; - u16 qp; - char *error_str; - - dev_conf.socket_id = numa; - dev_conf.nb_queue_pairs = n_qp; - - error_str = "failed to configure crypto device %u"; - ret = rte_cryptodev_configure (dev, &dev_conf); - if (ret < 0) - return clib_error_return (0, error_str, dev); - - error_str = "failed to setup crypto device %u queue pair %u"; - qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC; - for (qp = 0; qp < n_qp; qp++) - { -#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) - ret = rte_cryptodev_queue_pair_setup (dev, qp, &qp_conf, numa, NULL); -#else - ret = rte_cryptodev_queue_pair_setup (dev, qp, &qp_conf, numa); -#endif - if (ret < 0) - return clib_error_return (0, error_str, dev, qp); - } - - error_str = "failed to start crypto device %u"; - if (rte_cryptodev_start (dev)) - return clib_error_return (0, error_str, dev); - - return 0; -} - -static void -crypto_scan_devs (u32 n_mains) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - struct rte_cryptodev *cryptodev; - struct rte_cryptodev_info info = { 0 }; - crypto_dev_t *dev; - crypto_resource_t *res; - clib_error_t *error; - u32 i; - u16 max_res_idx, res_idx, j; - u8 drv_id; - - vec_validate_init_empty (dcm->dev, rte_cryptodev_count () - 1, - (crypto_dev_t) EMPTY_STRUCT); - - for (i = 0; i < rte_cryptodev_count (); i++) - { - dev = vec_elt_at_index (dcm->dev, i); - - cryptodev = &rte_cryptodevs[i]; - 
rte_cryptodev_info_get (i, &info); - - dev->id = i; - dev->name = cryptodev->data->name; - dev->numa = rte_cryptodev_socket_id (i); - dev->features = info.feature_flags; - dev->max_qp = info.max_nb_queue_pairs; - drv_id = info.driver_id; - if (drv_id >= vec_len (dcm->drv)) - vec_validate_init_empty (dcm->drv, drv_id, - (crypto_drv_t) EMPTY_STRUCT); - vec_elt_at_index (dcm->drv, drv_id)->name = info.driver_name; - dev->drv_id = drv_id; - vec_add1 (vec_elt_at_index (dcm->drv, drv_id)->devs, i); - - if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) - continue; - - if ((error = crypto_dev_conf (i, dev->max_qp, dev->numa))) - { - clib_error_report (error); - continue; - } - - max_res_idx = dev->max_qp - 1; - - vec_validate (dev->free_resources, max_res_idx); - - res_idx = vec_len (dcm->resource); - vec_validate_init_empty_aligned (dcm->resource, res_idx + max_res_idx, - (crypto_resource_t) EMPTY_STRUCT, - CLIB_CACHE_LINE_BYTES); - - for (j = 0; j <= max_res_idx; j++) - { - vec_elt (dev->free_resources, max_res_idx - j) = res_idx + j; - res = &dcm->resource[res_idx + j]; - res->dev_id = i; - res->drv_id = drv_id; - res->qp_id = j; - res->numa = dev->numa; - res->thread_idx = (u16) ~ 0; - } - - crypto_parse_capabilities (dev, info.capabilities, n_mains); - } -} - -void -crypto_auto_placement (void) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_resource_t *res; - crypto_worker_main_t *cwm; - crypto_dev_t *dev; - u32 thread_idx, skip_master; - u16 res_idx, *idx; - u8 used; - u16 i; - - skip_master = vlib_num_workers () > 0; - - /* *INDENT-OFF* */ - vec_foreach (dev, dcm->dev) - { - vec_foreach_index (thread_idx, dcm->workers_main) - { - if (vec_len (dev->free_resources) == 0) - break; - - if (thread_idx < skip_master) - continue; - - /* Check thread is not already using the device */ - vec_foreach (idx, dev->used_resources) - if (dcm->resource[idx[0]].thread_idx == thread_idx) - continue; - - cwm = vec_elt_at_index (dcm->workers_main, thread_idx); - - used = 0; - res_idx = vec_pop (dev->free_resources); - - /* Set device only for supported algos */ - for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) - if (dev->cipher_support[i] && - cwm->cipher_resource_idx[i] == (u16) ~0) - { - dcm->cipher_algs[i].disabled--; - cwm->cipher_resource_idx[i] = res_idx; - used = 1; - } - - for (i = 0; i < IPSEC_INTEG_N_ALG; i++) - if (dev->auth_support[i] && - cwm->auth_resource_idx[i] == (u16) ~0) - { - dcm->auth_algs[i].disabled--; - cwm->auth_resource_idx[i] = res_idx; - used = 1; - } - - if (!used) - { - vec_add1 (dev->free_resources, res_idx); - continue; - } - - vec_add1 (dev->used_resources, res_idx); - - res = vec_elt_at_index (dcm->resource, res_idx); - - ASSERT (res->thread_idx == (u16) ~0); - res->thread_idx = thread_idx; - - /* Add device to vector of polling resources */ - vec_add1 (cwm->resource_idx, res_idx); - } - } - /* *INDENT-ON* */ -} - -static void -crypto_op_init (struct rte_mempool *mempool, - void *_arg __attribute__ ((unused)), - void *_obj, unsigned i __attribute__ ((unused))) -{ - struct rte_crypto_op *op = _obj; - - op->sess_type = RTE_CRYPTO_OP_WITH_SESSION; - op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; - op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - op->phys_addr = rte_mempool_virt2iova (_obj); - op->mempool = mempool; -} - -static clib_error_t * -crypto_create_crypto_op_pool (vlib_main_t * vm, u8 numa) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - dpdk_config_main_t *conf = &dpdk_config_main; - crypto_data_t *data; - u8 *pool_name; - u32 pool_priv_size = 
sizeof (struct rte_crypto_op_pool_private); - struct rte_crypto_op_pool_private *priv; - struct rte_mempool *mp; - - data = vec_elt_at_index (dcm->data, numa); - - /* Already allocated */ - if (data->crypto_op) - return NULL; - - pool_name = format (0, "crypto_pool_numa%u%c", numa, 0); - - if (conf->num_crypto_mbufs == 0) - conf->num_crypto_mbufs = NUM_CRYPTO_MBUFS; - - mp = rte_mempool_create ((char *) pool_name, conf->num_crypto_mbufs, - crypto_op_len (), 512, pool_priv_size, NULL, NULL, - crypto_op_init, NULL, numa, 0); - - vec_free (pool_name); - - if (!mp) - return clib_error_return (0, "failed to create crypto op mempool"); - - /* Initialize mempool private data */ - priv = rte_mempool_get_priv (mp); - priv->priv_size = pool_priv_size; - priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; - - data->crypto_op = mp; - - return NULL; -} - -static clib_error_t * -crypto_create_session_h_pool (vlib_main_t * vm, u8 numa) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_data_t *data; - u8 *pool_name; - struct rte_mempool *mp; - u32 elt_size; - - data = vec_elt_at_index (dcm->data, numa); - - if (data->session_h) - return NULL; - - pool_name = format (0, "session_h_pool_numa%u%c", numa, 0); - - - elt_size = rte_cryptodev_sym_get_header_session_size (); - -#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) - mp = rte_mempool_create ((char *) pool_name, DPDK_CRYPTO_NB_SESS_OBJS, - elt_size, 512, 0, NULL, NULL, NULL, NULL, numa, 0); -#else - /* XXX Experimental tag in DPDK 19.02 */ - mp = rte_cryptodev_sym_session_pool_create ((char *) pool_name, - DPDK_CRYPTO_NB_SESS_OBJS, - elt_size, 512, 0, numa); -#endif - vec_free (pool_name); - - if (!mp) - return clib_error_return (0, "failed to create crypto session mempool"); - - data->session_h = mp; - - return NULL; -} - -static clib_error_t * -crypto_create_session_drv_pool (vlib_main_t * vm, crypto_dev_t * dev) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_data_t *data; - u8 *pool_name; - struct rte_mempool *mp; - u32 elt_size; - u8 numa = dev->numa; - - data = vec_elt_at_index (dcm->data, numa); - - vec_validate (data->session_drv, dev->drv_id); - vec_validate (data->session_drv_failed, dev->drv_id); - vec_validate_aligned (data->session_by_drv_id_and_sa_index, 32, - CLIB_CACHE_LINE_BYTES); - - if (data->session_drv[dev->drv_id]) - return NULL; - - pool_name = format (0, "session_drv%u_pool_numa%u%c", dev->drv_id, numa, 0); - - elt_size = rte_cryptodev_sym_get_private_session_size (dev->id); - mp = - rte_mempool_create ((char *) pool_name, DPDK_CRYPTO_NB_SESS_OBJS, - elt_size, 512, 0, NULL, NULL, NULL, NULL, numa, 0); - - vec_free (pool_name); - - if (!mp) - return clib_error_return (0, "failed to create session drv mempool"); - - data->session_drv[dev->drv_id] = mp; - clib_spinlock_init (&data->lockp); - - return NULL; -} - -static clib_error_t * -crypto_create_pools (vlib_main_t * vm) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - clib_error_t *error = NULL; - crypto_dev_t *dev; - - /* *INDENT-OFF* */ - vec_foreach (dev, dcm->dev) - { - vec_validate_aligned (dcm->data, dev->numa, CLIB_CACHE_LINE_BYTES); - - error = crypto_create_crypto_op_pool (vm, dev->numa); - if (error) - return error; - - error = crypto_create_session_h_pool (vm, dev->numa); - if (error) - return error; - - error = crypto_create_session_drv_pool (vm, dev); - if (error) - return error; - } - /* *INDENT-ON* */ - - return NULL; -} - -static void -crypto_disable (void) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_data_t *data; - u8 i; - - 
dcm->enabled = 0; - - /* *INDENT-OFF* */ - vec_foreach (data, dcm->data) - { - rte_mempool_free (data->crypto_op); - rte_mempool_free (data->session_h); - - vec_foreach_index (i, data->session_drv) - rte_mempool_free (data->session_drv[i]); - - vec_free (data->session_drv); - clib_spinlock_free (&data->lockp); - } - /* *INDENT-ON* */ - - vec_free (dcm->data); - vec_free (dcm->workers_main); - vec_free (dcm->dev); - vec_free (dcm->resource); - vec_free (dcm->cipher_algs); - vec_free (dcm->auth_algs); -} - -static clib_error_t * -dpdk_ipsec_enable_disable (int is_enable) -{ - vlib_main_t *vm = vlib_get_main (); - vlib_thread_main_t *tm = vlib_get_thread_main (); - vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "dpdk-crypto-input"); - u32 skip_master = vlib_num_workers () > 0; - u32 n_mains = tm->n_vlib_mains; - u32 i; - - ASSERT (node); - for (i = skip_master; i < n_mains; i++) - vlib_node_set_state (vlib_mains[i], node->index, is_enable != 0 ? - VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED); - - return 0; -} - -static clib_error_t * -dpdk_ipsec_main_init (vlib_main_t * vm) -{ - ipsec_main_t *im = &ipsec_main; - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - vlib_thread_main_t *tm = vlib_get_thread_main (); - crypto_worker_main_t *cwm; - clib_error_t *error = NULL; - u32 skip_master, n_mains; - - n_mains = tm->n_vlib_mains; - skip_master = vlib_num_workers () > 0; - - algos_init (n_mains - skip_master); - - crypto_scan_devs (n_mains - skip_master); - - if (!(dcm->enabled)) - { - vlib_log_warn (dpdk_main.log_default, - "not enough DPDK crypto resources"); - crypto_disable (); - return 0; - } - - dcm->session_timeout = 10e9; - - vec_validate_init_empty_aligned (dcm->workers_main, n_mains - 1, - (crypto_worker_main_t) EMPTY_STRUCT, - CLIB_CACHE_LINE_BYTES); - - /* *INDENT-OFF* */ - vec_foreach (cwm, dcm->workers_main) - { - vec_validate_init_empty_aligned (cwm->ops, VLIB_FRAME_SIZE - 1, 0, - CLIB_CACHE_LINE_BYTES); - clib_memset (cwm->cipher_resource_idx, ~0, - IPSEC_CRYPTO_N_ALG * sizeof(*cwm->cipher_resource_idx)); - clib_memset (cwm->auth_resource_idx, ~0, - IPSEC_INTEG_N_ALG * sizeof(*cwm->auth_resource_idx)); - } - /* *INDENT-ON* */ - - crypto_auto_placement (); - - error = crypto_create_pools (vm); - if (error) - { - clib_error_report (error); - crypto_disable (); - return 0; - } - - u32 idx = ipsec_register_esp_backend ( - vm, im, "dpdk backend", "dpdk-esp4-encrypt", "dpdk-esp4-encrypt-tun", - "dpdk-esp4-decrypt", "dpdk-esp4-decrypt", "dpdk-esp6-encrypt", - "dpdk-esp6-encrypt-tun", "dpdk-esp6-decrypt", "dpdk-esp6-decrypt", - "error-drop", dpdk_ipsec_check_support, add_del_sa_session, - dpdk_ipsec_enable_disable); - int rv; - if (im->esp_current_backend == ~0) - { - rv = ipsec_select_esp_backend (im, idx); - ASSERT (rv == 0); - } - return 0; -} - -VLIB_MAIN_LOOP_ENTER_FUNCTION (dpdk_ipsec_main_init); - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/plugins/dpdk/ipsec/ipsec.h b/src/plugins/dpdk/ipsec/ipsec.h deleted file mode 100644 index 741674376e3..00000000000 --- a/src/plugins/dpdk/ipsec/ipsec.h +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Copyright (c) 2017 Intel and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef __DPDK_IPSEC_H__ -#define __DPDK_IPSEC_H__ - -#include -#include -#include - -#undef always_inline -#include -#include -#include - -#if CLIB_DEBUG > 0 -#define always_inline static inline -#else -#define always_inline static inline __attribute__ ((__always_inline__)) -#endif - -#define DPDK_CRYPTO_N_QUEUE_DESC 2048 -#define DPDK_CRYPTO_NB_SESS_OBJS 20000 - -#define foreach_dpdk_crypto_input_next \ - _(DROP, "error-drop") \ - _(IP4_LOOKUP, "ip4-lookup") \ - _(IP6_LOOKUP, "ip6-lookup") \ - _(INTERFACE_OUTPUT, "interface-output") \ - _(MIDCHAIN, "adj-midchain-tx") \ - _(DECRYPT4_POST, "dpdk-esp4-decrypt-post") \ - _(DECRYPT6_POST, "dpdk-esp6-decrypt-post") - -typedef enum -{ -#define _(f,s) DPDK_CRYPTO_INPUT_NEXT_##f, - foreach_dpdk_crypto_input_next -#undef _ - DPDK_CRYPTO_INPUT_N_NEXT, -} dpdk_crypto_input_next_t; - -#define MAX_QP_PER_LCORE 16 - -typedef struct -{ - u32 salt; - u32 iv[2]; - u32 cnt; -} dpdk_gcm_cnt_blk; - -typedef struct -{ - u32 next; - u32 bi; - u8 encrypt; - CLIB_ALIGN_MARK (mark0, 16); - dpdk_gcm_cnt_blk cb; - u8 aad[16]; - u8 icv[32]; /* XXX last 16B in next cache line */ -} dpdk_op_priv_t; - -typedef struct -{ - u16 *resource_idx; - struct rte_crypto_op **ops; - u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG]; - u16 auth_resource_idx[IPSEC_INTEG_N_ALG]; - CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); -} crypto_worker_main_t; - -typedef struct -{ - CLIB_ALIGN_MARK (pad, 8); /* align up to 8 bytes for 32bit builds */ - char *name; - enum rte_crypto_sym_xform_type type; - u32 alg; - u8 key_len; - u8 iv_len; - u8 trunc_size; - u8 boundary; - u8 disabled; - u8 resources; -} crypto_alg_t; - -typedef struct -{ - u16 *free_resources; - u16 *used_resources; - u8 cipher_support[IPSEC_CRYPTO_N_ALG]; - u8 auth_support[IPSEC_INTEG_N_ALG]; - u8 drv_id; - u8 numa; - u16 id; - const char *name; - u32 max_qp; - u64 features; -} crypto_dev_t; - -typedef struct -{ - const char *name; - u16 *devs; -} crypto_drv_t; - -typedef struct -{ - u16 thread_idx; - u8 remove; - u8 drv_id; - u8 dev_id; - u8 numa; - u16 qp_id; - u16 inflights[2]; - u16 n_ops; - u16 __unused; - struct rte_crypto_op *ops[VLIB_FRAME_SIZE]; - u32 bi[VLIB_FRAME_SIZE]; - CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); -} crypto_resource_t; - -typedef struct -{ - u64 ts; - struct rte_cryptodev_sym_session *session; -} crypto_session_disposal_t; - -typedef struct -{ - struct rte_cryptodev_sym_session *session; - u64 dev_mask; - CLIB_ALIGN_MARK (pad, 16); /* align up to 16 bytes for 32bit builds */ -} crypto_session_by_drv_t; - -typedef struct -{ - struct rte_mempool *crypto_op; - struct rte_mempool *session_h; - struct rte_mempool **session_drv; - crypto_session_disposal_t *session_disposal; - uword *session_by_sa_index; - u64 crypto_op_get_failed; - u64 session_h_failed; - u64 *session_drv_failed; - crypto_session_by_drv_t *session_by_drv_id_and_sa_index; - clib_spinlock_t lockp; - /* Required for vec_validate_aligned */ - CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); -} crypto_data_t; - -typedef struct -{ - crypto_worker_main_t *workers_main; - crypto_dev_t *dev; - crypto_resource_t *resource; - crypto_alg_t 
*cipher_algs; - crypto_alg_t *auth_algs; - crypto_data_t *data; - crypto_drv_t *drv; - u64 session_timeout; /* nsec */ - u8 enabled; -} dpdk_crypto_main_t; - -extern dpdk_crypto_main_t dpdk_crypto_main; - -static const u8 pad_data[] = - { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0 }; - -void crypto_auto_placement (void); - -clib_error_t *create_sym_session (struct rte_cryptodev_sym_session **session, - u32 sa_idx, crypto_resource_t * res, - crypto_worker_main_t * cwm, u8 is_outbound); - -static_always_inline u32 -crypto_op_len (void) -{ - const u32 align = 4; - u32 op_size = - sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op); - - return ((op_size + align - 1) & ~(align - 1)) + sizeof (dpdk_op_priv_t); -} - -static_always_inline u32 -crypto_op_get_priv_offset (void) -{ - const u32 align = 16; - u32 offset; - - offset = sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op); - offset = (offset + align - 1) & ~(align - 1); - - return offset; -} - -static_always_inline dpdk_op_priv_t * -crypto_op_get_priv (struct rte_crypto_op * op) -{ - return (dpdk_op_priv_t *) (((u8 *) op) + crypto_op_get_priv_offset ()); -} - - -static_always_inline void -add_session_by_drv_and_sa_idx (struct rte_cryptodev_sym_session *session, - crypto_data_t * data, u32 drv_id, u32 sa_idx) -{ - crypto_session_by_drv_t *sbd; - vec_validate_aligned (data->session_by_drv_id_and_sa_index, sa_idx, - CLIB_CACHE_LINE_BYTES); - sbd = vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx); - sbd->dev_mask |= 1L << drv_id; - sbd->session = session; -} - -static_always_inline struct rte_cryptodev_sym_session * -get_session_by_drv_and_sa_idx (crypto_data_t * data, u32 drv_id, u32 sa_idx) -{ - crypto_session_by_drv_t *sess_by_sa; - if (_vec_len (data->session_by_drv_id_and_sa_index) <= sa_idx) - return NULL; - sess_by_sa = - vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx); - return (sess_by_sa->dev_mask & (1L << drv_id)) ? 
sess_by_sa->session : NULL; -} - -static_always_inline clib_error_t * -crypto_get_session (struct rte_cryptodev_sym_session ** session, - u32 sa_idx, - crypto_resource_t * res, - crypto_worker_main_t * cwm, u8 is_outbound) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_data_t *data; - struct rte_cryptodev_sym_session *sess; - - data = vec_elt_at_index (dcm->data, res->numa); - sess = get_session_by_drv_and_sa_idx (data, res->drv_id, sa_idx); - - if (PREDICT_FALSE (!sess)) - return create_sym_session (session, sa_idx, res, cwm, is_outbound); - - session[0] = sess; - - return NULL; -} - -static_always_inline u16 -get_resource (crypto_worker_main_t * cwm, ipsec_sa_t * sa) -{ - u16 cipher_res = cwm->cipher_resource_idx[sa->crypto_alg]; - u16 auth_res = cwm->auth_resource_idx[sa->integ_alg]; - u8 is_aead; - - /* Not allowed to setup SA with no-aead-cipher/NULL or NULL/NULL */ - - is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) || - (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) || - (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)); - - if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE) - return auth_res; - - if (cipher_res == auth_res) - return cipher_res; - - if (is_aead) - return cipher_res; - - return (u16) ~ 0; -} - -static_always_inline i32 -crypto_alloc_ops (u8 numa, struct rte_crypto_op ** ops, u32 n) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_data_t *data = vec_elt_at_index (dcm->data, numa); - i32 ret; - - ret = rte_mempool_get_bulk (data->crypto_op, (void **) ops, n); - - /* *INDENT-OFF* */ - data->crypto_op_get_failed += ! !ret; - /* *INDENT-ON* */ - - return ret; -} - -static_always_inline void -crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_data_t *data = vec_elt_at_index (dcm->data, numa); - - if (!n) - return; - - rte_mempool_put_bulk (data->crypto_op, (void **) ops, n); -} - -static_always_inline void -crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm, - u32 node_index, u32 error, u8 numa, u8 encrypt) -{ - dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_resource_t *res; - u16 *res_idx; - - /* *INDENT-OFF* */ - vec_foreach (res_idx, cwm->resource_idx) - { - u16 enq, n_ops; - res = vec_elt_at_index (dcm->resource, res_idx[0]); - - if (!res->n_ops) - continue; - - n_ops = (DPDK_CRYPTO_N_QUEUE_DESC / 2) - res->inflights[encrypt]; - n_ops = res->n_ops < n_ops ? 
res->n_ops : n_ops; - enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id, - res->ops, n_ops); - ASSERT (n_ops == enq); - res->inflights[encrypt] += enq; - - if (PREDICT_FALSE (enq < res->n_ops)) - { - crypto_free_ops (numa, &res->ops[enq], res->n_ops - enq); - vlib_buffer_free (vm, &res->bi[enq], res->n_ops - enq); - - vlib_node_increment_counter (vm, node_index, error, - res->n_ops - enq); - } - res->n_ops = 0; - } - /* *INDENT-ON* */ -} - -static_always_inline void -crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi) -{ - icb->salt = salt; - icb->iv[0] = seq; - icb->iv[1] = seq_hi; -} - -static_always_inline void -crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0, - struct rte_crypto_op *op, void *session, - u32 cipher_off, u32 cipher_len, - u32 auth_off, u32 auth_len, - u8 * aad, u8 * digest, u64 digest_paddr) -{ - struct rte_crypto_sym_op *sym_op; - - sym_op = (struct rte_crypto_sym_op *) (op + 1); - - sym_op->m_src = mb0; - sym_op->session = session; - - if (is_aead) - { - sym_op->aead.data.offset = cipher_off; - sym_op->aead.data.length = cipher_len; - - sym_op->aead.aad.data = aad; - sym_op->aead.aad.phys_addr = - op->phys_addr + (uintptr_t) aad - (uintptr_t) op; - - sym_op->aead.digest.data = digest; - sym_op->aead.digest.phys_addr = digest_paddr; - } - else - { - sym_op->cipher.data.offset = cipher_off; - sym_op->cipher.data.length = cipher_len; - - sym_op->auth.data.offset = auth_off; - sym_op->auth.data.length = auth_len; - - sym_op->auth.digest.data = digest; - sym_op->auth.digest.phys_addr = digest_paddr; - } -} - -#endif /* __DPDK_IPSEC_H__ */ - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */
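
A note on session teardown in the removed ipsec.c: add_del_sa_session() never frees a cryptodev session synchronously. It unlinks the session and appends it to a timestamp-ordered disposal vector; dpdk_crypto_session_disposal() then frees only entries older than dcm->session_timeout (set to 10e9 ns above), so ops still in flight against the old SA can drain first. A minimal sketch of that deferred-free pattern, with a plain array standing in for the VPP vector:

#include <stdio.h>
#include <stdint.h>

#define TIMEOUT_NS 10000000000ULL /* matches dcm->session_timeout = 10e9 */

typedef struct
{
  uint64_t ts;  /* time the session was queued for disposal */
  int session;  /* stand-in for struct rte_cryptodev_sym_session * */
} disposal_t;

/* Free entries with ts + TIMEOUT_NS < now. Because entries are appended
 * in time order, we can stop at the first entry that is still too young;
 * the real code then vec_delete()s the freed prefix. */
static int
dispose (disposal_t *v, int n, uint64_t now)
{
  int i;
  for (i = 0; i < n; i++)
    {
      if (!(v[i].ts + TIMEOUT_NS < now))
        break;
      printf ("freeing session %d\n", v[i].session);
    }
  return i;
}

int
main (void)
{
  disposal_t v[3] = { { 1, 100 }, { 2, 101 }, { 12000000000ULL, 102 } };
  int freed = dispose (v, 3, 13000000000ULL);

  printf ("freed %d of 3\n", freed); /* the third entry is still aging */
  return 0;
}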
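
The crypto_op_len() / crypto_op_get_priv_offset() / crypto_op_get_priv() trio in the removed ipsec.h packs a dpdk_op_priv_t directly behind DPDK's rte_crypto_op and rte_crypto_sym_op inside the same mempool element (crypto_op_len() is the element size passed to rte_mempool_create() in crypto_create_crypto_op_pool()), so per-packet metadata needs no extra allocation. The sketch below reproduces just the alignment arithmetic; the sizes are stand-ins, chosen as 16-byte multiples like the real structs:

#include <stdio.h>
#include <stdint.h>

/* Stand-in sizes; the real code uses sizeof (struct rte_crypto_op),
 * sizeof (struct rte_crypto_sym_op) and sizeof (dpdk_op_priv_t). */
#define OP_SIZE     64
#define SYM_OP_SIZE 112
#define PRIV_SIZE   80

/* Round offset up to the next multiple of align (a power of two). */
static uint32_t
round_up (uint32_t offset, uint32_t align)
{
  return (offset + align - 1) & ~(align - 1);
}

int
main (void)
{
  /* crypto_op_get_priv_offset(): private data starts after the two DPDK
   * op structs, 16-byte aligned -- dpdk_op_priv_t carries a 16-byte
   * alignment mark in front of its GCM counter block. */
  uint32_t priv_offset = round_up (OP_SIZE + SYM_OP_SIZE, 16);

  /* crypto_op_len(): mempool element size = the two op structs rounded
   * up to 4 bytes, plus the private area. */
  uint32_t op_len = round_up (OP_SIZE + SYM_OP_SIZE, 4) + PRIV_SIZE;

  printf ("priv offset %u, element size %u\n", priv_offset, op_len);
  return 0;
}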
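
create_sym_session() shares a single rte_cryptodev_sym_session per SA across workers and initializes it once per driver; on the fast path, crypto_get_session() consults get_session_by_drv_and_sa_idx(), which answers "has driver N initialized the session for SA M?" with one u64 bit test instead of a hash lookup, falling back to session creation under the per-numa spinlock on a miss. A stand-alone sketch of that bitmap cache, with simplified types:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for crypto_session_by_drv_t. */
typedef struct
{
  void *session;     /* shared session, one per SA */
  uint64_t dev_mask; /* bit n set: driver n has initialized the session */
} session_by_drv_t;

static void
add_session (session_by_drv_t *e, uint32_t drv_id, void *session)
{
  e->dev_mask |= 1ULL << drv_id;
  e->session = session;
}

static void *
get_session (session_by_drv_t *e, uint32_t drv_id)
{
  return (e->dev_mask & (1ULL << drv_id)) ? e->session : NULL;
}

int
main (void)
{
  session_by_drv_t e = { 0 };
  int s = 0; /* stands in for a real session object */

  add_session (&e, 3, &s);
  printf ("drv 3: %p, drv 5: %p\n", get_session (&e, 3), get_session (&e, 5));
  return 0;
}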
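
crypto_enqueue_ops() applies simple backpressure: a queue pair may hold at most half of DPDK_CRYPTO_N_QUEUE_DESC outstanding ops per direction, the burst is clamped to the remaining room, and anything that cannot be enqueued is freed and counted against the node's error counter rather than retried. A small sketch of the clamping, with the constant taken from the header above:

#include <stdio.h>
#include <stdint.h>

#define N_QUEUE_DESC 2048 /* matches DPDK_CRYPTO_N_QUEUE_DESC */

/* Clamp a pending burst to the free half-queue room for one direction. */
static uint16_t
clamp_burst (uint16_t pending, uint16_t inflight)
{
  uint16_t room = (N_QUEUE_DESC / 2) - inflight;
  return pending < room ? pending : room;
}

int
main (void)
{
  /* 300 ops pending, 900 already in flight: only 124 may be enqueued;
   * the caller frees the remaining 176 and bumps the error counter. */
  uint16_t n = clamp_burst (300, 900);

  printf ("enqueue %u, drop %u\n", n, (uint16_t) (300 - n));
  return 0;
}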
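
Finally, crypto_set_icb() writes the AES-GCM initial counter block into the op's private area: 4 bytes of SA salt followed by an 8-byte IV built from the ESP sequence number (and seq_hi when extended sequence numbers are enabled). Only these first 12 bytes are handed to the device (xform->aead.iv.length is set to 12 in crypto_set_aead_xform()); the trailing counter word is presumed managed by the PMD. A layout sketch:

#include <stdio.h>
#include <stdint.h>

/* Mirrors dpdk_gcm_cnt_blk: salt (4B) | IV (8B) | counter (4B). */
typedef struct
{
  uint32_t salt;
  uint32_t iv[2];
  uint32_t cnt; /* trailing counter word, left to the device/PMD */
} gcm_cnt_blk_t;

static void
set_icb (gcm_cnt_blk_t *icb, uint32_t salt, uint32_t seq, uint32_t seq_hi)
{
  icb->salt = salt;
  icb->iv[0] = seq;    /* low 32 bits of the ESP sequence number */
  icb->iv[1] = seq_hi; /* high 32 bits when ESN is enabled, else 0 */
}

int
main (void)
{
  gcm_cnt_blk_t icb = { 0 };

  set_icb (&icb, 0xdeadbeef, 42, 0);
  printf ("salt %08x iv %08x %08x\n", icb.salt, icb.iv[0], icb.iv[1]);
  return 0;
}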