From 6c8533d4c18460029adbe0825b8dee257805fbc8 Mon Sep 17 00:00:00 2001 From: Fan Zhang Date: Thu, 25 Feb 2021 12:53:36 +0000 Subject: dpdk: deprecate ipsec backend Type: refactor DPDK crypto devices are now accessible via the async infra, so there is no need for the DPDK ipsec plugin. In addition this patch fixes the problem that cryptodev backend not working when master core and worker cores lies in different numa nodes. Signed-off-by: Fan Zhang Signed-off-by: Neale Ranns Change-Id: Ie8516bea706248c7bc25abac53a9c656bb8247d9 --- extras/deprecated/dpdk-ipsec/cli.c | 674 ++++++++++++ extras/deprecated/dpdk-ipsec/crypto_node.c | 330 ++++++ extras/deprecated/dpdk-ipsec/dir.dox | 27 + .../deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md | 87 ++ extras/deprecated/dpdk-ipsec/esp_decrypt.c | 739 +++++++++++++ extras/deprecated/dpdk-ipsec/esp_encrypt.c | 709 +++++++++++++ extras/deprecated/dpdk-ipsec/ipsec.c | 1087 ++++++++++++++++++++ extras/deprecated/dpdk-ipsec/ipsec.h | 404 ++++++++ 8 files changed, 4057 insertions(+) create mode 100644 extras/deprecated/dpdk-ipsec/cli.c create mode 100644 extras/deprecated/dpdk-ipsec/crypto_node.c create mode 100644 extras/deprecated/dpdk-ipsec/dir.dox create mode 100644 extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md create mode 100644 extras/deprecated/dpdk-ipsec/esp_decrypt.c create mode 100644 extras/deprecated/dpdk-ipsec/esp_encrypt.c create mode 100644 extras/deprecated/dpdk-ipsec/ipsec.c create mode 100644 extras/deprecated/dpdk-ipsec/ipsec.h (limited to 'extras') diff --git a/extras/deprecated/dpdk-ipsec/cli.c b/extras/deprecated/dpdk-ipsec/cli.c new file mode 100644 index 00000000000..8fdda020a77 --- /dev/null +++ b/extras/deprecated/dpdk-ipsec/cli.c @@ -0,0 +1,674 @@ +/* + * Copyright (c) 2017 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +static u8 * +format_crypto_resource (u8 * s, va_list * args) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + u32 indent = va_arg (*args, u32); + u32 res_idx = va_arg (*args, u32); + + crypto_resource_t *res = vec_elt_at_index (dcm->resource, res_idx); + + + s = format (s, "%U thr_id %3d qp %2u dec_inflight %u, enc_inflights %u\n", + format_white_space, indent, (i16) res->thread_idx, + res->qp_id, res->inflights[0], res->inflights[1]); + + return s; +} + +static u8 * +format_crypto (u8 * s, va_list * args) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_dev_t *dev = va_arg (*args, crypto_dev_t *); + crypto_drv_t *drv = vec_elt_at_index (dcm->drv, dev->drv_id); + u64 feat, mask; + u32 i; + char *pre = " "; + + s = format (s, "%-25s%-20s%-10s\n", dev->name, drv->name, + rte_cryptodevs[dev->id].data->dev_started ? 
"up" : "down"); + s = format (s, " numa_node %u, max_queues %u\n", dev->numa, dev->max_qp); + + if (dev->features) + { + for (mask = 1; mask != 0; mask <<= 1) + { + feat = dev->features & mask; + if (feat) + { + s = + format (s, "%s%s", pre, + rte_cryptodev_get_feature_name (feat)); + pre = ", "; + } + } + s = format (s, "\n"); + } + + s = format (s, " Cipher:"); + pre = " "; + for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) + if (dev->cipher_support[i]) + { + s = format (s, "%s%s", pre, dcm->cipher_algs[i].name); + pre = ", "; + } + s = format (s, "\n"); + + s = format (s, " Auth:"); + pre = " "; + for (i = 0; i < IPSEC_INTEG_N_ALG; i++) + if (dev->auth_support[i]) + { + s = format (s, "%s%s", pre, dcm->auth_algs[i].name); + pre = ", "; + } + s = format (s, "\n"); + + struct rte_cryptodev_stats stats; + rte_cryptodev_stats_get (dev->id, &stats); + + s = + format (s, + " enqueue %-10lu dequeue %-10lu enqueue_err %-10lu dequeue_err %-10lu \n", + stats.enqueued_count, stats.dequeued_count, + stats.enqueue_err_count, stats.dequeue_err_count); + + u16 *res_idx; + s = format (s, " free_resources %u :", vec_len (dev->free_resources)); + + u32 indent = format_get_indent (s); + s = format (s, "\n"); + + /* *INDENT-OFF* */ + vec_foreach (res_idx, dev->free_resources) + s = format (s, "%U", format_crypto_resource, indent, res_idx[0]); + /* *INDENT-ON* */ + + s = format (s, " used_resources %u :", vec_len (dev->used_resources)); + indent = format_get_indent (s); + + s = format (s, "\n"); + + /* *INDENT-OFF* */ + vec_foreach (res_idx, dev->used_resources) + s = format (s, "%U", format_crypto_resource, indent, res_idx[0]); + /* *INDENT-ON* */ + + s = format (s, "\n"); + + return s; +} + + +static clib_error_t * +clear_crypto_stats_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_dev_t *dev; + + /* *INDENT-OFF* */ + vec_foreach (dev, dcm->dev) + rte_cryptodev_stats_reset (dev->id); + /* *INDENT-ON* */ 
+ + return NULL; +} + +/*? + * This command is used to clear the DPDK Crypto device statistics. + * + * @cliexpar + * Example of how to clear the DPDK Crypto device statistics: + * @cliexstart{clear dpdk crypto devices statistics} + * vpp# clear dpdk crypto devices statistics + * @cliexend + * Example of clearing the DPDK Crypto device statistic data: + * @cliexend +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (clear_dpdk_crypto_stats, static) = { + .path = "clear dpdk crypto devices statistics", + .short_help = "clear dpdk crypto devices statistics", + .function = clear_crypto_stats_fn, +}; +/* *INDENT-ON* */ + + +static clib_error_t * +show_dpdk_crypto_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_dev_t *dev; + + /* *INDENT-OFF* */ + vec_foreach (dev, dcm->dev) + vlib_cli_output (vm, "%U", format_crypto, dev); + /* *INDENT-ON* */ + + return NULL; +} + +/*? + * This command is used to display the DPDK Crypto device information. 
+ * + * @cliexpar + * Example of how to display the DPDK Crypto device information: + * @cliexstart{show dpdk crypto devices} + * vpp# show dpdk crypto devices + * aesni_mb0 crypto_aesni_mb up + * numa_node 0, max_queues 4 + * SYMMETRIC_CRYPTO, SYM_OPERATION_CHAINING, CPU_AVX2, CPU_AESNI + * Cipher: aes-cbc-128, aes-cbc-192, aes-cbc-256, aes-ctr-128, aes-ctr-192, aes-ctr-256, aes-gcm-128, aes-gcm-192, aes-gcm-256 + * Auth: md5-96, sha1-96, sha-256-128, sha-384-192, sha-512-256 + * enqueue 2 dequeue 2 enqueue_err 0 dequeue_err 0 + * free_resources 3 : + * thr_id -1 qp 3 inflight 0 + * thr_id -1 qp 2 inflight 0 + * thr_id -1 qp 1 inflight 0 + * used_resources 1 : + * thr_id 1 qp 0 inflight 0 + * @cliexend + * Example of displaying the DPDK Crypto device data when enabled: + * @cliexend +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_dpdk_crypto, static) = { + .path = "show dpdk crypto devices", + .short_help = "show dpdk crypto devices", + .function = show_dpdk_crypto_fn, +}; + +/* *INDENT-ON* */ +static u8 * +format_crypto_worker (u8 * s, va_list * args) +{ + u32 thread_idx = va_arg (*args, u32); + u8 verbose = (u8) va_arg (*args, u32); + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_worker_main_t *cwm; + crypto_resource_t *res; + u16 *res_idx; + char *pre, *ind; + u32 i; + + cwm = vec_elt_at_index (dcm->workers_main, thread_idx); + + s = format (s, "Thread %u (%v):\n", thread_idx, + vlib_worker_threads[thread_idx].name); + + /* *INDENT-OFF* */ + vec_foreach (res_idx, cwm->resource_idx) + { + ind = " "; + res = vec_elt_at_index (dcm->resource, res_idx[0]); + s = format (s, "%s%-20s dev-id %2u queue-pair %2u\n", + ind, vec_elt_at_index (dcm->dev, res->dev_id)->name, + res->dev_id, res->qp_id); + + ind = " "; + if (verbose) + { + s = format (s, "%sCipher:", ind); + pre = " "; + for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) + if (cwm->cipher_resource_idx[i] == res_idx[0]) + { + s = format (s, "%s%s", pre, dcm->cipher_algs[i].name); + pre = ", "; + } + s = format (s, 
"\n"); + + s = format (s, "%sAuth:", ind); + pre = " "; + for (i = 0; i < IPSEC_INTEG_N_ALG; i++) + if (cwm->auth_resource_idx[i] == res_idx[0]) + { + s = format (s, "%s%s", pre, dcm->auth_algs[i].name); + pre = ", "; + } + s = format (s, "\n"); + } + } + /* *INDENT-ON* */ + + return s; +} + +static clib_error_t * +common_crypto_placement_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd, u8 verbose) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + clib_error_t *error = NULL; + u32 i; + u8 skip_master; + + if (!dcm->enabled) + { + vlib_cli_output (vm, "\nDPDK Cryptodev support is disabled\n"); + return error; + } + + skip_master = vlib_num_workers () > 0; + + /* *INDENT-OFF* */ + vec_foreach_index (i, dcm->workers_main) + { + if (i < skip_master) + continue; + + vlib_cli_output (vm, "%U\n", format_crypto_worker, i, verbose); + } + /* *INDENT-ON* */ + + return error; +} + +static clib_error_t * +show_dpdk_crypto_placement_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + return common_crypto_placement_fn (vm, input, cmd, 0); +} + +static clib_error_t * +show_dpdk_crypto_placement_v_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + return common_crypto_placement_fn (vm, input, cmd, 1); +} + +/*? + * This command is used to display the DPDK Crypto device placement. 
+ * + * @cliexpar + * Example of displaying the DPDK Crypto device placement: + * @cliexstart{show dpdk crypto placement} + * vpp# show dpdk crypto placement + * Thread 1 (vpp_wk_0): + * cryptodev_aesni_mb_p dev-id 0 queue-pair 0 + * cryptodev_aesni_gcm_ dev-id 1 queue-pair 0 + * + * Thread 2 (vpp_wk_1): + * cryptodev_aesni_mb_p dev-id 0 queue-pair 1 + * cryptodev_aesni_gcm_ dev-id 1 queue-pair 1 + * @cliexend +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_dpdk_crypto_placement, static) = { + .path = "show dpdk crypto placement", + .short_help = "show dpdk crypto placement", + .function = show_dpdk_crypto_placement_fn, +}; +/* *INDENT-ON* */ + +/*? + * This command is used to display the DPDK Crypto device placement + * with verbose output. + * + * @cliexpar + * Example of displaying the DPDK Crypto device placement verbose: + * @cliexstart{show dpdk crypto placement verbose} + * vpp# show dpdk crypto placement verbose + * Thread 1 (vpp_wk_0): + * cryptodev_aesni_mb_p dev-id 0 queue-pair 0 + * Cipher: aes-cbc-128, aes-cbc-192, aes-cbc-256, aes-ctr-128, aes-ctr-192, aes-ctr-256 + * Auth: md5-96, sha1-96, sha-256-128, sha-384-192, sha-512-256 + * cryptodev_aesni_gcm_ dev-id 1 queue-pair 0 + * Cipher: aes-gcm-128, aes-gcm-192, aes-gcm-256 + * Auth: + * + * Thread 2 (vpp_wk_1): + * cryptodev_aesni_mb_p dev-id 0 queue-pair 1 + * Cipher: aes-cbc-128, aes-cbc-192, aes-cbc-256, aes-ctr-128, aes-ctr-192, aes-ctr-256 + * Auth: md5-96, sha1-96, sha-256-128, sha-384-192, sha-512-256 + * cryptodev_aesni_gcm_ dev-id 1 queue-pair 1 + * Cipher: aes-gcm-128, aes-gcm-192, aes-gcm-256 + * Auth: + * + * @cliexend +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_dpdk_crypto_placement_v, static) = { + .path = "show dpdk crypto placement verbose", + .short_help = "show dpdk crypto placement verbose", + .function = show_dpdk_crypto_placement_v_fn, +}; +/* *INDENT-ON* */ + +static clib_error_t * +set_dpdk_crypto_placement_fn (vlib_main_t * vm, + unformat_input_t * input, + 
vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_worker_main_t *cwm; + crypto_dev_t *dev; + u32 thread_idx, i; + u16 res_idx, *idx; + u8 dev_idx, auto_en = 0; + + if (!unformat_user (input, unformat_line_input, line_input)) + return clib_error_return (0, "invalid syntax"); + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "%u %u", &dev_idx, &thread_idx)) + ; + else if (unformat (line_input, "auto")) + auto_en = 1; + else + { + unformat_free (line_input); + return clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + } + } + + unformat_free (line_input); + + if (auto_en) + { + crypto_auto_placement (); + return 0; + } + + /* TODO support device name */ + + if (!(dev_idx < vec_len (dcm->dev))) + return clib_error_return (0, "please specify valid device index"); + + if (thread_idx != (u32) ~ 0 && !(thread_idx < vec_len (dcm->workers_main))) + return clib_error_return (0, "invalid thread index"); + + dev = vec_elt_at_index (dcm->dev, dev_idx); + if (!(vec_len (dev->free_resources))) + return clib_error_return (0, "all device resources are being used"); + + /* Check thread is not already using the device */ + /* *INDENT-OFF* */ + vec_foreach (idx, dev->used_resources) + if (dcm->resource[idx[0]].thread_idx == thread_idx) + return clib_error_return (0, "thread %u already using device %u", + thread_idx, dev_idx); + /* *INDENT-ON* */ + + res_idx = vec_pop (dev->free_resources); + vec_add1 (dev->used_resources, res_idx); + + cwm = vec_elt_at_index (dcm->workers_main, thread_idx); + + ASSERT (dcm->resource[res_idx].thread_idx == (u16) ~ 0); + dcm->resource[res_idx].thread_idx = thread_idx; + + /* Add device to vector of polling resources */ + vec_add1 (cwm->resource_idx, res_idx); + + /* Set device as default for all supported algos */ + for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) + if 
(dev->cipher_support[i]) + { + if (cwm->cipher_resource_idx[i] == (u16) ~ 0) + dcm->cipher_algs[i].disabled--; + cwm->cipher_resource_idx[i] = res_idx; + } + + for (i = 0; i < IPSEC_INTEG_N_ALG; i++) + if (dev->auth_support[i]) + { + if (cwm->auth_resource_idx[i] == (u16) ~ 0) + dcm->auth_algs[i].disabled--; + cwm->auth_resource_idx[i] = res_idx; + } + + /* Check if any unused resource */ + + u8 used = 0; + /* *INDENT-OFF* */ + vec_foreach (idx, cwm->resource_idx) + { + if (idx[0] == res_idx) + continue; + + for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) + used |= cwm->cipher_resource_idx[i] == idx[0]; + + for (i = 0; i < IPSEC_INTEG_N_ALG; i++) + used |= cwm->auth_resource_idx[i] == idx[0]; + + vec_elt_at_index (dcm->resource, idx[0])->remove = !used; + } + /* *INDENT-ON* */ + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (set_dpdk_crypto_placement, static) = { + .path = "set dpdk crypto placement", + .short_help = "set dpdk crypto placement ( | auto)", + .function = set_dpdk_crypto_placement_fn, +}; +/* *INDENT-ON* */ + +/* + * The thread will not enqueue more operations to the device but will poll + * from it until there are no more inflight operations. 
+*/ +static void +dpdk_crypto_clear_resource (u16 res_idx) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_resource_t *res = vec_elt_at_index (dcm->resource, res_idx); + crypto_worker_main_t *cwm = &dcm->workers_main[res->thread_idx]; + u32 i; + + for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) + if (cwm->cipher_resource_idx[i] == res_idx) + { + cwm->cipher_resource_idx[i] = (u16) ~ 0; + dcm->cipher_algs[i].disabled++; + } + + for (i = 0; i < IPSEC_INTEG_N_ALG; i++) + if (cwm->auth_resource_idx[i] == res_idx) + { + cwm->auth_resource_idx[i] = (u16) ~ 0; + dcm->auth_algs[i].disabled++; + } + + /* Fully remove device on crypto_node once there are no inflights */ + res->remove = 1; +} + +static clib_error_t * +clear_dpdk_crypto_placement_fn (vlib_main_t * vm, + unformat_input_t * + input, vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_dev_t *dev; + u32 thread_idx = (u32) ~ 0; + u16 *res_idx; + u8 dev_idx = (u8) ~ 0; + u8 free_all = 0; + + if (!unformat_user (input, unformat_line_input, line_input)) + return clib_error_return (0, "invalid syntax"); + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "%u %u", &dev_idx, &thread_idx)) + ; + else if (unformat (line_input, "%u", &dev_idx)) + free_all = 1; + else + { + unformat_free (line_input); + return clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + } + } + + unformat_free (line_input); + + if (!(dev_idx < vec_len (dcm->dev))) + return clib_error_return (0, "invalid device index"); + + dev = vec_elt_at_index (dcm->dev, dev_idx); + + /* Clear all resources placements */ + if (free_all) + { + /* *INDENT-OFF* */ + vec_foreach (res_idx, dev->used_resources) + dpdk_crypto_clear_resource (res_idx[0]); + /* *INDENT-ON* */ + + return 0; + } + + if (!(thread_idx < vec_len (dcm->workers_main))) + return clib_error_return (0, "invalid thread 
index"); + + /* Clear placement of device for given thread index */ + /* *INDENT-OFF* */ + vec_foreach (res_idx, dev->used_resources) + if (dcm->resource[res_idx[0]].thread_idx == thread_idx) + break; + /* *INDENT-ON* */ + + if (!(res_idx < vec_end (dev->used_resources))) + return clib_error_return (0, "thread %u is not using device %u", + thread_idx, dev_idx); + + dpdk_crypto_clear_resource (res_idx[0]); + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (clear_dpdk_crypto_placement, static) = { + .path = "clear dpdk crypto placement", + .short_help = "clear dpdk crypto placement []", + .function = clear_dpdk_crypto_placement_fn, +}; +/* *INDENT-ON* */ + +u8 * +format_dpdk_mempool (u8 * s, va_list * args) +{ + struct rte_mempool *mp = va_arg (*args, struct rte_mempool *); + u32 indent = format_get_indent (s); + u32 count = rte_mempool_avail_count (mp); + + s = format (s, "%s\n%Uavailable %7d, allocated %7d total %7d\n", + mp->name, format_white_space, indent + 2, + count, mp->size - count, mp->size); + s = format (s, "%Uphys_addr %p, flags %08x, nb_mem_chunks %u\n", + format_white_space, indent + 2, + mp->mz->iova, mp->flags, mp->nb_mem_chunks); + s = format (s, "%Uelt_size %4u, header_size %3u, trailer_size %u\n", + format_white_space, indent + 2, + mp->elt_size, mp->header_size, mp->trailer_size); + s = format (s, "%Uprivate_data_size %3u, total_elt_size %u\n", + format_white_space, indent + 2, + mp->private_data_size, + mp->elt_size + mp->header_size + mp->trailer_size); + return s; +} + +static clib_error_t * +show_dpdk_crypto_pools_fn (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + + /* *INDENT-OFF* */ + vec_foreach (data, dcm->data) + { + if (data->crypto_op) + vlib_cli_output (vm, "%U\n", format_dpdk_mempool, data->crypto_op); + if (data->session_h) + vlib_cli_output (vm, "%U\n", format_dpdk_mempool, data->session_h); + + struct rte_mempool **mp; + 
vec_foreach (mp, data->session_drv) + if (mp[0]) + vlib_cli_output (vm, "%U\n", format_dpdk_mempool, mp[0]); + } + /* *INDENT-ON* */ + + return NULL; +} + +/*? + * This command is used to display the DPDK Crypto pools information. + * + * @cliexpar + * Example of how to display the DPDK Crypto pools information: + * @cliexstart{show crypto device mapping} + * vpp# show dpdk crypto pools + * crypto_pool_numa1 + * available 15872, allocated 512 total 16384 + * phys_addr 0xf3d2086c0, flags 00000010, nb_mem_chunks 1 + * elt_size 160, header_size 64, trailer_size 96 + * private_data_size 64, total_elt_size 320 + * + * session_h_pool_numa1 + * available 19998, allocated 2 total 20000 + * phys_addr 0xf3c9c4380, flags 00000010, nb_mem_chunks 1 + * elt_size 40, header_size 64, trailer_size 88 + * private_data_size 0, total_elt_size 192 + * + * session_drv0_pool_numa1 + * available 19998, allocated 2 total 20000 + * phys_addr 0xf3ad42d80, flags 00000010, nb_mem_chunks 1 + * elt_size 512, header_size 64, trailer_size 0 + * private_data_size 0, total_elt_size 576 + * @cliexend +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_dpdk_crypto_pools, static) = { + .path = "show dpdk crypto pools", + .short_help = "show dpdk crypto pools", + .function = show_dpdk_crypto_pools_fn, +}; +/* *INDENT-ON* */ + +/* TODO Allow user define number of sessions supported */ +/* TODO Allow user define descriptor queue size */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/extras/deprecated/dpdk-ipsec/crypto_node.c b/extras/deprecated/dpdk-ipsec/crypto_node.c new file mode 100644 index 00000000000..893848c05b6 --- /dev/null +++ b/extras/deprecated/dpdk-ipsec/crypto_node.c @@ -0,0 +1,330 @@ +/* + *------------------------------------------------------------------ + * crypto_node.c - DPDK Cryptodev input node + * + * Copyright (c) 2017 Intel and/or its affiliates. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#define foreach_dpdk_crypto_input_error \ + _(DQ_COPS, "Crypto ops dequeued") \ + _(AUTH_FAILED, "Crypto verification failed") \ + _(STATUS, "Crypto operation failed") + +typedef enum +{ +#define _(f,s) DPDK_CRYPTO_INPUT_ERROR_##f, + foreach_dpdk_crypto_input_error +#undef _ + DPDK_CRYPTO_INPUT_N_ERROR, +} dpdk_crypto_input_error_t; + +static char *dpdk_crypto_input_error_strings[] = { +#define _(n, s) s, + foreach_dpdk_crypto_input_error +#undef _ +}; + +extern vlib_node_registration_t dpdk_crypto_input_node; + +typedef struct +{ + /* dev id of this cryptodev */ + u16 dev_id; + u16 next_index; +} dpdk_crypto_input_trace_t; + +static u8 * +format_dpdk_crypto_input_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + dpdk_crypto_input_trace_t *t = va_arg (*args, dpdk_crypto_input_trace_t *); + + s = format (s, "cryptodev-id %d next-index %d", t->dev_id, t->next_index); + + return s; +} + +static_always_inline void +dpdk_crypto_input_check_op (vlib_main_t * vm, vlib_node_runtime_t * node, + struct rte_crypto_op *op0, u16 * next) +{ + if (PREDICT_FALSE (op0->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) + { + next[0] = DPDK_CRYPTO_INPUT_NEXT_DROP; 
+ vlib_node_increment_counter (vm, + node->node_index, + DPDK_CRYPTO_INPUT_ERROR_STATUS, 1); + /* if auth failed */ + if (op0->status == RTE_CRYPTO_OP_STATUS_AUTH_FAILED) + vlib_node_increment_counter (vm, + node->node_index, + DPDK_CRYPTO_INPUT_ERROR_AUTH_FAILED, 1); + } +} + +always_inline void +dpdk_crypto_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node, + u8 dev_id, u32 * bis, u16 * nexts, u32 n_deq) +{ + u32 n_left, n_trace; + + if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node)))) + { + n_left = n_deq; + + while (n_trace && n_left) + { + vlib_buffer_t *b0; + u16 next; + u32 bi; + + bi = bis[0]; + next = nexts[0]; + + b0 = vlib_get_buffer (vm, bi); + + if (PREDICT_TRUE + (vlib_trace_buffer (vm, node, next, b0, /* follow_chain */ 0))) + { + dpdk_crypto_input_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->dev_id = dev_id; + tr->next_index = next; + n_trace--; + } + + n_left--; + nexts++; + bis++; + } + vlib_set_trace_count (vm, node, n_trace); + } +} + +static_always_inline u32 +dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm, + vlib_node_runtime_t * node, crypto_resource_t * res) +{ + u8 numa = rte_socket_id (); + u32 n_ops, total_n_deq, n_deq[2]; + u32 bis[VLIB_FRAME_SIZE], *bi; + u16 nexts[VLIB_FRAME_SIZE], *next; + struct rte_crypto_op **ops; + + n_deq[0] = 0; + n_deq[1] = 0; + bi = bis; + next = nexts; + ops = cwm->ops; + + n_ops = total_n_deq = rte_cryptodev_dequeue_burst (res->dev_id, + res->qp_id, + ops, VLIB_FRAME_SIZE); + /* no op dequeued, do not proceed */ + if (n_ops == 0) + return 0; + + while (n_ops >= 4) + { + struct rte_crypto_op *op0, *op1, *op2, *op3; + + /* Prefetch next iteration. 
*/ + if (n_ops >= 8) + { + CLIB_PREFETCH (ops[4], CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (ops[5], CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (ops[6], CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (ops[7], CLIB_CACHE_LINE_BYTES, LOAD); + + CLIB_PREFETCH (crypto_op_get_priv (ops[4]), + CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (crypto_op_get_priv (ops[5]), + CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (crypto_op_get_priv (ops[6]), + CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (crypto_op_get_priv (ops[7]), + CLIB_CACHE_LINE_BYTES, LOAD); + } + + op0 = ops[0]; + op1 = ops[1]; + op2 = ops[2]; + op3 = ops[3]; + + next[0] = crypto_op_get_priv (op0)->next; + next[1] = crypto_op_get_priv (op1)->next; + next[2] = crypto_op_get_priv (op2)->next; + next[3] = crypto_op_get_priv (op3)->next; + + bi[0] = crypto_op_get_priv (op0)->bi; + bi[1] = crypto_op_get_priv (op1)->bi; + bi[2] = crypto_op_get_priv (op2)->bi; + bi[3] = crypto_op_get_priv (op3)->bi; + + n_deq[crypto_op_get_priv (op0)->encrypt] += 1; + n_deq[crypto_op_get_priv (op1)->encrypt] += 1; + n_deq[crypto_op_get_priv (op2)->encrypt] += 1; + n_deq[crypto_op_get_priv (op3)->encrypt] += 1; + + dpdk_crypto_input_check_op (vm, node, op0, next + 0); + dpdk_crypto_input_check_op (vm, node, op1, next + 1); + dpdk_crypto_input_check_op (vm, node, op2, next + 2); + dpdk_crypto_input_check_op (vm, node, op3, next + 3); + + op0->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + op1->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + op2->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + op3->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + /* next */ + next += 4; + n_ops -= 4; + ops += 4; + bi += 4; + } + while (n_ops > 0) + { + struct rte_crypto_op *op0; + + op0 = ops[0]; + + next[0] = crypto_op_get_priv (op0)->next; + bi[0] = crypto_op_get_priv (op0)->bi; + + n_deq[crypto_op_get_priv (op0)->encrypt] += 1; + + dpdk_crypto_input_check_op (vm, node, op0, next + 0); + + op0->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + /* 
next */ + next += 1; + n_ops -= 1; + ops += 1; + bi += 1; + } + + vlib_node_increment_counter (vm, node->node_index, + DPDK_CRYPTO_INPUT_ERROR_DQ_COPS, total_n_deq); + + res->inflights[0] -= n_deq[0]; + res->inflights[1] -= n_deq[1]; + + vlib_buffer_enqueue_to_next (vm, node, bis, nexts, total_n_deq); + + dpdk_crypto_input_trace (vm, node, res->dev_id, bis, nexts, total_n_deq); + + crypto_free_ops (numa, cwm->ops, total_n_deq); + + return total_n_deq; +} + +static_always_inline uword +dpdk_crypto_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_worker_main_t *cwm = &dcm->workers_main[vm->thread_index]; + crypto_resource_t *res; + u32 n_deq = 0; + u16 *remove = NULL, *res_idx; + word i; + + /* *INDENT-OFF* */ + vec_foreach (res_idx, cwm->resource_idx) + { + res = vec_elt_at_index (dcm->resource, res_idx[0]); + u32 inflights = res->inflights[0] + res->inflights[1]; + + if (inflights) + n_deq += dpdk_crypto_dequeue (vm, cwm, node, res); + + inflights = res->inflights[0] + res->inflights[1]; + if (PREDICT_FALSE (res->remove && !(inflights))) + vec_add1 (remove, res_idx[0]); + } + /* *INDENT-ON* */ + + /* TODO removal on master thread? 
*/ + if (PREDICT_FALSE (remove != NULL)) + { + /* *INDENT-OFF* */ + vec_foreach (res_idx, remove) + { + i = vec_search (cwm->resource_idx, res_idx[0]); + vec_del1 (cwm->resource_idx, i); + + res = vec_elt_at_index (dcm->resource, res_idx[0]); + res->thread_idx = (u16) ~0; + res->remove = 0; + + i = vec_search (dcm->dev[res->dev_id].used_resources, res_idx[0]); + ASSERT (i != (u16) ~0); + vec_del1 (dcm->dev[res->dev_id].used_resources, i); + vec_add1 (dcm->dev[res->dev_id].free_resources, res_idx[0]); + } + /* *INDENT-ON* */ + + vec_free (remove); + } + + return n_deq; +} + +VLIB_NODE_FN (dpdk_crypto_input_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_crypto_input_inline (vm, node, from_frame); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_crypto_input_node) = +{ + .name = "dpdk-crypto-input", + .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED, + .format_trace = format_dpdk_crypto_input_trace, + .type = VLIB_NODE_TYPE_INPUT, + .state = VLIB_NODE_STATE_DISABLED, + .n_errors = DPDK_CRYPTO_INPUT_N_ERROR, + .error_strings = dpdk_crypto_input_error_strings, + .n_next_nodes = DPDK_CRYPTO_INPUT_N_NEXT, + .next_nodes = + { +#define _(s,n) [DPDK_CRYPTO_INPUT_NEXT_##s] = n, + foreach_dpdk_crypto_input_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/extras/deprecated/dpdk-ipsec/dir.dox b/extras/deprecated/dpdk-ipsec/dir.dox new file mode 100644 index 00000000000..05504541abb --- /dev/null +++ b/extras/deprecated/dpdk-ipsec/dir.dox @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2016 Intel and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* Doxygen directory documentation */ + +/** +@dir src/plugins/dpdk/ipsec +@brief IPSec ESP encrypt/decrypt using DPDK Cryptodev API. + +This directory contains the source code for the DPDK Crypto abstraction layer. + +*/ +/*? %%clicmd:group_label DPDK Crypto %% ?*/ +/*? %%syscfg:group_label DPDK Crypto %% ?*/ diff --git a/extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md b/extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md new file mode 100644 index 00000000000..8cf51f07c03 --- /dev/null +++ b/extras/deprecated/dpdk-ipsec/dpdk_crypto_ipsec_doc.md @@ -0,0 +1,87 @@ +# VPP IPSec implementation using DPDK Cryptodev API {#dpdk_crypto_ipsec_doc} + +This document is meant to contain all related information about implementation and usability. + + +## VPP IPsec with DPDK Cryptodev + +DPDK Cryptodev is an asynchronous crypto API that supports both Hardware and Software implementations (for more details refer to [DPDK Cryptography Device Library documentation](http://dpdk.org/doc/guides/prog_guide/cryptodev_lib.html)). + +When there are enough Cryptodev resources for all workers, the node graph is reconfigured by adding and changing the default next nodes. + +The following nodes are added: +* dpdk-crypto-input : polling input node, dequeuing from crypto devices. +* dpdk-esp-encrypt : internal node. +* dpdk-esp-decrypt : internal node. +* dpdk-esp-encrypt-post : internal node. +* dpdk-esp-decrypt-post : internal node. 
+ +Set new default next nodes: +* for esp encryption: esp-encrypt -> dpdk-esp-encrypt +* for esp decryption: esp-decrypt -> dpdk-esp-decrypt + + +### How to enable VPP IPSec with DPDK Cryptodev support + +When building DPDK with VPP, Cryptodev support is always enabled. + +Additionally, on x86_64 platforms, DPDK is built with SW crypto support. + + +### Crypto Resources allocation + +VPP allocates crypto resources based on a best effort approach: +* first allocate Hardware crypto resources, then Software. +* if there are not enough crypto resources for all workers, the graph node is not modified and the default VPP IPsec implementation based in OpenSSL is used. The following message is displayed: + + 0: dpdk_ipsec_init: not enough Cryptodevs, default to OpenSSL IPsec + + +### Configuration example + +To enable DPDK Cryptodev the user just need to provide cryptodevs in the startup.conf. + +Below is an example startup.conf, it is not meant to be a default configuration: + +``` +dpdk { + dev 0000:81:00.0 + dev 0000:81:00.1 + dev 0000:85:01.0 + dev 0000:85:01.1 + vdev crypto_aesni_mb0,socket_id=1 + vdev crypto_aesni_mb1,socket_id=1 +} +``` + +In the above configuration: +* 0000:81:01.0 and 0000:81:01.1 are Ethernet device BDFs. +* 0000:85:01.0 and 0000:85:01.1 are Crypto device BDFs and they require the same driver binding as DPDK Ethernet devices but they do not support any extra configuration options. +* Two AESNI-MB Software (Virtual) Cryptodev PMDs are created in NUMA node 1. + +For further details refer to [DPDK Crypto Device Driver documentation](http://dpdk.org/doc/guides/cryptodevs/index.html) + +### Operational data + +The following CLI command displays the Cryptodev/Worker mapping: + + show crypto device mapping [verbose] + + +### nasm + +Building the DPDK Crypto Libraries requires the open source project nasm (The Netwide +Assembler) to be installed. Recommended version of nasm is 2.12.02. Minimum supported +version of nasm is 2.11.06. 
Use the following command to determine the current nasm version: + + nasm -v + +CentOS 7.3 and earlier and Fedora 21 and earlier use unsupported versions +of nasm. Use the following set of commands to build a supported version: + + wget http://www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2 + tar -xjvf nasm-2.12.02.tar.bz2 + cd nasm-2.12.02/ + ./configure + make + sudo make install diff --git a/extras/deprecated/dpdk-ipsec/esp_decrypt.c b/extras/deprecated/dpdk-ipsec/esp_decrypt.c new file mode 100644 index 00000000000..9a782abeb94 --- /dev/null +++ b/extras/deprecated/dpdk-ipsec/esp_decrypt.c @@ -0,0 +1,739 @@ +/* + * esp_decrypt.c : IPSec ESP Decrypt node using DPDK Cryptodev + * + * Copyright (c) 2017 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a opy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define foreach_esp_decrypt_next \ +_(DROP, "error-drop") \ +_(IP4_INPUT, "ip4-input-no-checksum") \ +_(IP6_INPUT, "ip6-input") + +#define _(v, s) ESP_DECRYPT_NEXT_##v, +typedef enum +{ + foreach_esp_decrypt_next +#undef _ + ESP_DECRYPT_N_NEXT, +} esp_decrypt_next_t; + +#define foreach_esp_decrypt_error \ + _(RX_PKTS, "ESP pkts received") \ + _(DECRYPTION_FAILED, "ESP decryption failed") \ + _(REPLAY, "SA replayed packet") \ + _(NOT_IP, "Not IP packet (dropped)") \ + _(ENQ_FAIL, "Enqueue decrypt failed (queue full)") \ + _(DISCARD, "Not enough crypto operations") \ + _(BAD_LEN, "Invalid ciphertext length") \ + _(SESSION, "Failed to get crypto session") \ + _(NOSUP, "Cipher/Auth not supported") + + +typedef enum +{ +#define _(sym,str) ESP_DECRYPT_ERROR_##sym, + foreach_esp_decrypt_error +#undef _ + ESP_DECRYPT_N_ERROR, +} esp_decrypt_error_t; + +static char *esp_decrypt_error_strings[] = { +#define _(sym,string) string, + foreach_esp_decrypt_error +#undef _ +}; + +extern vlib_node_registration_t dpdk_esp4_decrypt_node; +extern vlib_node_registration_t dpdk_esp6_decrypt_node; + +typedef struct +{ + ipsec_crypto_alg_t crypto_alg; + ipsec_integ_alg_t integ_alg; + u8 packet_data[64]; +} esp_decrypt_trace_t; + +/* packet trace format function */ +static u8 * +format_esp_decrypt_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *); + u32 indent = format_get_indent (s); + + s = format (s, "cipher %U auth %U\n", + format_ipsec_crypto_alg, t->crypto_alg, + format_ipsec_integ_alg, t->integ_alg); + s = format (s, "%U%U", + format_white_space, indent, format_esp_header, t->packet_data); + return s; +} + +always_inline uword +dpdk_esp_decrypt_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + 
vlib_frame_t * from_frame, int is_ip6) +{ + u32 n_left_from, *from, *to_next, next_index, thread_index; + u32 thread_idx = vlib_get_thread_index (); + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_resource_t *res = 0; + ipsec_sa_t *sa0 = 0; + crypto_alg_t *cipher_alg = 0, *auth_alg = 0; + struct rte_cryptodev_sym_session *session = 0; + u32 ret, last_sa_index = ~0; + u8 numa = rte_socket_id (); + u8 is_aead = 0; + crypto_worker_main_t *cwm = + vec_elt_at_index (dcm->workers_main, thread_idx); + struct rte_crypto_op **ops = cwm->ops; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + thread_index = vm->thread_index; + + ret = crypto_alloc_ops (numa, ops, n_left_from); + if (ret) + { + if (is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_DISCARD, n_left_from); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_DISCARD, n_left_from); + /* Discard whole frame */ + vlib_buffer_free (vm, from, n_left_from); + return n_left_from; + } + + next_index = ESP_DECRYPT_NEXT_DROP; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + clib_error_t *error; + u32 bi0, sa_index0, iv_size; + u8 trunc_size; + vlib_buffer_t *b0; + esp_header_t *esp0; + struct rte_mbuf *mb0; + struct rte_crypto_op *op; + u16 res_idx; + + bi0 = from[0]; + from += 1; + n_left_from -= 1; + + b0 = vlib_get_buffer (vm, bi0); + mb0 = rte_mbuf_from_vlib_buffer (b0); + esp0 = vlib_buffer_get_current (b0); + + /* ih0/ih6_0 */ + CLIB_PREFETCH (esp0, sizeof (esp0[0]) + 16, LOAD); + /* mb0 */ + CLIB_PREFETCH (mb0, CLIB_CACHE_LINE_BYTES, STORE); + + op = ops[0]; + ops += 1; + ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED); + + dpdk_op_priv_t *priv = crypto_op_get_priv (op); + /* store bi in op private */ + priv->bi = bi0; + priv->encrypt = 0; + + u16 
op_len = + sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]); + CLIB_PREFETCH (op, op_len, STORE); + + sa_index0 = vnet_buffer (b0)->ipsec.sad_index; + vlib_prefetch_combined_counter (&ipsec_sa_counters, + thread_index, sa_index0); + + if (sa_index0 != last_sa_index) + { + sa0 = ipsec_sa_get (sa_index0); + + cipher_alg = + vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg); + auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg); + + is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD); + if (is_aead) + auth_alg = cipher_alg; + + res_idx = get_resource (cwm, sa0); + + if (PREDICT_FALSE (res_idx == (u16) ~ 0)) + { + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_NOSUP, 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_NOSUP, 1); + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + res = vec_elt_at_index (dcm->resource, res_idx); + + error = crypto_get_session (&session, sa_index0, res, cwm, 0); + if (PREDICT_FALSE (error || !session)) + { + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_SESSION, + 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_SESSION, + 1); + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + last_sa_index = sa_index0; + } + + /* anti-replay check */ + if (ipsec_sa_anti_replay_check + (sa0, clib_host_to_net_u32 (esp0->seq))) + { + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_REPLAY, 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_REPLAY, 1); + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + if (is_ip6) + priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT6_POST; + else + { + priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT4_POST; + b0->flags |= 
VNET_BUFFER_F_IS_IP4; + } + + /* FIXME multi-seg */ + vlib_increment_combined_counter + (&ipsec_sa_counters, thread_index, sa_index0, + 1, b0->current_length); + + res->ops[res->n_ops] = op; + res->bi[res->n_ops] = bi0; + res->n_ops += 1; + + /* Convert vlib buffer to mbuf */ + mb0->data_len = b0->current_length; + mb0->pkt_len = b0->current_length; + mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data; + + trunc_size = auth_alg->trunc_size; + iv_size = cipher_alg->iv_len; + + /* Outer IP header has already been stripped */ + u16 payload_len = + b0->current_length - sizeof (esp_header_t) - iv_size - trunc_size; + + ASSERT (payload_len >= 4); + + if (payload_len & (cipher_alg->boundary - 1)) + { + if (is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_BAD_LEN, 1); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_BAD_LEN, 1); + res->n_ops -= 1; + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + u32 cipher_off, cipher_len; + u32 auth_len = 0; + u8 *aad = NULL; + + u8 *iv = (u8 *) (esp0 + 1); + + dpdk_gcm_cnt_blk *icb = &priv->cb; + + cipher_off = sizeof (esp_header_t) + iv_size; + cipher_len = payload_len; + + u8 *digest = vlib_buffer_get_tail (b0) - trunc_size; + u64 digest_paddr = mb0->buf_iova + digest - ((u8 *) mb0->buf_addr); + + if (!is_aead && cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC) + clib_memcpy_fast (icb, iv, 16); + else /* CTR/GCM */ + { + u32 *_iv = (u32 *) iv; + + crypto_set_icb (icb, sa0->salt, _iv[0], _iv[1]); + } + + if (is_aead) + { + aad = priv->aad; + u32 *_aad = (u32 *) aad; + clib_memcpy_fast (aad, esp0, 8); + + /* _aad[3] should always be 0 */ + if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0))) + { + _aad[2] = _aad[1]; + _aad[1] = clib_host_to_net_u32 (sa0->seq_hi); + } + else + _aad[2] = 0; + } + else + { + auth_len = sizeof (esp_header_t) + iv_size + payload_len; + + if (ipsec_sa_is_set_USE_ESN (sa0)) + { + 
clib_memcpy_fast (priv->icv, digest, trunc_size); + u32 *_digest = (u32 *) digest; + _digest[0] = clib_host_to_net_u32 (sa0->seq_hi); + auth_len += sizeof (sa0->seq_hi); + + digest = priv->icv; + digest_paddr = + op->phys_addr + (uintptr_t) priv->icv - (uintptr_t) op; + } + } + + crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len, + 0, auth_len, aad, digest, digest_paddr); + trace: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_decrypt_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; + clib_memcpy_fast (tr->packet_data, vlib_buffer_get_current (b0), + sizeof (esp_header_t)); + } + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + if (is_ip6) + { + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + + crypto_enqueue_ops (vm, cwm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ ); + } + else + { + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + + crypto_enqueue_ops (vm, cwm, dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ ); + } + + crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops); + + return from_frame->n_vectors; +} + +VLIB_NODE_FN (dpdk_esp4_decrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_decrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp4_decrypt_node) = { + .name = "dpdk-esp4-decrypt", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_error_strings), + .error_strings = esp_decrypt_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) 
[ESP_DECRYPT_NEXT_##s] = n, + foreach_esp_decrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (dpdk_esp6_decrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_decrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp6_decrypt_node) = { + .name = "dpdk-esp6-decrypt", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_error_strings), + .error_strings = esp_decrypt_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, + foreach_esp_decrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/* + * Decrypt Post Node + */ + +#define foreach_esp_decrypt_post_error \ + _(PKTS, "ESP post pkts") + +typedef enum +{ +#define _(sym,str) ESP_DECRYPT_POST_ERROR_##sym, + foreach_esp_decrypt_post_error +#undef _ + ESP_DECRYPT_POST_N_ERROR, +} esp_decrypt_post_error_t; + +static char *esp_decrypt_post_error_strings[] = { +#define _(sym,string) string, + foreach_esp_decrypt_post_error +#undef _ +}; + +extern vlib_node_registration_t dpdk_esp4_decrypt_post_node; +extern vlib_node_registration_t dpdk_esp6_decrypt_post_node; + +static u8 * +format_esp_decrypt_post_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *); + u32 indent = format_get_indent (s); + + s = format (s, "cipher %U auth %U\n", + format_ipsec_crypto_alg, t->crypto_alg, + format_ipsec_integ_alg, t->integ_alg); + + ip4_header_t *ih4 = (ip4_header_t *) t->packet_data; + if ((ih4->ip_version_and_header_length & 0xF0) == 0x60) + s = + format (s, "%U%U", format_white_space, indent, format_ip6_header, ih4); + else + s = + format (s, "%U%U", format_white_space, indent, 
format_ip4_header, ih4); + + return s; +} + +always_inline uword +dpdk_esp_decrypt_post_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip6) +{ + u32 n_left_from, *from, *to_next = 0, next_index; + ipsec_sa_t *sa0; + u32 sa_index0 = ~0; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + esp_footer_t *f0; + u32 bi0, iv_size, next0; + vlib_buffer_t *b0 = 0; + ip4_header_t *ih4 = 0, *oh4 = 0; + ip6_header_t *ih6 = 0, *oh6 = 0; + crypto_alg_t *cipher_alg, *auth_alg; + esp_header_t *esp0; + u8 trunc_size, is_aead; + u16 udp_encap_adv = 0; + + next0 = ESP_DECRYPT_NEXT_DROP; + + bi0 = from[0]; + from += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + esp0 = vlib_buffer_get_current (b0); + + sa_index0 = vnet_buffer (b0)->ipsec.sad_index; + sa0 = ipsec_sa_get (sa_index0); + + to_next[0] = bi0; + to_next += 1; + + cipher_alg = vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg); + auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg); + is_aead = cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD; + if (is_aead) + auth_alg = cipher_alg; + + trunc_size = auth_alg->trunc_size; + + iv_size = cipher_alg->iv_len; + + ipsec_sa_anti_replay_advance (sa0, + clib_host_to_net_u32 (esp0->seq)); + + /* if UDP encapsulation is used adjust the address of the IP header */ + if (ipsec_sa_is_set_UDP_ENCAP (sa0) + && (b0->flags & VNET_BUFFER_F_IS_IP4)) + { + udp_encap_adv = sizeof (udp_header_t); + } + + if (b0->flags & VNET_BUFFER_F_IS_IP4) + ih4 = (ip4_header_t *) + ((u8 *) esp0 - udp_encap_adv - sizeof (ip4_header_t)); + else + ih4 = (ip4_header_t *) ((u8 *) esp0 - sizeof (ip6_header_t)); + + 
vlib_buffer_advance (b0, sizeof (esp_header_t) + iv_size); + + b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; + f0 = (esp_footer_t *) (vlib_buffer_get_tail (b0) - trunc_size - 2); + b0->current_length -= (f0->pad_length + trunc_size + 2); +#if 0 + /* check padding */ + const u8 *padding = vlib_buffer_get_tail (b0); + if (PREDICT_FALSE (memcmp (padding, pad_data, f0->pad_length))) + { + clib_warning ("bad padding"); + vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + goto trace; + } +#endif + if (ipsec_sa_is_set_IS_TUNNEL (sa0)) + { + if (f0->next_header == IP_PROTOCOL_IP_IN_IP) + next0 = ESP_DECRYPT_NEXT_IP4_INPUT; + else if (f0->next_header == IP_PROTOCOL_IPV6) + next0 = ESP_DECRYPT_NEXT_IP6_INPUT; + else + { + clib_warning ("next header: 0x%x", f0->next_header); + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + goto trace; + } + } + else /* transport mode */ + { + if ((ih4->ip_version_and_header_length & 0xF0) == 0x40) + { + u16 ih4_len = ip4_header_bytes (ih4); + vlib_buffer_advance (b0, -ih4_len); + next0 = ESP_DECRYPT_NEXT_IP4_INPUT; + + oh4 = vlib_buffer_get_current (b0); + memmove (oh4, ih4, ih4_len); + oh4->protocol = f0->next_header; + oh4->length = clib_host_to_net_u16 (b0->current_length); + oh4->checksum = ip4_header_checksum (oh4); + } + else if ((ih4->ip_version_and_header_length & 0xF0) == 0x60) + { + ih6 = (ip6_header_t *) ih4; + vlib_buffer_advance (b0, -sizeof (ip6_header_t)); + oh6 = vlib_buffer_get_current (b0); + memmove (oh6, ih6, sizeof (ip6_header_t)); + + next0 = ESP_DECRYPT_NEXT_IP6_INPUT; + oh6->protocol = f0->next_header; + u16 len = b0->current_length - sizeof (ip6_header_t); + oh6->payload_length = clib_host_to_net_u16 (len); + } + else + { + clib_warning ("next header: 0x%x", 
f0->next_header); + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + goto trace; + } + } + + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + + trace: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_decrypt_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; + ih4 = vlib_buffer_get_current (b0); + clib_memcpy_fast (tr->packet_data, ih4, sizeof (ip6_header_t)); + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, bi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + if (is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_post_node.index, + ESP_DECRYPT_POST_ERROR_PKTS, + from_frame->n_vectors); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_post_node.index, + ESP_DECRYPT_POST_ERROR_PKTS, + from_frame->n_vectors); + + return from_frame->n_vectors; +} + +VLIB_NODE_FN (dpdk_esp4_decrypt_post_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_decrypt_post_inline (vm, node, from_frame, 0 /*is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp4_decrypt_post_node) = { + .name = "dpdk-esp4-decrypt-post", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_post_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings), + .error_strings = esp_decrypt_post_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, + foreach_esp_decrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (dpdk_esp6_decrypt_post_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return 
dpdk_esp_decrypt_post_inline (vm, node, from_frame, 0 /*is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp6_decrypt_post_node) = { + .name = "dpdk-esp6-decrypt-post", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_post_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings), + .error_strings = esp_decrypt_post_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, + foreach_esp_decrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/extras/deprecated/dpdk-ipsec/esp_encrypt.c b/extras/deprecated/dpdk-ipsec/esp_encrypt.c new file mode 100644 index 00000000000..ce1b5795995 --- /dev/null +++ b/extras/deprecated/dpdk-ipsec/esp_encrypt.c @@ -0,0 +1,709 @@ +/* + * esp_encrypt.c : IPSec ESP encrypt node using DPDK Cryptodev + * + * Copyright (c) 2017 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define foreach_esp_encrypt_next \ +_(DROP, "error-drop") \ +_(IP4_LOOKUP, "ip4-lookup") \ +_(IP6_LOOKUP, "ip6-lookup") \ +_(INTERFACE_OUTPUT, "interface-output") + +#define _(v, s) ESP_ENCRYPT_NEXT_##v, +typedef enum +{ + foreach_esp_encrypt_next +#undef _ + ESP_ENCRYPT_N_NEXT, +} esp_encrypt_next_t; + +#define foreach_esp_encrypt_error \ + _(RX_PKTS, "ESP pkts received") \ + _(SEQ_CYCLED, "Sequence number cycled") \ + _(ENQ_FAIL, "Enqueue encrypt failed (queue full)") \ + _(DISCARD, "Not enough crypto operations") \ + _(SESSION, "Failed to get crypto session") \ + _(NOSUP, "Cipher/Auth not supported") + + +typedef enum +{ +#define _(sym,str) ESP_ENCRYPT_ERROR_##sym, + foreach_esp_encrypt_error +#undef _ + ESP_ENCRYPT_N_ERROR, +} esp_encrypt_error_t; + +static char *esp_encrypt_error_strings[] = { +#define _(sym,string) string, + foreach_esp_encrypt_error +#undef _ +}; + +extern vlib_node_registration_t dpdk_esp4_encrypt_node; +extern vlib_node_registration_t dpdk_esp6_encrypt_node; +extern vlib_node_registration_t dpdk_esp4_encrypt_tun_node; +extern vlib_node_registration_t dpdk_esp6_encrypt_tun_node; + +typedef struct +{ + ipsec_crypto_alg_t crypto_alg; + ipsec_integ_alg_t integ_alg; + u8 packet_data[64]; +} esp_encrypt_trace_t; + +/* packet trace format function */ +static u8 * +format_esp_encrypt_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *); + ip4_header_t *ih4 = (ip4_header_t *) t->packet_data; + u32 indent = format_get_indent (s), offset; + + s = format (s, "cipher %U auth %U\n", + format_ipsec_crypto_alg, t->crypto_alg, + format_ipsec_integ_alg, t->integ_alg); + + if ((ih4->ip_version_and_header_length & 0xF0) == 0x60) + { + s = format (s, "%U%U", 
format_white_space, indent, + format_ip6_header, ih4); + offset = sizeof (ip6_header_t); + } + else + { + s = format (s, "%U%U", format_white_space, indent, + format_ip4_header, ih4); + offset = ip4_header_bytes (ih4); + } + + s = format (s, "\n%U%U", format_white_space, indent, + format_esp_header, t->packet_data + offset); + + return s; +} + +always_inline uword +dpdk_esp_encrypt_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip6, int is_tun) +{ + u32 n_left_from, *from, *to_next, next_index, thread_index; + ipsec_main_t *im = &ipsec_main; + vnet_main_t *vnm = vnet_get_main (); + u32 thread_idx = vlib_get_thread_index (); + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_resource_t *res = 0; + ipsec_sa_t *sa0 = 0; + crypto_alg_t *cipher_alg = 0, *auth_alg = 0; + struct rte_cryptodev_sym_session *session = 0; + u32 ret, last_sa_index = ~0; + u8 numa = rte_socket_id (); + u8 is_aead = 0; + crypto_worker_main_t *cwm = + vec_elt_at_index (dcm->workers_main, thread_idx); + struct rte_crypto_op **ops = cwm->ops; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + thread_index = vm->thread_index; + + ret = crypto_alloc_ops (numa, ops, n_left_from); + if (ret) + { + if (is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_DISCARD, n_left_from); + else + vlib_node_increment_counter (vm, dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_DISCARD, n_left_from); + /* Discard whole frame */ + vlib_buffer_free (vm, from, n_left_from); + return n_left_from; + } + + next_index = ESP_ENCRYPT_NEXT_DROP; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + clib_error_t *error; + u32 bi0, bi1; + vlib_buffer_t *b0, *b1; + u32 sa_index0; + ip4_and_esp_header_t *ih0, *oh0 = 0; + ip6_and_esp_header_t *ih6_0, *oh6_0 = 0; + 
ip4_and_udp_and_esp_header_t *ouh0 = 0; + esp_header_t *esp0; + esp_footer_t *f0; + u8 next_hdr_type; + u32 iv_size; + u16 orig_sz; + u8 trunc_size; + u16 rewrite_len; + u16 udp_encap_adv = 0; + struct rte_mbuf *mb0; + struct rte_crypto_op *op; + u16 res_idx; + + bi0 = from[0]; + from += 1; + n_left_from -= 1; + + b0 = vlib_get_buffer (vm, bi0); + ih0 = vlib_buffer_get_current (b0); + mb0 = rte_mbuf_from_vlib_buffer (b0); + + /* ih0/ih6_0 */ + CLIB_PREFETCH (ih0, sizeof (ih6_0[0]), LOAD); + /* f0 */ + CLIB_PREFETCH (vlib_buffer_get_tail (b0), 20, STORE); + /* mb0 */ + CLIB_PREFETCH (mb0, CLIB_CACHE_LINE_BYTES, STORE); + + if (n_left_from > 1) + { + bi1 = from[1]; + b1 = vlib_get_buffer (vm, bi1); + + CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD); + CLIB_PREFETCH (b1->data - CLIB_CACHE_LINE_BYTES, + CLIB_CACHE_LINE_BYTES, STORE); + } + + op = ops[0]; + ops += 1; + ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED); + + dpdk_op_priv_t *priv = crypto_op_get_priv (op); + /* store bi in op private */ + priv->bi = bi0; + priv->encrypt = 1; + + u16 op_len = + sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]); + CLIB_PREFETCH (op, op_len, STORE); + + if (is_tun) + { + /* we are on a ipsec tunnel's feature arc */ + vnet_buffer (b0)->ipsec.sad_index = + sa_index0 = ipsec_tun_protect_get_sa_out + (vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + } + else + sa_index0 = vnet_buffer (b0)->ipsec.sad_index; + + if (sa_index0 != last_sa_index) + { + sa0 = ipsec_sa_get (sa_index0); + + cipher_alg = + vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg); + auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg); + + is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD); + + if (is_aead) + auth_alg = cipher_alg; + + res_idx = get_resource (cwm, sa0); + + if (PREDICT_FALSE (res_idx == (u16) ~ 0)) + { + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_NOSUP, 1); + else + vlib_node_increment_counter (vm, + 
dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_NOSUP, 1); + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + res = vec_elt_at_index (dcm->resource, res_idx); + + error = crypto_get_session (&session, sa_index0, res, cwm, 1); + if (PREDICT_FALSE (error || !session)) + { + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_SESSION, + 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_SESSION, + 1); + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + last_sa_index = sa_index0; + } + + if (PREDICT_FALSE (esp_seq_advance (sa0))) + { + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); + //TODO: rekey SA + to_next[0] = bi0; + to_next += 1; + n_left_to_next -= 1; + goto trace; + } + + orig_sz = b0->current_length; + + /* TODO multi-seg support - total_length_not_including_first_buffer */ + vlib_increment_combined_counter + (&ipsec_sa_counters, thread_index, sa_index0, + 1, b0->current_length); + + /* Update tunnel interface tx counters */ + if (is_tun) + vlib_increment_combined_counter + (vim->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, + thread_index, vnet_buffer (b0)->sw_if_index[VLIB_TX], + 1, b0->current_length); + + res->ops[res->n_ops] = op; + res->bi[res->n_ops] = bi0; + res->n_ops += 1; + + dpdk_gcm_cnt_blk *icb = &priv->cb; + + crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi); + + iv_size = cipher_alg->iv_len; + trunc_size = auth_alg->trunc_size; + + /* if UDP encapsulation is used adjust the address of the IP header */ + if (ipsec_sa_is_set_UDP_ENCAP (sa0) && !is_ip6) + udp_encap_adv = sizeof (udp_header_t); + + if (ipsec_sa_is_set_IS_TUNNEL (sa0)) + { + rewrite_len = 0; + if (!ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)) /* ip4 */ + 
{ + /* in tunnel mode send it back to FIB */ + priv->next = DPDK_CRYPTO_INPUT_NEXT_IP4_LOOKUP; + u8 adv = sizeof (ip4_header_t) + udp_encap_adv + + sizeof (esp_header_t) + iv_size; + vlib_buffer_advance (b0, -adv); + oh0 = vlib_buffer_get_current (b0); + ouh0 = vlib_buffer_get_current (b0); + next_hdr_type = (is_ip6 ? + IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); + /* + * oh0->ip4.ip_version_and_header_length = 0x45; + * oh0->ip4.tos = ih0->ip4.tos; + * oh0->ip4.fragment_id = 0; + * oh0->ip4.flags_and_fragment_offset = 0; + */ + oh0->ip4.checksum_data_64[0] = + clib_host_to_net_u64 (0x45ULL << 56); + /* + * oh0->ip4.ttl = 254; + * oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; + */ + oh0->ip4.checksum_data_32[2] = + clib_host_to_net_u32 (0xfe320000); + + oh0->ip4.src_address.as_u32 = + sa0->tunnel.t_src.ip.ip4.as_u32; + oh0->ip4.dst_address.as_u32 = + sa0->tunnel.t_dst.ip.ip4.as_u32; + + if (ipsec_sa_is_set_UDP_ENCAP (sa0)) + { + oh0->ip4.protocol = IP_PROTOCOL_UDP; + esp0 = &ouh0->esp; + } + else + esp0 = &oh0->esp; + esp0->spi = clib_host_to_net_u32 (sa0->spi); + esp0->seq = clib_host_to_net_u32 (sa0->seq); + } + else + { + /* ip6 */ + /* in tunnel mode send it back to FIB */ + priv->next = DPDK_CRYPTO_INPUT_NEXT_IP6_LOOKUP; + + u8 adv = + sizeof (ip6_header_t) + sizeof (esp_header_t) + iv_size; + vlib_buffer_advance (b0, -adv); + ih6_0 = (ip6_and_esp_header_t *) ih0; + oh6_0 = vlib_buffer_get_current (b0); + + next_hdr_type = (is_ip6 ? 
+ IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); + + oh6_0->ip6.ip_version_traffic_class_and_flow_label = + ih6_0->ip6.ip_version_traffic_class_and_flow_label; + + oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP; + oh6_0->ip6.hop_limit = 254; + oh6_0->ip6.src_address.as_u64[0] = + sa0->tunnel.t_src.ip.ip6.as_u64[0]; + oh6_0->ip6.src_address.as_u64[1] = + sa0->tunnel.t_src.ip.ip6.as_u64[1]; + oh6_0->ip6.dst_address.as_u64[0] = + sa0->tunnel.t_dst.ip.ip6.as_u64[0]; + oh6_0->ip6.dst_address.as_u64[1] = + sa0->tunnel.t_dst.ip.ip6.as_u64[1]; + esp0 = &oh6_0->esp; + oh6_0->esp.spi = clib_host_to_net_u32 (sa0->spi); + oh6_0->esp.seq = clib_host_to_net_u32 (sa0->seq); + } + + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + } + else /* transport mode */ + { + if (is_tun) + { + rewrite_len = 0; + priv->next = DPDK_CRYPTO_INPUT_NEXT_MIDCHAIN; + } + else + { + priv->next = DPDK_CRYPTO_INPUT_NEXT_INTERFACE_OUTPUT; + rewrite_len = vnet_buffer (b0)->ip.save_rewrite_length; + } + u16 adv = sizeof (esp_header_t) + iv_size + udp_encap_adv; + vlib_buffer_advance (b0, -adv - rewrite_len); + u8 *src = ((u8 *) ih0) - rewrite_len; + u8 *dst = vlib_buffer_get_current (b0); + oh0 = vlib_buffer_get_current (b0) + rewrite_len; + ouh0 = vlib_buffer_get_current (b0) + rewrite_len; + + if (is_ip6) + { + orig_sz -= sizeof (ip6_header_t); + ih6_0 = (ip6_and_esp_header_t *) ih0; + next_hdr_type = ih6_0->ip6.protocol; + memmove (dst, src, rewrite_len + sizeof (ip6_header_t)); + oh6_0 = (ip6_and_esp_header_t *) oh0; + oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP; + esp0 = &oh6_0->esp; + } + else /* ipv4 */ + { + u16 ip_size = ip4_header_bytes (&ih0->ip4); + orig_sz -= ip_size; + next_hdr_type = ih0->ip4.protocol; + memmove (dst, src, rewrite_len + ip_size); + oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; + esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size); + if (ipsec_sa_is_set_UDP_ENCAP (sa0)) + { + oh0->ip4.protocol = IP_PROTOCOL_UDP; + esp0 = (esp_header_t *) + (((u8 *) oh0) + ip_size + udp_encap_adv); + 
} + else + { + oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP; + esp0 = (esp_header_t *) (((u8 *) oh0) + ip_size); + } + } + esp0->spi = clib_host_to_net_u32 (sa0->spi); + esp0->seq = clib_host_to_net_u32 (sa0->seq); + } + + if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0) + { + ouh0->udp.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec); + ouh0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec); + ouh0->udp.checksum = 0; + } + ASSERT (is_pow2 (cipher_alg->boundary)); + u16 mask = cipher_alg->boundary - 1; + u16 pad_payload_len = ((orig_sz + 2) + mask) & ~mask; + u8 pad_bytes = pad_payload_len - 2 - orig_sz; + + u8 *padding = + vlib_buffer_put_uninit (b0, pad_bytes + 2 + trunc_size); + + /* The extra pad bytes would be overwritten by the digest */ + if (pad_bytes) + clib_memcpy_fast (padding, pad_data, 16); + + f0 = (esp_footer_t *) (padding + pad_bytes); + f0->pad_length = pad_bytes; + f0->next_header = next_hdr_type; + + if (oh6_0) + { + u16 len = b0->current_length - sizeof (ip6_header_t); + oh6_0->ip6.payload_length = + clib_host_to_net_u16 (len - rewrite_len); + } + else if (oh0) + { + oh0->ip4.length = + clib_host_to_net_u16 (b0->current_length - rewrite_len); + oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4); + if (ipsec_sa_is_set_UDP_ENCAP (sa0) && ouh0) + { + ouh0->udp.length = + clib_host_to_net_u16 (clib_net_to_host_u16 + (ouh0->ip4.length) - + ip4_header_bytes (&ouh0->ip4)); + } + } + else /* should never happen */ + clib_warning ("No outer header found for ESP packet"); + + b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; + + /* mbuf packet starts at ESP header */ + mb0->data_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0); + mb0->pkt_len = vlib_buffer_get_tail (b0) - ((u8 *) esp0); + mb0->data_off = ((void *) esp0) - mb0->buf_addr; + + u32 cipher_off, cipher_len, auth_len = 0; + u32 *aad = NULL; + + u8 *digest = vlib_buffer_get_tail (b0) - trunc_size; + u64 digest_paddr = mb0->buf_iova + digest - ((u8 *) mb0->buf_addr); + + if (!is_aead && 
(cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC || + cipher_alg->alg == RTE_CRYPTO_CIPHER_NULL)) + { + cipher_off = sizeof (esp_header_t); + cipher_len = iv_size + pad_payload_len; + } + else /* CTR/GCM */ + { + u32 *esp_iv = (u32 *) (esp0 + 1); + esp_iv[0] = sa0->seq; + esp_iv[1] = sa0->seq_hi; + + cipher_off = sizeof (esp_header_t) + iv_size; + cipher_len = pad_payload_len; + } + + if (is_aead) + { + aad = (u32 *) priv->aad; + aad[0] = esp0->spi; + + /* aad[3] should always be 0 */ + if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0))) + { + aad[1] = clib_host_to_net_u32 (sa0->seq_hi); + aad[2] = esp0->seq; + } + else + { + aad[1] = esp0->seq; + aad[2] = 0; + } + } + else + { + auth_len = + vlib_buffer_get_tail (b0) - ((u8 *) esp0) - trunc_size; + if (ipsec_sa_is_set_USE_ESN (sa0)) + { + u32 *_digest = (u32 *) digest; + _digest[0] = clib_host_to_net_u32 (sa0->seq_hi); + auth_len += 4; + } + } + + crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len, + 0, auth_len, (u8 *) aad, digest, digest_paddr); + + trace: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + esp_encrypt_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->crypto_alg = sa0->crypto_alg; + tr->integ_alg = sa0->integ_alg; + u8 *p = vlib_buffer_get_current (b0); + if (!ipsec_sa_is_set_IS_TUNNEL (sa0) && !is_tun) + p += vnet_buffer (b0)->ip.save_rewrite_length; + clib_memcpy_fast (tr->packet_data, p, sizeof (tr->packet_data)); + } + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + if (is_ip6) + { + vlib_node_increment_counter (vm, + (is_tun ? + dpdk_esp6_encrypt_tun_node.index : + dpdk_esp6_encrypt_node.index), + ESP_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + + crypto_enqueue_ops (vm, cwm, dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ ); + } + else + { + vlib_node_increment_counter (vm, + (is_tun ? 
+ dpdk_esp4_encrypt_tun_node.index : + dpdk_esp4_encrypt_node.index), + ESP_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + + crypto_enqueue_ops (vm, cwm, dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ ); + } + + crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops); + + return from_frame->n_vectors; +} + +VLIB_NODE_FN (dpdk_esp4_encrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 0); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp4_encrypt_node) = { + .name = "dpdk-esp4-encrypt", + .flags = VLIB_NODE_FLAG_IS_OUTPUT, + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .n_errors = ARRAY_LEN (esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + .n_next_nodes = 1, + .next_nodes = + { + [ESP_ENCRYPT_NEXT_DROP] = "error-drop", + } +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (dpdk_esp6_encrypt_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 0); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp6_encrypt_node) = { + .name = "dpdk-esp6-encrypt", + .flags = VLIB_NODE_FLAG_IS_OUTPUT, + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .n_errors = ARRAY_LEN (esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + .n_next_nodes = 1, + .next_nodes = + { + [ESP_ENCRYPT_NEXT_DROP] = "error-drop", + } +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (dpdk_esp4_encrypt_tun_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ , 1); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp4_encrypt_tun_node) = { + .name = "dpdk-esp4-encrypt-tun", + .flags = VLIB_NODE_FLAG_IS_OUTPUT, + .vector_size = sizeof (u32), + 
.format_trace = format_esp_encrypt_trace, + .n_errors = ARRAY_LEN (esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + .n_next_nodes = 1, + .next_nodes = + { + [ESP_ENCRYPT_NEXT_DROP] = "error-drop", + } +}; +/* *INDENT-ON* */ + +VLIB_NODE_FN (dpdk_esp6_encrypt_tun_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ , 1); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp6_encrypt_tun_node) = { + .name = "dpdk-esp6-encrypt-tun", + .flags = VLIB_NODE_FLAG_IS_OUTPUT, + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .n_errors = ARRAY_LEN (esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + .n_next_nodes = 1, + .next_nodes = + { + [ESP_ENCRYPT_NEXT_DROP] = "error-drop", + } +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/extras/deprecated/dpdk-ipsec/ipsec.c b/extras/deprecated/dpdk-ipsec/ipsec.c new file mode 100644 index 00000000000..e260ba7dcc4 --- /dev/null +++ b/extras/deprecated/dpdk-ipsec/ipsec.c @@ -0,0 +1,1087 @@ +/* + * Copyright (c) 2017 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +dpdk_crypto_main_t dpdk_crypto_main; + +#define EMPTY_STRUCT {0} +#define NUM_CRYPTO_MBUFS 16384 + +static void +algos_init (u32 n_mains) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *a; + + vec_validate_aligned (dcm->cipher_algs, IPSEC_CRYPTO_N_ALG - 1, 8); + + { +#define _(v,f,str) \ + dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].name = str; \ + dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].disabled = n_mains; + foreach_ipsec_crypto_alg +#undef _ + } + + /* Minimum boundary for ciphers is 4B, required by ESP */ + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_NONE]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_NULL; + a->boundary = 4; /* 1 */ + a->key_len = 0; + a->iv_len = 0; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_128]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CBC; + a->boundary = 16; + a->key_len = 16; + a->iv_len = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_192]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CBC; + a->boundary = 16; + a->key_len = 24; + a->iv_len = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_256]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CBC; + a->boundary = 16; + a->key_len = 32; + a->iv_len = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_128]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CTR; + a->boundary = 4; /* 1 */ + a->key_len = 16; + a->iv_len = 8; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_192]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CTR; + a->boundary = 4; /* 1 */ + a->key_len = 24; + a->iv_len = 8; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_256]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CTR; + a->boundary = 4; /* 1 */ + a->key_len = 32; + a->iv_len = 8; + +#define 
AES_GCM_TYPE RTE_CRYPTO_SYM_XFORM_AEAD +#define AES_GCM_ALG RTE_CRYPTO_AEAD_AES_GCM + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_128]; + a->type = AES_GCM_TYPE; + a->alg = AES_GCM_ALG; + a->boundary = 4; /* 1 */ + a->key_len = 16; + a->iv_len = 8; + a->trunc_size = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_192]; + a->type = AES_GCM_TYPE; + a->alg = AES_GCM_ALG; + a->boundary = 4; /* 1 */ + a->key_len = 24; + a->iv_len = 8; + a->trunc_size = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_256]; + a->type = AES_GCM_TYPE; + a->alg = AES_GCM_ALG; + a->boundary = 4; /* 1 */ + a->key_len = 32; + a->iv_len = 8; + a->trunc_size = 16; + + vec_validate (dcm->auth_algs, IPSEC_INTEG_N_ALG - 1); + + { +#define _(v,f,str) \ + dcm->auth_algs[IPSEC_INTEG_ALG_##f].name = str; \ + dcm->auth_algs[IPSEC_INTEG_ALG_##f].disabled = n_mains; + foreach_ipsec_integ_alg +#undef _ + } + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_NONE]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_NULL; + a->key_len = 0; + a->trunc_size = 0; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_MD5_96]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_MD5_HMAC; + a->key_len = 16; + a->trunc_size = 12; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA1_96]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA1_HMAC; + a->key_len = 20; + a->trunc_size = 12; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_96]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC; + a->key_len = 32; + a->trunc_size = 12; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_128]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC; + a->key_len = 32; + a->trunc_size = 16; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_384_192]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA384_HMAC; + a->key_len = 48; + a->trunc_size = 24; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_512_256]; + a->type = 
RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA512_HMAC; + a->key_len = 64; + a->trunc_size = 32; +} + +static u8 +cipher_alg_index (const crypto_alg_t * alg) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + return (alg - dcm->cipher_algs); +} + +static u8 +auth_alg_index (const crypto_alg_t * alg) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + return (alg - dcm->auth_algs); +} + +static crypto_alg_t * +cipher_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 key_len) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *alg; + + if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + return NULL; + + /* *INDENT-OFF* */ + vec_foreach (alg, dcm->cipher_algs) + { + if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) && + (alg->type == RTE_CRYPTO_SYM_XFORM_CIPHER) && + (cap->sym.cipher.algo == alg->alg) && + (alg->key_len == key_len)) + return alg; + if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) && + (alg->type == RTE_CRYPTO_SYM_XFORM_AEAD) && + (cap->sym.aead.algo == alg->alg) && + (alg->key_len == key_len)) + return alg; + } + /* *INDENT-ON* */ + + return NULL; +} + +static crypto_alg_t * +auth_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 trunc_size) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *alg; + + if ((cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) || + (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)) + return NULL; + + /* *INDENT-OFF* */ + vec_foreach (alg, dcm->auth_algs) + { + if ((cap->sym.auth.algo == alg->alg) && + (alg->trunc_size == trunc_size)) + return alg; + } + /* *INDENT-ON* */ + + return NULL; +} + +static void +crypto_set_aead_xform (struct rte_crypto_sym_xform *xform, + ipsec_sa_t * sa, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *c; + + c = vec_elt_at_index (dcm->cipher_algs, sa->crypto_alg); + + ASSERT (c->type == RTE_CRYPTO_SYM_XFORM_AEAD); + + xform->type = RTE_CRYPTO_SYM_XFORM_AEAD; + xform->aead.algo = c->alg; + 
xform->aead.key.data = sa->crypto_key.data; + xform->aead.key.length = c->key_len; + xform->aead.iv.offset = + crypto_op_get_priv_offset () + offsetof (dpdk_op_priv_t, cb); + xform->aead.iv.length = 12; + xform->aead.digest_length = c->trunc_size; + xform->aead.aad_length = ipsec_sa_is_set_USE_ESN (sa) ? 12 : 8; + xform->next = NULL; + + if (is_outbound) + xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT; + else + xform->aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT; +} + +static void +crypto_set_cipher_xform (struct rte_crypto_sym_xform *xform, + ipsec_sa_t * sa, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *c; + + c = vec_elt_at_index (dcm->cipher_algs, sa->crypto_alg); + + ASSERT (c->type == RTE_CRYPTO_SYM_XFORM_CIPHER); + + xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + xform->cipher.algo = c->alg; + xform->cipher.key.data = sa->crypto_key.data; + xform->cipher.key.length = c->key_len; + xform->cipher.iv.offset = + crypto_op_get_priv_offset () + offsetof (dpdk_op_priv_t, cb); + xform->cipher.iv.length = c->iv_len; + xform->next = NULL; + + if (is_outbound) + xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; + else + xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; +} + +static void +crypto_set_auth_xform (struct rte_crypto_sym_xform *xform, + ipsec_sa_t * sa, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *a; + + a = vec_elt_at_index (dcm->auth_algs, sa->integ_alg); + + ASSERT (a->type == RTE_CRYPTO_SYM_XFORM_AUTH); + + xform->type = RTE_CRYPTO_SYM_XFORM_AUTH; + xform->auth.algo = a->alg; + xform->auth.key.data = sa->integ_key.data; + xform->auth.key.length = a->key_len; + xform->auth.digest_length = a->trunc_size; + xform->next = NULL; + + if (is_outbound) + xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; + else + xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; +} + +clib_error_t * +create_sym_session (struct rte_cryptodev_sym_session **session, + u32 sa_idx, + crypto_resource_t * res, + crypto_worker_main_t * 
cwm, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + ipsec_sa_t *sa; + struct rte_crypto_sym_xform cipher_xform = { 0 }; + struct rte_crypto_sym_xform auth_xform = { 0 }; + struct rte_crypto_sym_xform *xfs; + struct rte_cryptodev_sym_session **s; + clib_error_t *error = 0; + + sa = ipsec_sa_get (sa_idx); + + if ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) | + (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) | + (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)) + { + crypto_set_aead_xform (&cipher_xform, sa, is_outbound); + xfs = &cipher_xform; + } + else + { + crypto_set_cipher_xform (&cipher_xform, sa, is_outbound); + crypto_set_auth_xform (&auth_xform, sa, is_outbound); + + if (is_outbound) + { + cipher_xform.next = &auth_xform; + xfs = &cipher_xform; + } + else + { + auth_xform.next = &cipher_xform; + xfs = &auth_xform; + } + } + + data = vec_elt_at_index (dcm->data, res->numa); + clib_spinlock_lock_if_init (&data->lockp); + + /* + * DPDK_VER >= 1708: + * Multiple worker/threads share the session for an SA + * Single session per SA, initialized for each device driver + */ + s = (void *) hash_get (data->session_by_sa_index, sa_idx); + + if (!s) + { + session[0] = rte_cryptodev_sym_session_create (data->session_h); + if (!session[0]) + { + data->session_h_failed += 1; + error = clib_error_return (0, "failed to create session header"); + goto done; + } + hash_set (data->session_by_sa_index, sa_idx, session[0]); + } + else + session[0] = s[0]; + + struct rte_mempool **mp; + mp = vec_elt_at_index (data->session_drv, res->drv_id); + ASSERT (mp[0] != NULL); + + i32 ret = + rte_cryptodev_sym_session_init (res->dev_id, session[0], xfs, mp[0]); + if (ret) + { + data->session_drv_failed[res->drv_id] += 1; + error = clib_error_return (0, "failed to init session for drv %u", + res->drv_id); + goto done; + } + + add_session_by_drv_and_sa_idx (session[0], data, res->drv_id, sa_idx); + +done: + clib_spinlock_unlock_if_init 
(&data->lockp); + return error; +} + +static void __attribute__ ((unused)) clear_and_free_obj (void *obj) +{ + struct rte_mempool *mp = rte_mempool_from_obj (obj); + + clib_memset (obj, 0, mp->elt_size); + + rte_mempool_put (mp, obj); +} + +/* This is from rte_cryptodev_pmd.h */ +static inline void * +get_session_private_data (const struct rte_cryptodev_sym_session *sess, + uint8_t driver_id) +{ +#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) + return sess->sess_private_data[driver_id]; +#else + if (unlikely (sess->nb_drivers <= driver_id)) + return 0; + + return sess->sess_data[driver_id].data; +#endif +} + +/* This is from rte_cryptodev_pmd.h */ +static inline void +set_session_private_data (struct rte_cryptodev_sym_session *sess, + uint8_t driver_id, void *private_data) +{ +#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) + sess->sess_private_data[driver_id] = private_data; +#else + if (unlikely (sess->nb_drivers <= driver_id)) + return; + sess->sess_data[driver_id].data = private_data; +#endif +} + +static clib_error_t * +dpdk_crypto_session_disposal (crypto_session_disposal_t * v, u64 ts) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_session_disposal_t *s; + void *drv_session; + u32 drv_id; + i32 ret; + + /* *INDENT-OFF* */ + vec_foreach (s, v) + { + /* ordered vector by timestamp */ + if (!(s->ts + dcm->session_timeout < ts)) + break; + + vec_foreach_index (drv_id, dcm->drv) + { + drv_session = get_session_private_data (s->session, drv_id); + if (!drv_session) + continue; + + /* + * Custom clear to avoid finding a dev_id for drv_id: + * ret = rte_cryptodev_sym_session_clear (dev_id, drv_session); + * ASSERT (!ret); + */ + clear_and_free_obj (drv_session); + + set_session_private_data (s->session, drv_id, NULL); + } + + if (rte_mempool_from_obj(s->session)) + { + ret = rte_cryptodev_sym_session_free (s->session); + ASSERT (!ret); + } + } + /* *INDENT-ON* */ + + if (s < vec_end (v)) + vec_delete (v, s - v, 0); + else + vec_reset_length (v); + + 
return 0; +} + +static clib_error_t * +add_del_sa_session (u32 sa_index, u8 is_add) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + struct rte_cryptodev_sym_session *s; + uword *val; + u32 drv_id; + + if (is_add) + return 0; + + /* *INDENT-OFF* */ + vec_foreach (data, dcm->data) + { + clib_spinlock_lock_if_init (&data->lockp); + val = hash_get (data->session_by_sa_index, sa_index); + if (val) + { + s = (struct rte_cryptodev_sym_session *) val[0]; + vec_foreach_index (drv_id, dcm->drv) + { + val = (uword*) get_session_by_drv_and_sa_idx (data, drv_id, sa_index); + if (val) + add_session_by_drv_and_sa_idx(NULL, data, drv_id, sa_index); + } + + hash_unset (data->session_by_sa_index, sa_index); + + u64 ts = unix_time_now_nsec (); + dpdk_crypto_session_disposal (data->session_disposal, ts); + + crypto_session_disposal_t sd; + sd.ts = ts; + sd.session = s; + + vec_add1 (data->session_disposal, sd); + } + clib_spinlock_unlock_if_init (&data->lockp); + } + /* *INDENT-ON* */ + + return 0; +} + +static clib_error_t * +dpdk_ipsec_check_support (ipsec_sa_t * sa) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + if (sa->integ_alg == IPSEC_INTEG_ALG_NONE) + switch (sa->crypto_alg) + { + case IPSEC_CRYPTO_ALG_NONE: + case IPSEC_CRYPTO_ALG_AES_GCM_128: + case IPSEC_CRYPTO_ALG_AES_GCM_192: + case IPSEC_CRYPTO_ALG_AES_GCM_256: + break; + default: + return clib_error_return (0, "unsupported integ-alg %U crypto-alg %U", + format_ipsec_integ_alg, sa->integ_alg, + format_ipsec_crypto_alg, sa->crypto_alg); + } + + /* XXX do we need the NONE check? */ + if (sa->crypto_alg != IPSEC_CRYPTO_ALG_NONE && + dcm->cipher_algs[sa->crypto_alg].disabled) + return clib_error_return (0, "disabled crypto-alg %U", + format_ipsec_crypto_alg, sa->crypto_alg); + + /* XXX do we need the NONE check? 
*/ + if (sa->integ_alg != IPSEC_INTEG_ALG_NONE && + dcm->auth_algs[sa->integ_alg].disabled) + return clib_error_return (0, "disabled integ-alg %U", + format_ipsec_integ_alg, sa->integ_alg); + return NULL; +} + +static void +crypto_parse_capabilities (crypto_dev_t * dev, + const struct rte_cryptodev_capabilities *cap, + u32 n_mains) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *alg; + u8 len, inc; + + for (; cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) + { + /* A single capability maps to multiple cipher/auth algorithms */ + switch (cap->sym.xform_type) + { + case RTE_CRYPTO_SYM_XFORM_AEAD: + case RTE_CRYPTO_SYM_XFORM_CIPHER: + inc = cap->sym.cipher.key_size.increment; + inc = inc ? inc : 1; + for (len = cap->sym.cipher.key_size.min; + len <= cap->sym.cipher.key_size.max; len += inc) + { + alg = cipher_cap_to_alg (cap, len); + if (!alg) + continue; + dev->cipher_support[cipher_alg_index (alg)] = 1; + alg->resources += vec_len (dev->free_resources); + /* At least enough resources to support one algo */ + dcm->enabled |= (alg->resources >= n_mains); + } + break; + case RTE_CRYPTO_SYM_XFORM_AUTH: + inc = cap->sym.auth.digest_size.increment; + inc = inc ? 
inc : 1; + for (len = cap->sym.auth.digest_size.min; + len <= cap->sym.auth.digest_size.max; len += inc) + { + alg = auth_cap_to_alg (cap, len); + if (!alg) + continue; + dev->auth_support[auth_alg_index (alg)] = 1; + alg->resources += vec_len (dev->free_resources); + /* At least enough resources to support one algo */ + dcm->enabled |= (alg->resources >= n_mains); + } + break; + default: + ; + } + } +} + +static clib_error_t * +crypto_dev_conf (u8 dev, u16 n_qp, u8 numa) +{ + struct rte_cryptodev_config dev_conf = { 0 }; + struct rte_cryptodev_qp_conf qp_conf = { 0 }; + i32 ret; + u16 qp; + char *error_str; + + dev_conf.socket_id = numa; + dev_conf.nb_queue_pairs = n_qp; + + error_str = "failed to configure crypto device %u"; + ret = rte_cryptodev_configure (dev, &dev_conf); + if (ret < 0) + return clib_error_return (0, error_str, dev); + + error_str = "failed to setup crypto device %u queue pair %u"; + qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC; + for (qp = 0; qp < n_qp; qp++) + { +#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) + ret = rte_cryptodev_queue_pair_setup (dev, qp, &qp_conf, numa, NULL); +#else + ret = rte_cryptodev_queue_pair_setup (dev, qp, &qp_conf, numa); +#endif + if (ret < 0) + return clib_error_return (0, error_str, dev, qp); + } + + error_str = "failed to start crypto device %u"; + if (rte_cryptodev_start (dev)) + return clib_error_return (0, error_str, dev); + + return 0; +} + +static void +crypto_scan_devs (u32 n_mains) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + struct rte_cryptodev *cryptodev; + struct rte_cryptodev_info info = { 0 }; + crypto_dev_t *dev; + crypto_resource_t *res; + clib_error_t *error; + u32 i; + u16 max_res_idx, res_idx, j; + u8 drv_id; + + vec_validate_init_empty (dcm->dev, rte_cryptodev_count () - 1, + (crypto_dev_t) EMPTY_STRUCT); + + for (i = 0; i < rte_cryptodev_count (); i++) + { + dev = vec_elt_at_index (dcm->dev, i); + + cryptodev = &rte_cryptodevs[i]; + rte_cryptodev_info_get (i, &info); + + 
dev->id = i; + dev->name = cryptodev->data->name; + dev->numa = rte_cryptodev_socket_id (i); + dev->features = info.feature_flags; + dev->max_qp = info.max_nb_queue_pairs; + drv_id = info.driver_id; + if (drv_id >= vec_len (dcm->drv)) + vec_validate_init_empty (dcm->drv, drv_id, + (crypto_drv_t) EMPTY_STRUCT); + vec_elt_at_index (dcm->drv, drv_id)->name = info.driver_name; + dev->drv_id = drv_id; + vec_add1 (vec_elt_at_index (dcm->drv, drv_id)->devs, i); + + if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) + continue; + + if ((error = crypto_dev_conf (i, dev->max_qp, dev->numa))) + { + clib_error_report (error); + continue; + } + + max_res_idx = dev->max_qp - 1; + + vec_validate (dev->free_resources, max_res_idx); + + res_idx = vec_len (dcm->resource); + vec_validate_init_empty_aligned (dcm->resource, res_idx + max_res_idx, + (crypto_resource_t) EMPTY_STRUCT, + CLIB_CACHE_LINE_BYTES); + + for (j = 0; j <= max_res_idx; j++) + { + vec_elt (dev->free_resources, max_res_idx - j) = res_idx + j; + res = &dcm->resource[res_idx + j]; + res->dev_id = i; + res->drv_id = drv_id; + res->qp_id = j; + res->numa = dev->numa; + res->thread_idx = (u16) ~ 0; + } + + crypto_parse_capabilities (dev, info.capabilities, n_mains); + } +} + +void +crypto_auto_placement (void) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_resource_t *res; + crypto_worker_main_t *cwm; + crypto_dev_t *dev; + u32 thread_idx, skip_master; + u16 res_idx, *idx; + u8 used; + u16 i; + + skip_master = vlib_num_workers () > 0; + + /* *INDENT-OFF* */ + vec_foreach (dev, dcm->dev) + { + vec_foreach_index (thread_idx, dcm->workers_main) + { + if (vec_len (dev->free_resources) == 0) + break; + + if (thread_idx < skip_master) + continue; + + /* Check thread is not already using the device */ + vec_foreach (idx, dev->used_resources) + if (dcm->resource[idx[0]].thread_idx == thread_idx) + continue; + + cwm = vec_elt_at_index (dcm->workers_main, thread_idx); + + used = 0; + res_idx = vec_pop 
(dev->free_resources); + + /* Set device only for supported algos */ + for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) + if (dev->cipher_support[i] && + cwm->cipher_resource_idx[i] == (u16) ~0) + { + dcm->cipher_algs[i].disabled--; + cwm->cipher_resource_idx[i] = res_idx; + used = 1; + } + + for (i = 0; i < IPSEC_INTEG_N_ALG; i++) + if (dev->auth_support[i] && + cwm->auth_resource_idx[i] == (u16) ~0) + { + dcm->auth_algs[i].disabled--; + cwm->auth_resource_idx[i] = res_idx; + used = 1; + } + + if (!used) + { + vec_add1 (dev->free_resources, res_idx); + continue; + } + + vec_add1 (dev->used_resources, res_idx); + + res = vec_elt_at_index (dcm->resource, res_idx); + + ASSERT (res->thread_idx == (u16) ~0); + res->thread_idx = thread_idx; + + /* Add device to vector of polling resources */ + vec_add1 (cwm->resource_idx, res_idx); + } + } + /* *INDENT-ON* */ +} + +static void +crypto_op_init (struct rte_mempool *mempool, + void *_arg __attribute__ ((unused)), + void *_obj, unsigned i __attribute__ ((unused))) +{ + struct rte_crypto_op *op = _obj; + + op->sess_type = RTE_CRYPTO_OP_WITH_SESSION; + op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + op->phys_addr = rte_mempool_virt2iova (_obj); + op->mempool = mempool; +} + +static clib_error_t * +crypto_create_crypto_op_pool (vlib_main_t * vm, u8 numa) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + dpdk_config_main_t *conf = &dpdk_config_main; + crypto_data_t *data; + u8 *pool_name; + u32 pool_priv_size = sizeof (struct rte_crypto_op_pool_private); + struct rte_crypto_op_pool_private *priv; + struct rte_mempool *mp; + + data = vec_elt_at_index (dcm->data, numa); + + /* Already allocated */ + if (data->crypto_op) + return NULL; + + pool_name = format (0, "crypto_pool_numa%u%c", numa, 0); + + if (conf->num_crypto_mbufs == 0) + conf->num_crypto_mbufs = NUM_CRYPTO_MBUFS; + + mp = rte_mempool_create ((char *) pool_name, conf->num_crypto_mbufs, + crypto_op_len (), 512, pool_priv_size, 
NULL, NULL, + crypto_op_init, NULL, numa, 0); + + vec_free (pool_name); + + if (!mp) + return clib_error_return (0, "failed to create crypto op mempool"); + + /* Initialize mempool private data */ + priv = rte_mempool_get_priv (mp); + priv->priv_size = pool_priv_size; + priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + + data->crypto_op = mp; + + return NULL; +} + +static clib_error_t * +crypto_create_session_h_pool (vlib_main_t * vm, u8 numa) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + u8 *pool_name; + struct rte_mempool *mp; + u32 elt_size; + + data = vec_elt_at_index (dcm->data, numa); + + if (data->session_h) + return NULL; + + pool_name = format (0, "session_h_pool_numa%u%c", numa, 0); + + + elt_size = rte_cryptodev_sym_get_header_session_size (); + +#if RTE_VERSION < RTE_VERSION_NUM(19, 2, 0, 0) + mp = rte_mempool_create ((char *) pool_name, DPDK_CRYPTO_NB_SESS_OBJS, + elt_size, 512, 0, NULL, NULL, NULL, NULL, numa, 0); +#else + /* XXX Experimental tag in DPDK 19.02 */ + mp = rte_cryptodev_sym_session_pool_create ((char *) pool_name, + DPDK_CRYPTO_NB_SESS_OBJS, + elt_size, 512, 0, numa); +#endif + vec_free (pool_name); + + if (!mp) + return clib_error_return (0, "failed to create crypto session mempool"); + + data->session_h = mp; + + return NULL; +} + +static clib_error_t * +crypto_create_session_drv_pool (vlib_main_t * vm, crypto_dev_t * dev) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + u8 *pool_name; + struct rte_mempool *mp; + u32 elt_size; + u8 numa = dev->numa; + + data = vec_elt_at_index (dcm->data, numa); + + vec_validate (data->session_drv, dev->drv_id); + vec_validate (data->session_drv_failed, dev->drv_id); + vec_validate_aligned (data->session_by_drv_id_and_sa_index, 32, + CLIB_CACHE_LINE_BYTES); + + if (data->session_drv[dev->drv_id]) + return NULL; + + pool_name = format (0, "session_drv%u_pool_numa%u%c", dev->drv_id, numa, 0); + + elt_size = rte_cryptodev_sym_get_private_session_size 
(dev->id); + mp = + rte_mempool_create ((char *) pool_name, DPDK_CRYPTO_NB_SESS_OBJS, + elt_size, 512, 0, NULL, NULL, NULL, NULL, numa, 0); + + vec_free (pool_name); + + if (!mp) + return clib_error_return (0, "failed to create session drv mempool"); + + data->session_drv[dev->drv_id] = mp; + clib_spinlock_init (&data->lockp); + + return NULL; +} + +static clib_error_t * +crypto_create_pools (vlib_main_t * vm) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + clib_error_t *error = NULL; + crypto_dev_t *dev; + + /* *INDENT-OFF* */ + vec_foreach (dev, dcm->dev) + { + vec_validate_aligned (dcm->data, dev->numa, CLIB_CACHE_LINE_BYTES); + + error = crypto_create_crypto_op_pool (vm, dev->numa); + if (error) + return error; + + error = crypto_create_session_h_pool (vm, dev->numa); + if (error) + return error; + + error = crypto_create_session_drv_pool (vm, dev); + if (error) + return error; + } + /* *INDENT-ON* */ + + return NULL; +} + +static void +crypto_disable (void) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + u8 i; + + dcm->enabled = 0; + + /* *INDENT-OFF* */ + vec_foreach (data, dcm->data) + { + rte_mempool_free (data->crypto_op); + rte_mempool_free (data->session_h); + + vec_foreach_index (i, data->session_drv) + rte_mempool_free (data->session_drv[i]); + + vec_free (data->session_drv); + clib_spinlock_free (&data->lockp); + } + /* *INDENT-ON* */ + + vec_free (dcm->data); + vec_free (dcm->workers_main); + vec_free (dcm->dev); + vec_free (dcm->resource); + vec_free (dcm->cipher_algs); + vec_free (dcm->auth_algs); +} + +static clib_error_t * +dpdk_ipsec_enable_disable (int is_enable) +{ + vlib_main_t *vm = vlib_get_main (); + vlib_thread_main_t *tm = vlib_get_thread_main (); + vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "dpdk-crypto-input"); + u32 skip_master = vlib_num_workers () > 0; + u32 n_mains = tm->n_vlib_mains; + u32 i; + + ASSERT (node); + for (i = skip_master; i < n_mains; i++) + vlib_node_set_state (vlib_mains[i], 
node->index, is_enable != 0 ? + VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED); + + return 0; +} + +static clib_error_t * +dpdk_ipsec_main_init (vlib_main_t * vm) +{ + ipsec_main_t *im = &ipsec_main; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); + crypto_worker_main_t *cwm; + clib_error_t *error = NULL; + u32 skip_master, n_mains; + + n_mains = tm->n_vlib_mains; + skip_master = vlib_num_workers () > 0; + + algos_init (n_mains - skip_master); + + crypto_scan_devs (n_mains - skip_master); + + if (!(dcm->enabled)) + { + vlib_log_warn (dpdk_main.log_default, + "not enough DPDK crypto resources"); + crypto_disable (); + return 0; + } + + dcm->session_timeout = 10e9; + + vec_validate_init_empty_aligned (dcm->workers_main, n_mains - 1, + (crypto_worker_main_t) EMPTY_STRUCT, + CLIB_CACHE_LINE_BYTES); + + /* *INDENT-OFF* */ + vec_foreach (cwm, dcm->workers_main) + { + vec_validate_init_empty_aligned (cwm->ops, VLIB_FRAME_SIZE - 1, 0, + CLIB_CACHE_LINE_BYTES); + clib_memset (cwm->cipher_resource_idx, ~0, + IPSEC_CRYPTO_N_ALG * sizeof(*cwm->cipher_resource_idx)); + clib_memset (cwm->auth_resource_idx, ~0, + IPSEC_INTEG_N_ALG * sizeof(*cwm->auth_resource_idx)); + } + /* *INDENT-ON* */ + + crypto_auto_placement (); + + error = crypto_create_pools (vm); + if (error) + { + clib_error_report (error); + crypto_disable (); + return 0; + } + + u32 idx = ipsec_register_esp_backend ( + vm, im, "dpdk backend", "dpdk-esp4-encrypt", "dpdk-esp4-encrypt-tun", + "dpdk-esp4-decrypt", "dpdk-esp4-decrypt", "dpdk-esp6-encrypt", + "dpdk-esp6-encrypt-tun", "dpdk-esp6-decrypt", "dpdk-esp6-decrypt", + "error-drop", dpdk_ipsec_check_support, add_del_sa_session, + dpdk_ipsec_enable_disable); + int rv; + if (im->esp_current_backend == ~0) + { + rv = ipsec_select_esp_backend (im, idx); + ASSERT (rv == 0); + } + return 0; +} + +VLIB_MAIN_LOOP_ENTER_FUNCTION (dpdk_ipsec_main_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local 
Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/extras/deprecated/dpdk-ipsec/ipsec.h b/extras/deprecated/dpdk-ipsec/ipsec.h new file mode 100644 index 00000000000..368120e18fa --- /dev/null +++ b/extras/deprecated/dpdk-ipsec/ipsec.h @@ -0,0 +1,404 @@ +/* + * Copyright (c) 2017 Intel and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef __DPDK_IPSEC_H__ +#define __DPDK_IPSEC_H__ + +#include +#include +#include +#include + +#undef always_inline +#include +#include +#include + +#if CLIB_DEBUG > 0 +#define always_inline static inline +#else +#define always_inline static inline __attribute__ ((__always_inline__)) +#endif + +#define DPDK_CRYPTO_N_QUEUE_DESC 2048 +#define DPDK_CRYPTO_NB_SESS_OBJS 20000 + +#define foreach_dpdk_crypto_input_next \ + _(DROP, "error-drop") \ + _(IP4_LOOKUP, "ip4-lookup") \ + _(IP6_LOOKUP, "ip6-lookup") \ + _(INTERFACE_OUTPUT, "interface-output") \ + _(MIDCHAIN, "adj-midchain-tx") \ + _(DECRYPT4_POST, "dpdk-esp4-decrypt-post") \ + _(DECRYPT6_POST, "dpdk-esp6-decrypt-post") + +typedef enum +{ +#define _(f,s) DPDK_CRYPTO_INPUT_NEXT_##f, + foreach_dpdk_crypto_input_next +#undef _ + DPDK_CRYPTO_INPUT_N_NEXT, +} dpdk_crypto_input_next_t; + +#define MAX_QP_PER_LCORE 16 + +typedef struct +{ + u32 salt; + u32 iv[2]; + u32 cnt; +} dpdk_gcm_cnt_blk; + +typedef struct +{ + u32 next; + u32 bi; + u8 encrypt; + CLIB_ALIGN_MARK (mark0, 16); + dpdk_gcm_cnt_blk cb; + u8 aad[16]; + u8 icv[32]; /* XXX last 
16B in next cache line */ +} dpdk_op_priv_t; + +typedef struct +{ + u16 *resource_idx; + struct rte_crypto_op **ops; + u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG]; + u16 auth_resource_idx[IPSEC_INTEG_N_ALG]; + CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); +} crypto_worker_main_t; + +typedef struct +{ + CLIB_ALIGN_MARK (pad, 8); /* align up to 8 bytes for 32bit builds */ + char *name; + enum rte_crypto_sym_xform_type type; + u32 alg; + u8 key_len; + u8 iv_len; + u8 trunc_size; + u8 boundary; + u8 disabled; + u8 resources; +} crypto_alg_t; + +typedef struct +{ + u16 *free_resources; + u16 *used_resources; + u8 cipher_support[IPSEC_CRYPTO_N_ALG]; + u8 auth_support[IPSEC_INTEG_N_ALG]; + u8 drv_id; + u8 numa; + u16 id; + const char *name; + u32 max_qp; + u64 features; +} crypto_dev_t; + +typedef struct +{ + const char *name; + u16 *devs; +} crypto_drv_t; + +typedef struct +{ + u16 thread_idx; + u8 remove; + u8 drv_id; + u8 dev_id; + u8 numa; + u16 qp_id; + u16 inflights[2]; + u16 n_ops; + u16 __unused; + struct rte_crypto_op *ops[VLIB_FRAME_SIZE]; + u32 bi[VLIB_FRAME_SIZE]; + CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); +} crypto_resource_t; + +typedef struct +{ + u64 ts; + struct rte_cryptodev_sym_session *session; +} crypto_session_disposal_t; + +typedef struct +{ + struct rte_cryptodev_sym_session *session; + u64 dev_mask; + CLIB_ALIGN_MARK (pad, 16); /* align up to 16 bytes for 32bit builds */ +} crypto_session_by_drv_t; + +typedef struct +{ + struct rte_mempool *crypto_op; + struct rte_mempool *session_h; + struct rte_mempool **session_drv; + crypto_session_disposal_t *session_disposal; + uword *session_by_sa_index; + u64 crypto_op_get_failed; + u64 session_h_failed; + u64 *session_drv_failed; + crypto_session_by_drv_t *session_by_drv_id_and_sa_index; + clib_spinlock_t lockp; + /* Required for vec_validate_aligned */ + CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); +} crypto_data_t; + +typedef struct +{ + crypto_worker_main_t *workers_main; + crypto_dev_t *dev; + 
crypto_resource_t *resource; + crypto_alg_t *cipher_algs; + crypto_alg_t *auth_algs; + crypto_data_t *data; + crypto_drv_t *drv; + u64 session_timeout; /* nsec */ + u8 enabled; +} dpdk_crypto_main_t; + +extern dpdk_crypto_main_t dpdk_crypto_main; + +static const u8 pad_data[] = + { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0 }; + +void crypto_auto_placement (void); + +clib_error_t *create_sym_session (struct rte_cryptodev_sym_session **session, + u32 sa_idx, crypto_resource_t * res, + crypto_worker_main_t * cwm, u8 is_outbound); + +static_always_inline u32 +crypto_op_len (void) +{ + const u32 align = 4; + u32 op_size = + sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op); + + return ((op_size + align - 1) & ~(align - 1)) + sizeof (dpdk_op_priv_t); +} + +static_always_inline u32 +crypto_op_get_priv_offset (void) +{ + const u32 align = 16; + u32 offset; + + offset = sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op); + offset = (offset + align - 1) & ~(align - 1); + + return offset; +} + +static_always_inline dpdk_op_priv_t * +crypto_op_get_priv (struct rte_crypto_op * op) +{ + return (dpdk_op_priv_t *) (((u8 *) op) + crypto_op_get_priv_offset ()); +} + + +static_always_inline void +add_session_by_drv_and_sa_idx (struct rte_cryptodev_sym_session *session, + crypto_data_t * data, u32 drv_id, u32 sa_idx) +{ + crypto_session_by_drv_t *sbd; + vec_validate_aligned (data->session_by_drv_id_and_sa_index, sa_idx, + CLIB_CACHE_LINE_BYTES); + sbd = vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx); + sbd->dev_mask |= 1L << drv_id; + sbd->session = session; +} + +static_always_inline struct rte_cryptodev_sym_session * +get_session_by_drv_and_sa_idx (crypto_data_t * data, u32 drv_id, u32 sa_idx) +{ + crypto_session_by_drv_t *sess_by_sa; + if (_vec_len (data->session_by_drv_id_and_sa_index) <= sa_idx) + return NULL; + sess_by_sa = + vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx); + return (sess_by_sa->dev_mask 
& (1L << drv_id)) ? sess_by_sa->session : NULL; +} + +static_always_inline clib_error_t * +crypto_get_session (struct rte_cryptodev_sym_session ** session, + u32 sa_idx, + crypto_resource_t * res, + crypto_worker_main_t * cwm, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + struct rte_cryptodev_sym_session *sess; + + data = vec_elt_at_index (dcm->data, res->numa); + sess = get_session_by_drv_and_sa_idx (data, res->drv_id, sa_idx); + + if (PREDICT_FALSE (!sess)) + return create_sym_session (session, sa_idx, res, cwm, is_outbound); + + session[0] = sess; + + return NULL; +} + +static_always_inline u16 +get_resource (crypto_worker_main_t * cwm, ipsec_sa_t * sa) +{ + u16 cipher_res = cwm->cipher_resource_idx[sa->crypto_alg]; + u16 auth_res = cwm->auth_resource_idx[sa->integ_alg]; + u8 is_aead; + + /* Not allowed to setup SA with no-aead-cipher/NULL or NULL/NULL */ + + is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) || + (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) || + (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)); + + if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE) + return auth_res; + + if (cipher_res == auth_res) + return cipher_res; + + if (is_aead) + return cipher_res; + + return (u16) ~ 0; +} + +static_always_inline i32 +crypto_alloc_ops (u8 numa, struct rte_crypto_op ** ops, u32 n) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data = vec_elt_at_index (dcm->data, numa); + i32 ret; + + ret = rte_mempool_get_bulk (data->crypto_op, (void **) ops, n); + + /* *INDENT-OFF* */ + data->crypto_op_get_failed += ! 
!ret; + /* *INDENT-ON* */ + + return ret; +} + +static_always_inline void +crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data = vec_elt_at_index (dcm->data, numa); + + if (!n) + return; + + rte_mempool_put_bulk (data->crypto_op, (void **) ops, n); +} + +static_always_inline void +crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm, + u32 node_index, u32 error, u8 numa, u8 encrypt) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_resource_t *res; + u16 *res_idx; + + /* *INDENT-OFF* */ + vec_foreach (res_idx, cwm->resource_idx) + { + u16 enq, n_ops; + res = vec_elt_at_index (dcm->resource, res_idx[0]); + + if (!res->n_ops) + continue; + + n_ops = (DPDK_CRYPTO_N_QUEUE_DESC / 2) - res->inflights[encrypt]; + n_ops = res->n_ops < n_ops ? res->n_ops : n_ops; + enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id, + res->ops, n_ops); + ASSERT (n_ops == enq); + res->inflights[encrypt] += enq; + + if (PREDICT_FALSE (enq < res->n_ops)) + { + crypto_free_ops (numa, &res->ops[enq], res->n_ops - enq); + vlib_buffer_free (vm, &res->bi[enq], res->n_ops - enq); + + vlib_node_increment_counter (vm, node_index, error, + res->n_ops - enq); + } + res->n_ops = 0; + } + /* *INDENT-ON* */ +} + +static_always_inline void +crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi) +{ + icb->salt = salt; + icb->iv[0] = seq; + icb->iv[1] = seq_hi; +} + +static_always_inline void +crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0, + struct rte_crypto_op *op, void *session, + u32 cipher_off, u32 cipher_len, + u32 auth_off, u32 auth_len, + u8 * aad, u8 * digest, u64 digest_paddr) +{ + struct rte_crypto_sym_op *sym_op; + + sym_op = (struct rte_crypto_sym_op *) (op + 1); + + sym_op->m_src = mb0; + sym_op->session = session; + + if (is_aead) + { + sym_op->aead.data.offset = cipher_off; + sym_op->aead.data.length = cipher_len; + + sym_op->aead.aad.data = aad; + 
sym_op->aead.aad.phys_addr = + op->phys_addr + (uintptr_t) aad - (uintptr_t) op; + + sym_op->aead.digest.data = digest; + sym_op->aead.digest.phys_addr = digest_paddr; + } + else + { + sym_op->cipher.data.offset = cipher_off; + sym_op->cipher.data.length = cipher_len; + + sym_op->auth.data.offset = auth_off; + sym_op->auth.data.length = auth_len; + + sym_op->auth.digest.data = digest; + sym_op->auth.digest.phys_addr = digest_paddr; + } +} + +#endif /* __DPDK_IPSEC_H__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ -- cgit 1.2.3-korg