author    Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>  2017-08-26 15:22:05 +0100
committer Damjan Marion <dmarion.lists@gmail.com>  2017-10-05 09:54:34 +0000
commit    db93cd971320301eb21403caabada7a3ec6a4cce (patch)
tree      01c57f4f476a97805411a74d665eedb72331f91e /src/plugins/dpdk/ipsec/ipsec.h
parent    7939f904600018aeed9d8cc9d19ca37c7e96f3d1 (diff)
dpdk/ipsec: rework plus improved cli commands
This patch reworks the DPDK ipsec implementation, including the cryptodev management, and replaces the cli commands with new ones for better usability.

For the data path:
- The dpdk-esp-encrypt-post node is no longer necessary.
- IPv4 packets in the decrypt path are sent to ip4-input-no-checksum instead of ip4-input.

The DPDK cryptodev cli commands are replaced by the following new commands:
- show dpdk crypto devices
- show dpdk crypto placement [verbose]
- set dpdk crypto placement (<device> <thread> | auto)
- clear dpdk crypto placement <device> [<thread>]
- show dpdk crypto pools

Change-Id: I47324517ede82d3e6e0e9f9c71c1a3433714b27b
Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
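For reference, an illustrative operator session with the new commands. The device and thread arguments below (aesni_mb_0, thread 2) are placeholders, not output from the patch; actual device names depend on the cryptodevs probed at startup:

  vpp# show dpdk crypto devices
  vpp# show dpdk crypto placement verbose
  vpp# set dpdk crypto placement aesni_mb_0 2
  vpp# set dpdk crypto placement auto
  vpp# clear dpdk crypto placement aesni_mb_0
  vpp# show dpdk crypto pools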
Diffstat (limited to 'src/plugins/dpdk/ipsec/ipsec.h')
-rw-r--r--  src/plugins/dpdk/ipsec/ipsec.h | 432
1 file changed, 293 insertions(+), 139 deletions(-)
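Before the diff: the key layout change in this header is that each rte_crypto_op is now allocated with a per-packet private area (dpdk_op_priv_t) placed directly after the rte_crypto_op and rte_crypto_sym_op structs, rounded up to a 16-byte boundary, and recovered by plain pointer arithmetic (see crypto_op_len / crypto_op_get_priv below). A minimal standalone sketch of that arithmetic, using assumed stand-in sizes for the DPDK structs rather than the real definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for struct rte_crypto_op / rte_crypto_sym_op; the sizes
 * are assumptions for illustration only and vary across DPDK versions. */
struct fake_crypto_op { uint8_t pad[64]; };
struct fake_crypto_sym_op { uint8_t pad[112]; };

/* Mirrors dpdk_op_priv_t from this patch. */
typedef struct
{
  uint8_t cb[16];		/* dpdk_gcm_cnt_blk: salt, IV words, counter */
  uint8_t aad[12];
  uint32_t next;		/* next-node index carried through the crypto op */
  uint8_t icv[32];
} op_priv_t __attribute__ ((aligned (16)));

/* Same arithmetic as crypto_op_get_priv_offset (): round the header
 * size up to a 16-byte boundary so the private area stays aligned. */
static uint32_t
priv_offset (void)
{
  const uint32_t align = 16;
  uint32_t off = sizeof (struct fake_crypto_op)
    + sizeof (struct fake_crypto_sym_op);
  return (off + align - 1) & ~(align - 1);
}

/* Same idea as crypto_op_get_priv (): the private area is found by
 * offsetting from the op pointer; no per-op bookkeeping is needed. */
static op_priv_t *
op_get_priv (void *op)
{
  return (op_priv_t *) ((uint8_t *) op + priv_offset ());
}

int
main (void)
{
  /* Pretend this buffer is one element of the crypto-op mempool. */
  uint8_t buf[256] __attribute__ ((aligned (16))) = { 0 };
  op_priv_t *priv = op_get_priv (buf);

  priv->next = 1;		/* e.g. DPDK_CRYPTO_INPUT_NEXT_IP4_LOOKUP */
  printf ("priv offset %u, total op len %u\n", priv_offset (),
	  priv_offset () + (uint32_t) sizeof (op_priv_t));
  return 0;
}

This is why crypto_op_len () in the patch reports the rounded header size plus sizeof (dpdk_op_priv_t) as the mempool element size: the ops and their metadata live in a single allocation.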
diff --git a/src/plugins/dpdk/ipsec/ipsec.h b/src/plugins/dpdk/ipsec/ipsec.h
index a94dd6821af..98e5ad53acb 100644
--- a/src/plugins/dpdk/ipsec/ipsec.h
+++ b/src/plugins/dpdk/ipsec/ipsec.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016 Intel and/or its affiliates.
+ * Copyright (c) 2017 Intel and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -16,6 +16,8 @@
#define __DPDK_IPSEC_H__
#include <vnet/vnet.h>
+#include <vppinfra/cache.h>
+#include <vnet/ipsec/ipsec.h>
#undef always_inline
#include <rte_config.h>
@@ -28,6 +30,20 @@
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
+#define foreach_dpdk_crypto_input_next \
+ _(DROP, "error-drop") \
+ _(IP4_LOOKUP, "ip4-lookup") \
+ _(IP6_LOOKUP, "ip6-lookup") \
+ _(INTERFACE_OUTPUT, "interface-output") \
+ _(DECRYPT_POST, "dpdk-esp-decrypt-post")
+
+typedef enum
+{
+#define _(f,s) DPDK_CRYPTO_INPUT_NEXT_##f,
+ foreach_dpdk_crypto_input_next
+#undef _
+ DPDK_CRYPTO_INPUT_N_NEXT,
+} dpdk_crypto_input_next_t;
#define MAX_QP_PER_LCORE 16
@@ -41,196 +57,334 @@ typedef struct
typedef struct
{
dpdk_gcm_cnt_blk cb;
- union
- {
- u8 aad[12];
- u8 icv[64];
- };
-} dpdk_cop_priv_t;
+ u8 aad[12];
+ u32 next;
+ u8 icv[32];
+} dpdk_op_priv_t __attribute__ ((aligned (16)));
typedef struct
{
- u8 cipher_algo;
- u8 auth_algo;
- u8 is_outbound;
- u8 is_aead;
-} crypto_worker_qp_key_t;
+ u16 *resource_idx;
+ uword *session_by_drv_id_and_sa_index;
+ u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG];
+ u16 auth_resource_idx[IPSEC_INTEG_N_ALG];
+ struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
+} crypto_worker_main_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
typedef struct
{
- u16 dev_id;
- u16 qp_id;
- u16 is_outbound;
- i16 inflights;
- u32 bi[VLIB_FRAME_SIZE];
- struct rte_crypto_op *cops[VLIB_FRAME_SIZE];
- struct rte_crypto_op **free_cops;
-} crypto_qp_data_t;
+ char *name;
+ enum rte_crypto_sym_xform_type type;
+ u32 alg;
+ u8 key_len;
+ u8 iv_len;
+ u8 trunc_size;
+ u8 boundary;
+ u8 disabled;
+ u8 resources;
+} crypto_alg_t __attribute__ ((aligned (8)));
typedef struct
{
- u8 qp_index;
- void *sess;
-} crypto_sa_session_t;
+ u16 *free_resources;
+ u16 *used_resources;
+ u8 cipher_support[IPSEC_CRYPTO_N_ALG];
+ u8 auth_support[IPSEC_INTEG_N_ALG];
+ u8 drv_id;
+ u8 numa;
+ u16 id;
+ const i8 *name;
+ u32 max_qp;
+ u64 features;
+} crypto_dev_t;
+
+typedef struct
+{
+ const i8 *name;
+ u16 *devs;
+} crypto_drv_t;
+
+typedef struct
+{
+ u16 thread_idx;
+ u8 remove;
+ u8 drv_id;
+ u8 dev_id;
+ u8 numa;
+ u16 qp_id;
+ u16 inflights[2];
+ u16 n_ops;
+ u16 __unused;
+ struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
+ u32 bi[VLIB_FRAME_SIZE];
+} crypto_resource_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
typedef struct
{
- crypto_sa_session_t *sa_sess_d[2];
- crypto_qp_data_t *qp_data;
- uword *algo_qp_map;
-} crypto_worker_main_t;
+ struct rte_mempool *crypto_op;
+ struct rte_mempool *session_h;
+ struct rte_mempool **session_drv;
+ uword *session_by_sa_index;
+ u64 crypto_op_get_failed;
+ u64 session_h_failed;
+ u64 *session_drv_failed;
+} crypto_data_t;
typedef struct
{
- struct rte_mempool **sess_h_pools;
- struct rte_mempool **sess_pools;
- struct rte_mempool **cop_pools;
crypto_worker_main_t *workers_main;
+ struct rte_cryptodev_sym_session **sa_session;
+ crypto_dev_t *dev;
+ crypto_resource_t *resource;
+ crypto_alg_t *cipher_algs;
+ crypto_alg_t *auth_algs;
+ crypto_data_t *data;
+ crypto_drv_t *drv;
+ u8 max_drv_id;
u8 enabled;
} dpdk_crypto_main_t;
dpdk_crypto_main_t dpdk_crypto_main;
-extern vlib_node_registration_t dpdk_crypto_input_node;
+static const u8 pad_data[] =
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0 };
-#define CRYPTO_N_FREE_COPS (VLIB_FRAME_SIZE * 3)
+void crypto_auto_placement (void);
-static_always_inline void
-crypto_alloc_cops ()
+clib_error_t *create_sym_session (struct rte_cryptodev_sym_session **session,
+ u32 sa_idx, crypto_resource_t * res,
+ crypto_worker_main_t * cwm, u8 is_outbound);
+
+static_always_inline u32
+crypto_op_len (void)
+{
+ const u32 align = 16;
+ u32 op_size =
+ sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);
+
+ return ((op_size + align - 1) & ~(align - 1)) + sizeof (dpdk_op_priv_t);
+}
+
+static_always_inline u32
+crypto_op_get_priv_offset (void)
+{
+ const u32 align = 16;
+ u32 offset;
+
+ offset = sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);
+ offset = (offset + align - 1) & ~(align - 1);
+
+ return offset;
+}
+
+static_always_inline dpdk_op_priv_t *
+crypto_op_get_priv (struct rte_crypto_op * op)
+{
+ return (dpdk_op_priv_t *) (((u8 *) op) + crypto_op_get_priv_offset ());
+}
+
+/* XXX this requires 64 bit builds so hash_xxx macros use u64 key */
+typedef union
+{
+ u64 val;
+ struct
+ {
+ u32 drv_id;
+ u32 sa_idx;
+ };
+} crypto_session_key_t;
+
+static_always_inline clib_error_t *
+crypto_get_session (struct rte_cryptodev_sym_session **session,
+ u32 sa_idx,
+ crypto_resource_t * res,
+ crypto_worker_main_t * cwm, u8 is_outbound)
+{
+ crypto_session_key_t key = { 0 };
+
+ key.drv_id = res->drv_id;
+ key.sa_idx = sa_idx;
+
+ uword *val = hash_get (cwm->session_by_drv_id_and_sa_index, key.val);
+
+ if (PREDICT_FALSE (!val))
+ return create_sym_session (session, sa_idx, res, cwm, is_outbound);
+
+ session[0] = (struct rte_cryptodev_sym_session *) val[0];
+
+ return NULL;
+}
+
+static_always_inline u16
+get_resource (crypto_worker_main_t * cwm, ipsec_sa_t * sa)
+{
+ u16 cipher_res = cwm->cipher_resource_idx[sa->crypto_alg];
+ u16 auth_res = cwm->auth_resource_idx[sa->integ_alg];
+ u8 is_aead;
+
+ /* Not allowed to setup SA with no-aead-cipher/NULL or NULL/NULL */
+
+ is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) |
+ (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) |
+ (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256));
+
+ if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE)
+ return auth_res;
+
+ if (cipher_res == auth_res)
+ return cipher_res;
+
+ if (is_aead)
+ return cipher_res;
+
+ return (u16) ~ 0;
+}
+
+static_always_inline i32
+crypto_alloc_ops (u8 numa, struct rte_crypto_op ** ops, u32 n)
{
dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
- u32 thread_index = vlib_get_thread_index ();
- crypto_worker_main_t *cwm = &dcm->workers_main[thread_index];
- unsigned socket_id = rte_socket_id ();
- crypto_qp_data_t *qpd;
+ crypto_data_t *data = vec_elt_at_index (dcm->data, numa);
+ i32 ret;
- /* *INDENT-OFF* */
- vec_foreach (qpd, cwm->qp_data)
- {
- u32 l = vec_len (qpd->free_cops);
+ ret = rte_mempool_get_bulk (data->crypto_op, (void **) ops, n);
- if (PREDICT_FALSE (l < VLIB_FRAME_SIZE))
- {
- u32 n_alloc;
+ data->crypto_op_get_failed += ! !ret;
- if (PREDICT_FALSE (!qpd->free_cops))
- vec_alloc (qpd->free_cops, CRYPTO_N_FREE_COPS);
+ return ret;
+}
- n_alloc = rte_crypto_op_bulk_alloc (dcm->cop_pools[socket_id],
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- &qpd->free_cops[l],
- CRYPTO_N_FREE_COPS - l - 1);
+static_always_inline void
+crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
+{
+ dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+ crypto_data_t *data = vec_elt_at_index (dcm->data, numa);
- _vec_len (qpd->free_cops) = l + n_alloc;
- }
- }
- /* *INDENT-ON* */
+ if (!n)
+ return;
+
+ rte_mempool_put_bulk (data->crypto_op, (void **) ops, n);
}
static_always_inline void
-crypto_free_cop (crypto_qp_data_t * qpd, struct rte_crypto_op **cops, u32 n)
+crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm, u8 outbound,
+ u32 node_index, u32 error, u8 numa)
{
- u32 l = vec_len (qpd->free_cops);
+ dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
+ crypto_resource_t *res;
+ u16 *res_idx;
- if (l + n >= CRYPTO_N_FREE_COPS)
+ /* *INDENT-OFF* */
+ vec_foreach (res_idx, cwm->resource_idx)
{
- l -= VLIB_FRAME_SIZE;
- rte_mempool_put_bulk (cops[0]->mempool,
- (void **) &qpd->free_cops[l], VLIB_FRAME_SIZE);
- }
- clib_memcpy (&qpd->free_cops[l], cops, sizeof (*cops) * n);
+ u16 enq;
+ res = vec_elt_at_index (dcm->resource, res_idx[0]);
+
+ if (!res->n_ops)
+ continue;
+
+ enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id + outbound,
+ res->ops, res->n_ops);
+ res->inflights[outbound] += enq;
+
+ if (PREDICT_FALSE (enq < res->n_ops))
+ {
+ crypto_free_ops (numa, &res->ops[enq], res->n_ops - enq);
+ vlib_buffer_free (vm, &res->bi[enq], res->n_ops - enq);
- _vec_len (qpd->free_cops) = l + n;
+ vlib_node_increment_counter (vm, node_index, error,
+ res->n_ops - enq);
+ }
+ res->n_ops = 0;
+ }
+ /* *INDENT-ON* */
}
-static_always_inline int
-check_algo_is_supported (const struct rte_cryptodev_capabilities *cap,
- char *name)
+static_always_inline void
+crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi)
{
- struct
- {
- enum rte_crypto_sym_xform_type type;
- union
- {
- enum rte_crypto_auth_algorithm auth;
- enum rte_crypto_cipher_algorithm cipher;
-#if ! DPDK_NO_AEAD
- enum rte_crypto_aead_algorithm aead;
-#endif
- };
- char *name;
- } supported_algo[] =
- {
- {
- .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
- RTE_CRYPTO_CIPHER_NULL,.name = "NULL"},
- {
- .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
- RTE_CRYPTO_CIPHER_AES_CBC,.name = "AES_CBC"},
+ icb->salt = salt;
+ icb->iv[0] = seq;
+ icb->iv[1] = seq_hi;
#if DPDK_NO_AEAD
- {
- .type = RTE_CRYPTO_SYM_XFORM_CIPHER,.cipher =
- RTE_CRYPTO_CIPHER_AES_GCM,.name = "AES-GCM"},
-#else
- {
- .type = RTE_CRYPTO_SYM_XFORM_AEAD,.aead =
- RTE_CRYPTO_AEAD_AES_GCM,.name = "AES-GCM"},
+ icb->cnt = clib_host_to_net_u32 (1);
#endif
- {
- .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
- RTE_CRYPTO_AUTH_NULL,.name = "NULL"},
- {
- .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
- RTE_CRYPTO_AUTH_SHA1_HMAC,.name = "HMAC-SHA1"},
- {
- .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
- RTE_CRYPTO_AUTH_SHA256_HMAC,.name = "HMAC-SHA256"},
- {
- .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
- RTE_CRYPTO_AUTH_SHA384_HMAC,.name = "HMAC-SHA384"},
- {
- .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
- RTE_CRYPTO_AUTH_SHA512_HMAC,.name = "HMAC-SHA512"},
+}
+
+#define __unused __attribute__((unused))
+static_always_inline void
+crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0,
+ struct rte_crypto_op *op, void *session,
+ u32 cipher_off, u32 cipher_len,
+ u8 * icb __unused, u32 iv_size __unused,
+ u32 auth_off, u32 auth_len,
+ u8 * aad __unused, u32 aad_size __unused,
+ u8 * digest, u64 digest_paddr, u32 digest_size __unused)
+{
+ struct rte_crypto_sym_op *sym_op;
+
+ sym_op = (struct rte_crypto_sym_op *) (op + 1);
+
+ sym_op->m_src = mb0;
+ sym_op->session = session;
+
+ if (!digest_paddr)
+ digest_paddr = mb0->buf_physaddr + ((u8 *) digest) - ((u8 *) mb0);
+
#if DPDK_NO_AEAD
+ sym_op->cipher.data.offset = cipher_off;
+ sym_op->cipher.data.length = cipher_len;
+
+ sym_op->cipher.iv.data = icb;
+ sym_op->cipher.iv.phys_addr =
+ op->phys_addr + (uintptr_t) icb - (uintptr_t) op;
+ sym_op->cipher.iv.length = iv_size;
+
+ if (is_aead)
{
- .type = RTE_CRYPTO_SYM_XFORM_AUTH,.auth =
- RTE_CRYPTO_AUTH_AES_GCM,.name = "AES-GCM"},
-#endif
+ sym_op->auth.aad.data = aad;
+ sym_op->auth.aad.phys_addr =
+ op->phys_addr + (uintptr_t) aad - (uintptr_t) op;
+ sym_op->auth.aad.length = aad_size;
+ }
+ else
{
- /* tail */
- .type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED}
- };
+ sym_op->auth.data.offset = auth_off;
+ sym_op->auth.data.length = auth_len;
+ }
- uint32_t i = 0;
+ sym_op->auth.digest.data = digest;
+ sym_op->auth.digest.phys_addr = digest_paddr;
+ sym_op->auth.digest.length = digest_size;
+#else /* ! DPDK_NO_AEAD */
+ if (is_aead)
+ {
+ sym_op->aead.data.offset = cipher_off;
+ sym_op->aead.data.length = cipher_len;
- if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
- return -1;
+ sym_op->aead.aad.data = aad;
+ sym_op->aead.aad.phys_addr =
+ op->phys_addr + (uintptr_t) aad - (uintptr_t) op;
- while (supported_algo[i].type != RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
- {
- if (cap->sym.xform_type == supported_algo[i].type)
- {
- if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- cap->sym.cipher.algo == supported_algo[i].cipher) ||
-#if ! DPDK_NO_AEAD
- (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD &&
- cap->sym.aead.algo == supported_algo[i].aead) ||
-#endif
- (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- cap->sym.auth.algo == supported_algo[i].auth))
- {
- if (name)
- strcpy (name, supported_algo[i].name);
- return 0;
- }
- }
-
- i++;
+ sym_op->aead.digest.data = digest;
+ sym_op->aead.digest.phys_addr = digest_paddr;
}
+ else
+ {
+ sym_op->cipher.data.offset = cipher_off;
+ sym_op->cipher.data.length = cipher_len;
- return -1;
+ sym_op->auth.data.offset = auth_off;
+ sym_op->auth.data.length = auth_len;
+
+ sym_op->auth.digest.data = digest;
+ sym_op->auth.digest.phys_addr = digest_paddr;
+ }
+#endif /* DPDK_NO_AEAD */
}
+#undef __unused
+
#endif /* __DPDK_IPSEC_H__ */
/*