path: root/examples/ipsec-secgw
Diffstat (limited to 'examples/ipsec-secgw')
-rw-r--r--  examples/ipsec-secgw/esp.c          245
-rw-r--r--  examples/ipsec-secgw/ipsec-secgw.c   46
-rw-r--r--  examples/ipsec-secgw/ipsec.c         11
-rw-r--r--  examples/ipsec-secgw/ipsec.h         11
-rw-r--r--  examples/ipsec-secgw/sa.c           287
5 files changed, 412 insertions, 188 deletions
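
This series moves AES-GCM in the example from the chained cipher+auth transforms to the single AEAD transform type added with the DPDK 17.08 cryptodev API, and it switches IVs from per-session pointers to a fixed offset inside each crypto operation. For orientation before the per-file hunks, a minimal sketch of the AEAD transform layout the new code populates; gcm_key is a placeholder, and the lengths mirror the aes-128-gcm entry added to sa.c:

#include <rte_crypto.h>

#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			 sizeof(struct rte_crypto_sym_op))

static uint8_t gcm_key[16];	/* placeholder key material */

/* One AEAD transform replaces the separate cipher and auth transforms:
 * 16-byte counter-block IV stored at IV_OFFSET inside the op, 8-byte AAD
 * (SPI + sequence number from the ESP header), 16-byte digest. */
static struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.next = NULL,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,	/* _DECRYPT for inbound SAs */
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = gcm_key, .length = 16 },
		.iv = { .offset = IV_OFFSET, .length = 16 },
		.aad_length = 8,
		.digest_length = 16,
	},
};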
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index e77afa0e..70bb81f7 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,68 +84,79 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
}
sym_cop = get_sym_cop(cop);
-
sym_cop->m_src = m;
- sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
- sa->iv_len;
- sym_cop->cipher.data.length = payload_len;
-
- struct cnt_blk *icb;
- uint8_t *aad;
- uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
-
- switch (sa->cipher_algo) {
- case RTE_CRYPTO_CIPHER_NULL:
- case RTE_CRYPTO_CIPHER_AES_CBC:
- sym_cop->cipher.iv.data = iv;
- sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- ip_hdr_len + sizeof(struct esp_hdr));
- sym_cop->cipher.iv.length = sa->iv_len;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- case RTE_CRYPTO_CIPHER_AES_GCM:
+
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
+ sym_cop->aead.data.length = payload_len;
+
+ struct cnt_blk *icb;
+ uint8_t *aad;
+ uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+
icb = get_cnt_blk(m);
icb->salt = sa->salt;
memcpy(&icb->iv, iv, 8);
icb->cnt = rte_cpu_to_be_32(1);
- sym_cop->cipher.iv.data = (uint8_t *)icb;
- sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
- sym_cop->cipher.iv.length = 16;
- break;
- default:
- RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
- sa->cipher_algo);
- return -EINVAL;
- }
- switch (sa->auth_algo) {
- case RTE_CRYPTO_AUTH_NULL:
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- sym_cop->auth.data.offset = ip_hdr_len;
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
- sa->iv_len + payload_len;
- break;
- case RTE_CRYPTO_AUTH_AES_GCM:
aad = get_aad(m);
memcpy(aad, iv - sizeof(struct esp_hdr), 8);
- sym_cop->auth.aad.data = aad;
- sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.data = aad;
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
- sym_cop->auth.aad.length = 8;
- break;
- default:
- RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
- sa->auth_algo);
- return -EINVAL;
- }
- sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
- rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
- rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.length = sa->digest_len;
+ sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ } else {
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
+ sym_cop->cipher.data.length = payload_len;
+
+ struct cnt_blk *icb;
+ uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
+ uint8_t *, IV_OFFSET);
+
+ switch (sa->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ /* Copy IV at the end of crypto operation */
+ rte_memcpy(iv_ptr, iv, sa->iv_len);
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ icb = get_cnt_blk(m);
+ icb->salt = sa->salt;
+ memcpy(&icb->iv, iv, 8);
+ icb->cnt = rte_cpu_to_be_32(1);
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
+ sa->cipher_algo);
+ return -EINVAL;
+ }
+
+ switch (sa->auth_algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ sym_cop->auth.data.offset = ip_hdr_len;
+ sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sa->iv_len + payload_len;
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
+ sa->auth_algo);
+ return -EINVAL;
+ }
+
+ sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ }
return 0;
}
@@ -314,71 +325,87 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop = get_sym_cop(cop);
sym_cop->m_src = m;
- switch (sa->cipher_algo) {
- case RTE_CRYPTO_CIPHER_NULL:
- case RTE_CRYPTO_CIPHER_AES_CBC:
- memset(iv, 0, sa->iv_len);
- sym_cop->cipher.data.offset = ip_hdr_len +
- sizeof(struct esp_hdr);
- sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- case RTE_CRYPTO_CIPHER_AES_GCM:
+
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ uint8_t *aad;
+
*iv = sa->seq;
- sym_cop->cipher.data.offset = ip_hdr_len +
+ sym_cop->aead.data.offset = ip_hdr_len +
sizeof(struct esp_hdr) + sa->iv_len;
- sym_cop->cipher.data.length = pad_payload_len;
- break;
- default:
- RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
- sa->cipher_algo);
- return -EINVAL;
- }
+ sym_cop->aead.data.length = pad_payload_len;
+
+ /* Fill pad_len using default sequential scheme */
+ for (i = 0; i < pad_len - 2; i++)
+ padding[i] = i + 1;
+ padding[pad_len - 2] = pad_len - 2;
+ padding[pad_len - 1] = nlp;
+
+ struct cnt_blk *icb = get_cnt_blk(m);
+ icb->salt = sa->salt;
+ icb->iv = sa->seq;
+ icb->cnt = rte_cpu_to_be_32(1);
- /* Fill pad_len using default sequential scheme */
- for (i = 0; i < pad_len - 2; i++)
- padding[i] = i + 1;
- padding[pad_len - 2] = pad_len - 2;
- padding[pad_len - 1] = nlp;
-
- struct cnt_blk *icb = get_cnt_blk(m);
- icb->salt = sa->salt;
- icb->iv = sa->seq;
- icb->cnt = rte_cpu_to_be_32(1);
- sym_cop->cipher.iv.data = (uint8_t *)icb;
- sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- (uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
- sym_cop->cipher.iv.length = 16;
-
- uint8_t *aad;
-
- switch (sa->auth_algo) {
- case RTE_CRYPTO_AUTH_NULL:
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- sym_cop->auth.data.offset = ip_hdr_len;
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
- sa->iv_len + pad_payload_len;
- break;
- case RTE_CRYPTO_AUTH_AES_GCM:
aad = get_aad(m);
memcpy(aad, esp, 8);
- sym_cop->auth.aad.data = aad;
- sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.data = aad;
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
- sym_cop->auth.aad.length = 8;
- break;
- default:
- RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
- sa->auth_algo);
- return -EINVAL;
- }
- sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
+ sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.length = sa->digest_len;
+ } else {
+ switch (sa->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ memset(iv, 0, sa->iv_len);
+ sym_cop->cipher.data.offset = ip_hdr_len +
+ sizeof(struct esp_hdr);
+ sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ *iv = sa->seq;
+ sym_cop->cipher.data.offset = ip_hdr_len +
+ sizeof(struct esp_hdr) + sa->iv_len;
+ sym_cop->cipher.data.length = pad_payload_len;
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
+ sa->cipher_algo);
+ return -EINVAL;
+ }
+
+ /* Fill pad_len using default sequential scheme */
+ for (i = 0; i < pad_len - 2; i++)
+ padding[i] = i + 1;
+ padding[pad_len - 2] = pad_len - 2;
+ padding[pad_len - 1] = nlp;
+
+ struct cnt_blk *icb = get_cnt_blk(m);
+ icb->salt = sa->salt;
+ icb->iv = sa->seq;
+ icb->cnt = rte_cpu_to_be_32(1);
+
+ switch (sa->auth_algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ sym_cop->auth.data.offset = ip_hdr_len;
+ sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sa->iv_len + pad_payload_len;
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
+ sa->auth_algo);
+ return -EINVAL;
+ }
+
+ sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
+ }
return 0;
}
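
With the session no longer carrying an IV pointer, esp.c now writes the per-packet IV either into the counter block (AES-CTR/GCM) or into the crypto operation at IV_OFFSET (NULL/AES-CBC), and the transform's iv.offset locates it. A small sketch of that addressing, assuming the IV_OFFSET definition the patch adds to ipsec.h and that the area behind the sym op (the buf[] field ipsec.h moves after sym_cop) holds the IV:

#include <string.h>
#include <rte_crypto.h>

#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			 sizeof(struct rte_crypto_sym_op))

/* Copy the packet's IV into the op-private area that the cipher
 * transform's iv.offset points at. */
static void
set_op_iv(struct rte_crypto_op *cop, const uint8_t *iv, uint16_t iv_len)
{
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET);

	memcpy(iv_ptr, iv, iv_len);
}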
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 8cbf6ac4..99dc270c 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -710,10 +710,12 @@ main_loop(__attribute__((unused)) void *dummy)
qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
qconf->inbound.cdev_map = cdev_map_in;
+ qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
qconf->outbound.cdev_map = cdev_map_out;
+ qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
if (qconf->nb_rx_queue == 0) {
RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
@@ -1238,6 +1240,13 @@ cryptodevs_init(void)
printf("lcore/cryptodev/qp mappings:\n");
+ uint32_t max_sess_sz = 0, sess_sz;
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ sess_sz = rte_cryptodev_get_private_session_size(cdev_id);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+ }
+
idx = 0;
/* Start from last cdev id to give HW priority */
for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
@@ -1266,17 +1275,39 @@ cryptodevs_init(void)
dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
dev_conf.nb_queue_pairs = qp;
- dev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS;
- dev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ;
+
+ if (!socket_ctx[dev_conf.socket_id].session_pool) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", dev_conf.socket_id);
+ sess_mp = rte_mempool_create(mp_name,
+ CDEV_MP_NB_OBJS,
+ max_sess_sz,
+ CDEV_MP_CACHE_SZ,
+ 0, NULL, NULL, NULL,
+ NULL, dev_conf.socket_id,
+ 0);
+ if (sess_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Cannot create session pool on socket %d\n",
+ dev_conf.socket_id);
+ else
+ printf("Allocated session pool on socket %d\n",
+ dev_conf.socket_id);
+ socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
+ }
if (rte_cryptodev_configure(cdev_id, &dev_conf))
- rte_panic("Failed to initialize crypodev %u\n",
+ rte_panic("Failed to initialize cryptodev %u\n",
cdev_id);
qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
- &qp_conf, dev_conf.socket_id))
+ &qp_conf, dev_conf.socket_id,
+ socket_ctx[dev_conf.socket_id].session_pool))
rte_panic("Failed to setup queue %u for "
"cdev_id %u\n", 0, cdev_id);
@@ -1332,6 +1363,11 @@ port_init(uint8_t portid)
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%d\n", ret, portid);
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
+ "err=%d, port=%d\n", ret, portid);
+
/* init one TX queue per lcore */
tx_queueid = 0;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
@@ -1433,7 +1469,7 @@ main(int32_t argc, char **argv)
nb_lcores = rte_lcore_count();
- /* Replicate each contex per socket */
+ /* Replicate each context per socket */
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index edca5f02..0afb9d67 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,7 +45,7 @@
#include "esp.h"
static inline int
-create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
+create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
struct rte_cryptodev_info cdev_info;
unsigned long cdev_id_qp = 0;
@@ -72,11 +72,15 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
ipsec_ctx->tbl[cdev_id_qp].qp);
sa->crypto_session = rte_cryptodev_sym_session_create(
- ipsec_ctx->tbl[cdev_id_qp].id, sa->xforms);
+ ipsec_ctx->session_pool);
+ rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
+ sa->crypto_session, sa->xforms,
+ ipsec_ctx->session_pool);
rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
ret = rte_cryptodev_queue_pair_attach_sym_session(
+ ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp,
sa->crypto_session);
if (ret < 0) {
@@ -140,7 +144,6 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_prefetch0(&priv->sym_cop);
- priv->cop.sym = &priv->sym_cop;
if ((unlikely(sa->crypto_session == NULL)) &&
create_session(ipsec_ctx, sa)) {
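
ipsec.c also adopts the two-step session API from 17.08: the session object is allocated from a per-socket mempool and then initialised against a specific device with the SA's transform chain. A condensed sketch of that sequence, with dev_id and sess_mp standing in for ipsec_ctx->tbl[cdev_id_qp].id and ipsec_ctx->session_pool, and without the optional queue-pair attach shown in the hunk above:

#include <errno.h>
#include <rte_cryptodev.h>

static int
create_session_sketch(uint8_t dev_id, struct rte_mempool *sess_mp,
		struct rte_crypto_sym_xform *xforms,
		struct rte_cryptodev_sym_session **sess)
{
	/* Allocate the generic session header from the mempool... */
	*sess = rte_cryptodev_sym_session_create(sess_mp);
	if (*sess == NULL)
		return -ENOMEM;

	/* ...then create the device-private session data for dev_id. */
	return rte_cryptodev_sym_session_init(dev_id, *sess, xforms, sess_mp);
}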
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index fe426614..da1fb1b2 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,9 @@
#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */
+#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
+ sizeof(struct rte_crypto_sym_op))
+
#define uint32_t_to_char(ip, a, b, c, d) do {\
*a = (uint8_t)(ip >> 24 & 0xff);\
*b = (uint8_t)(ip >> 16 & 0xff);\
@@ -72,7 +75,6 @@
struct rte_crypto_xform;
struct ipsec_xform;
-struct rte_cryptodev_session;
struct rte_mbuf;
struct ipsec_sa;
@@ -100,6 +102,7 @@ struct ipsec_sa {
struct rte_cryptodev_sym_session *crypto_session;
enum rte_crypto_cipher_algorithm cipher_algo;
enum rte_crypto_auth_algorithm auth_algo;
+ enum rte_crypto_aead_algorithm aead_algo;
uint16_t digest_len;
uint16_t iv_len;
uint16_t block_size;
@@ -118,10 +121,10 @@ struct ipsec_sa {
} __rte_cache_aligned;
struct ipsec_mbuf_metadata {
- uint8_t buf[32];
struct ipsec_sa *sa;
struct rte_crypto_op cop;
struct rte_crypto_sym_op sym_cop;
+ uint8_t buf[32];
} __rte_cache_aligned;
struct cdev_qp {
@@ -140,6 +143,7 @@ struct ipsec_ctx {
uint16_t nb_qps;
uint16_t last_qp;
struct cdev_qp tbl[MAX_QP_PER_LCORE];
+ struct rte_mempool *session_pool;
};
struct cdev_key {
@@ -158,6 +162,7 @@ struct socket_ctx {
struct rt_ctx *rt_ip4;
struct rt_ctx *rt_ip6;
struct rte_mempool *mbuf_pool;
+ struct rte_mempool *session_pool;
};
struct cnt_blk {
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 39624c49..7be0e628 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,10 +64,20 @@ struct supported_auth_algo {
enum rte_crypto_auth_algorithm algo;
uint16_t digest_len;
uint16_t key_len;
- uint8_t aad_len;
uint8_t key_not_req;
};
+struct supported_aead_algo {
+ const char *keyword;
+ enum rte_crypto_aead_algorithm algo;
+ uint16_t iv_len;
+ uint16_t block_size;
+ uint16_t digest_len;
+ uint16_t key_len;
+ uint8_t aad_len;
+};
+
+
const struct supported_cipher_algo cipher_algos[] = {
{
.keyword = "null",
@@ -84,13 +94,6 @@ const struct supported_cipher_algo cipher_algos[] = {
.key_len = 16
},
{
- .keyword = "aes-128-gcm",
- .algo = RTE_CRYPTO_CIPHER_AES_GCM,
- .iv_len = 8,
- .block_size = 4,
- .key_len = 20
- },
- {
.keyword = "aes-128-ctr",
.algo = RTE_CRYPTO_CIPHER_AES_CTR,
.iv_len = 8,
@@ -118,13 +121,18 @@ const struct supported_auth_algo auth_algos[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.digest_len = 12,
.key_len = 32
- },
+ }
+};
+
+const struct supported_aead_algo aead_algos[] = {
{
.keyword = "aes-128-gcm",
- .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .iv_len = 8,
+ .block_size = 4,
+ .key_len = 20,
.digest_len = 16,
.aad_len = 8,
- .key_not_req = 1
}
};
@@ -166,6 +174,22 @@ find_match_auth_algo(const char *auth_keyword)
return NULL;
}
+static const struct supported_aead_algo *
+find_match_aead_algo(const char *aead_keyword)
+{
+ size_t i;
+
+ for (i = 0; i < RTE_DIM(aead_algos); i++) {
+ const struct supported_aead_algo *algo =
+ &aead_algos[i];
+
+ if (strcmp(aead_keyword, algo->keyword) == 0)
+ return algo;
+ }
+
+ return NULL;
+}
+
/** parse_key_string
* parse x:x:x:x.... hex number key string into uint8_t *key
* return:
@@ -210,6 +234,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
uint32_t *ri /*rule index*/;
uint32_t cipher_algo_p = 0;
uint32_t auth_algo_p = 0;
+ uint32_t aead_algo_p = 0;
uint32_t src_p = 0;
uint32_t dst_p = 0;
uint32_t mode_p = 0;
@@ -319,8 +344,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC)
rule->salt = (uint32_t)rte_rand();
- if ((algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) ||
- (algo->algo == RTE_CRYPTO_CIPHER_AES_GCM)) {
+ if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
key_len -= 4;
rule->cipher_key_len = key_len;
memcpy(&rule->salt,
@@ -386,6 +410,61 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
continue;
}
+ if (strcmp(tokens[ti], "aead_algo") == 0) {
+ const struct supported_aead_algo *algo;
+ uint32_t key_len;
+
+ APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
+ status);
+ if (status->status < 0)
+ return;
+
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+
+ algo = find_match_aead_algo(tokens[ti]);
+
+ APP_CHECK(algo != NULL, status, "unrecognized "
+ "input \"%s\"", tokens[ti]);
+
+ rule->aead_algo = algo->algo;
+ rule->cipher_key_len = algo->key_len;
+ rule->digest_len = algo->digest_len;
+ rule->aad_len = algo->key_len;
+ rule->block_size = algo->block_size;
+ rule->iv_len = algo->iv_len;
+
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+
+ APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
+ status, "unrecognized input \"%s\", "
+ "expect \"aead_key\"", tokens[ti]);
+ if (status->status < 0)
+ return;
+
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+
+ key_len = parse_key_string(tokens[ti],
+ rule->cipher_key);
+ APP_CHECK(key_len == rule->cipher_key_len, status,
+ "unrecognized input \"%s\"", tokens[ti]);
+ if (status->status < 0)
+ return;
+
+ key_len -= 4;
+ rule->cipher_key_len = key_len;
+ memcpy(&rule->salt,
+ &rule->cipher_key[key_len], 4);
+
+ aead_algo_p = 1;
+ continue;
+ }
+
if (strcmp(tokens[ti], "src") == 0) {
APP_CHECK_PRESENCE(src_p, tokens[ti], status);
if (status->status < 0)
@@ -477,13 +556,25 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
return;
}
- APP_CHECK(cipher_algo_p == 1, status, "missing cipher options");
- if (status->status < 0)
- return;
+ if (aead_algo_p) {
+ APP_CHECK(cipher_algo_p == 0, status,
+ "AEAD used, no need for cipher options");
+ if (status->status < 0)
+ return;
- APP_CHECK(auth_algo_p == 1, status, "missing auth options");
- if (status->status < 0)
- return;
+ APP_CHECK(auth_algo_p == 0, status,
+ "AEAD used, no need for auth options");
+ if (status->status < 0)
+ return;
+ } else {
+ APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
+ if (status->status < 0)
+ return;
+
+ APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
+ if (status->status < 0)
+ return;
+ }
APP_CHECK(mode_p == 1, status, "missing mode option");
if (status->status < 0)
@@ -514,6 +605,13 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
}
}
+ for (i = 0; i < RTE_DIM(aead_algos); i++) {
+ if (aead_algos[i].algo == sa->aead_algo) {
+ printf("%s ", aead_algos[i].keyword);
+ break;
+ }
+ }
+
printf("mode:");
switch (sa->flags) {
@@ -589,6 +687,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
{
struct ipsec_sa *sa;
uint32_t i, idx;
+ uint16_t iv_length;
for (i = 0; i < nb_entries; i++) {
idx = SPI2IDX(entries[i].spi);
@@ -607,56 +706,110 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
}
- if (inbound) {
- sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
- sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
- sa_ctx->xf[idx].b.cipher.key.length =
- sa->cipher_key_len;
- sa_ctx->xf[idx].b.cipher.op =
- RTE_CRYPTO_CIPHER_OP_DECRYPT;
- sa_ctx->xf[idx].b.next = NULL;
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ iv_length = 16;
+
+ if (inbound) {
+ sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
+ sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
+ sa_ctx->xf[idx].a.aead.key.length =
+ sa->cipher_key_len;
+ sa_ctx->xf[idx].a.aead.op =
+ RTE_CRYPTO_AEAD_OP_DECRYPT;
+ sa_ctx->xf[idx].a.next = NULL;
+ sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].a.aead.iv.length = iv_length;
+ sa_ctx->xf[idx].a.aead.aad_length =
+ sa->aad_len;
+ sa_ctx->xf[idx].a.aead.digest_length =
+ sa->digest_len;
+ } else { /* outbound */
+ sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
+ sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
+ sa_ctx->xf[idx].a.aead.key.length =
+ sa->cipher_key_len;
+ sa_ctx->xf[idx].a.aead.op =
+ RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ sa_ctx->xf[idx].a.next = NULL;
+ sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].a.aead.iv.length = iv_length;
+ sa_ctx->xf[idx].a.aead.aad_length =
+ sa->aad_len;
+ sa_ctx->xf[idx].a.aead.digest_length =
+ sa->digest_len;
+ }
- sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
- sa_ctx->xf[idx].a.auth.add_auth_data_length =
- sa->aad_len;
- sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
- sa_ctx->xf[idx].a.auth.key.length =
- sa->auth_key_len;
- sa_ctx->xf[idx].a.auth.digest_length =
- sa->digest_len;
- sa_ctx->xf[idx].a.auth.op =
- RTE_CRYPTO_AUTH_OP_VERIFY;
-
- } else { /* outbound */
- sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
- sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
- sa_ctx->xf[idx].a.cipher.key.length =
- sa->cipher_key_len;
- sa_ctx->xf[idx].a.cipher.op =
- RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- sa_ctx->xf[idx].a.next = NULL;
-
- sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
- sa_ctx->xf[idx].b.auth.add_auth_data_length =
- sa->aad_len;
- sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
- sa_ctx->xf[idx].b.auth.key.length =
- sa->auth_key_len;
- sa_ctx->xf[idx].b.auth.digest_length =
- sa->digest_len;
- sa_ctx->xf[idx].b.auth.op =
- RTE_CRYPTO_AUTH_OP_GENERATE;
- }
+ sa->xforms = &sa_ctx->xf[idx].a;
- sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
- sa_ctx->xf[idx].b.next = NULL;
- sa->xforms = &sa_ctx->xf[idx].a;
+ print_one_sa_rule(sa, inbound);
+ } else {
+ switch (sa->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ iv_length = sa->iv_len;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ iv_length = 16;
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP,
+ "unsupported cipher algorithm %u\n",
+ sa->cipher_algo);
+ return -EINVAL;
+ }
+
+ if (inbound) {
+ sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
+ sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
+ sa_ctx->xf[idx].b.cipher.key.length =
+ sa->cipher_key_len;
+ sa_ctx->xf[idx].b.cipher.op =
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ sa_ctx->xf[idx].b.next = NULL;
+ sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
+
+ sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
+ sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
+ sa_ctx->xf[idx].a.auth.key.length =
+ sa->auth_key_len;
+ sa_ctx->xf[idx].a.auth.digest_length =
+ sa->digest_len;
+ sa_ctx->xf[idx].a.auth.op =
+ RTE_CRYPTO_AUTH_OP_VERIFY;
+ } else { /* outbound */
+ sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
+ sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
+ sa_ctx->xf[idx].a.cipher.key.length =
+ sa->cipher_key_len;
+ sa_ctx->xf[idx].a.cipher.op =
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sa_ctx->xf[idx].a.next = NULL;
+ sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
+
+ sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
+ sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
+ sa_ctx->xf[idx].b.auth.key.length =
+ sa->auth_key_len;
+ sa_ctx->xf[idx].b.auth.digest_length =
+ sa->digest_len;
+ sa_ctx->xf[idx].b.auth.op =
+ RTE_CRYPTO_AUTH_OP_GENERATE;
+ }
- print_one_sa_rule(sa, inbound);
+ sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
+ sa_ctx->xf[idx].b.next = NULL;
+ sa->xforms = &sa_ctx->xf[idx].a;
+
+ print_one_sa_rule(sa, inbound);
+ }
}
return 0;