From 6b3e017e5d25f15da73f7700f7f2ac553ef1a2e9 Mon Sep 17 00:00:00 2001
From: Christian Ehrhardt
Date: Thu, 8 Dec 2016 14:07:29 +0100
Subject: Imported Upstream version 16.11

Change-Id: I1944c65ddc88a9ad70f8c0eb6731552b84fbcb77
Signed-off-by: Christian Ehrhardt
---
 examples/ipsec-secgw/esp.c | 144 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 103 insertions(+), 41 deletions(-)

(limited to 'examples/ipsec-secgw/esp.c')

diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 05caa77a..ec5a2e62 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -50,21 +50,6 @@
 #include "esp.h"
 #include "ipip.h"
 
-static inline void
-random_iv_u64(uint64_t *buf, uint16_t n)
-{
-	uint32_t left = n & 0x7;
-	uint32_t i;
-
-	RTE_ASSERT((n & 0x3) == 0);
-
-	for (i = 0; i < (n >> 3); i++)
-		buf[i] = rte_rand();
-
-	if (left)
-		*((uint32_t *)&buf[i]) = (uint32_t)lrand48();
-}
-
 int
 esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		struct rte_crypto_op *cop)
@@ -98,22 +83,62 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		return -EINVAL;
 	}
 
-	sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
+	sym_cop = get_sym_cop(cop);
 
 	sym_cop->m_src = m;
 	sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
 		sa->iv_len;
 	sym_cop->cipher.data.length = payload_len;
 
-	sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, void*,
-		ip_hdr_len + sizeof(struct esp_hdr));
-	sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
-		ip_hdr_len + sizeof(struct esp_hdr));
-	sym_cop->cipher.iv.length = sa->iv_len;
+	struct cnt_blk *icb;
+	uint8_t *aad;
+	uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+
+	switch (sa->cipher_algo) {
+	case RTE_CRYPTO_CIPHER_NULL:
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sym_cop->cipher.iv.data = iv;
+		sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
+				ip_hdr_len + sizeof(struct esp_hdr));
+		sym_cop->cipher.iv.length = sa->iv_len;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+	case RTE_CRYPTO_CIPHER_AES_GCM:
+		icb = get_cnt_blk(m);
+		icb->salt = sa->salt;
+		memcpy(&icb->iv, iv, 8);
+		icb->cnt = rte_cpu_to_be_32(1);
+		sym_cop->cipher.iv.data = (uint8_t *)icb;
+		sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
+				(uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
+		sym_cop->cipher.iv.length = 16;
+		break;
+	default:
+		RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
+				sa->cipher_algo);
+		return -EINVAL;
+	}
 
-	sym_cop->auth.data.offset = ip_hdr_len;
-	sym_cop->auth.data.length = sizeof(struct esp_hdr) +
-		sa->iv_len + payload_len;
+	switch (sa->auth_algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		sym_cop->auth.data.offset = ip_hdr_len;
+		sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+			sa->iv_len + payload_len;
+		break;
+	case RTE_CRYPTO_AUTH_AES_GCM:
+		aad = get_aad(m);
+		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
+		sym_cop->auth.aad.data = aad;
+		sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+				aad - rte_pktmbuf_mtod(m, uint8_t *));
+		sym_cop->auth.aad.length = 8;
+		break;
+	default:
+		RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
+				sa->auth_algo);
+		return -EINVAL;
+	}
 
 	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
 			rte_pktmbuf_pkt_len(m) - sa->digest_len);
@@ -282,10 +307,32 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
 	sa->seq++;
 	esp->spi = rte_cpu_to_be_32(sa->spi);
-	esp->seq = rte_cpu_to_be_32(sa->seq);
+	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
 
-	if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
-		random_iv_u64((uint64_t *)(esp + 1), sa->iv_len);
+	uint64_t *iv = (uint64_t *)(esp + 1);
+
+	sym_cop = get_sym_cop(cop);
+	sym_cop->m_src = m;
+	switch (sa->cipher_algo) {
+	case RTE_CRYPTO_CIPHER_NULL:
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		memset(iv, 0, sa->iv_len);
+		sym_cop->cipher.data.offset = ip_hdr_len +
+			sizeof(struct esp_hdr);
+		sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+	case RTE_CRYPTO_CIPHER_AES_GCM:
+		*iv = sa->seq;
+		sym_cop->cipher.data.offset = ip_hdr_len +
+			sizeof(struct esp_hdr) + sa->iv_len;
+		sym_cop->cipher.data.length = pad_payload_len;
+		break;
+	default:
+		RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
+				sa->cipher_algo);
+		return -EINVAL;
+	}
 
 	/* Fill pad_len using default sequential scheme */
 	for (i = 0; i < pad_len - 2; i++)
@@ -293,22 +340,37 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
 	padding[pad_len - 2] = pad_len - 2;
 	padding[pad_len - 1] = nlp;
-	sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
-
-	sym_cop->m_src = m;
-	sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
-		sa->iv_len;
-	sym_cop->cipher.data.length = pad_payload_len;
-
-	sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
-		ip_hdr_len + sizeof(struct esp_hdr));
+	struct cnt_blk *icb = get_cnt_blk(m);
+	icb->salt = sa->salt;
+	icb->iv = sa->seq;
+	icb->cnt = rte_cpu_to_be_32(1);
+	sym_cop->cipher.iv.data = (uint8_t *)icb;
 	sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
-			ip_hdr_len + sizeof(struct esp_hdr));
-	sym_cop->cipher.iv.length = sa->iv_len;
+			(uint8_t *)icb - rte_pktmbuf_mtod(m, uint8_t *));
+	sym_cop->cipher.iv.length = 16;
 
-	sym_cop->auth.data.offset = ip_hdr_len;
-	sym_cop->auth.data.length = sizeof(struct esp_hdr) + sa->iv_len +
-		pad_payload_len;
+	uint8_t *aad;
+
+	switch (sa->auth_algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		sym_cop->auth.data.offset = ip_hdr_len;
+		sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+			sa->iv_len + pad_payload_len;
+		break;
+	case RTE_CRYPTO_AUTH_AES_GCM:
+		aad = get_aad(m);
+		memcpy(aad, esp, 8);
+		sym_cop->auth.aad.data = aad;
+		sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+				aad - rte_pktmbuf_mtod(m, uint8_t *));
+		sym_cop->auth.aad.length = 8;
+		break;
+	default:
+		RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
+				sa->auth_algo);
+		return -EINVAL;
+	}
 
 	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 			rte_pktmbuf_pkt_len(m) - sa->digest_len);
-- 
cgit 1.2.3-korg
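
Note on the AES-CTR/AES-GCM paths above (commentary, not part of the patch): instead of pointing cipher.iv at the IV bytes carried in the packet, the new code builds a 16-byte counter block per packet from the SA salt, the 8-byte ESP IV (the sequence number on the outbound path) and a 32-bit block counter initialised to big-endian 1, and hands that block to the crypto device (cipher.iv.length = 16). For GCM the 8 bytes of AAD are the ESP SPI and sequence number copied straight from the header. The struct cnt_blk definition is not part of this diff (it lives elsewhere in ipsec-secgw), so the layout below is an assumed, illustrative stand-in that only mirrors the field names and sizes implied by the hunks:

/* Hypothetical, self-contained sketch of the 16-byte counter block the
 * patch fills in; this struct cnt_blk is an assumption, not the real
 * ipsec-secgw definition, and htonl() stands in for rte_cpu_to_be_32(). */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct cnt_blk {
	uint32_t salt;	/* per-SA salt (sa->salt in the patch) */
	uint64_t iv;	/* 8-byte per-packet IV; sa->seq on the outbound path */
	uint32_t cnt;	/* block counter, big-endian, starts at 1 */
} __attribute__((packed));

int main(void)
{
	struct cnt_blk icb;

	icb.salt = 0xdeadbeef;	/* example salt value */
	icb.iv = 42;		/* example sequence number used as IV */
	icb.cnt = htonl(1);	/* mirrors rte_cpu_to_be_32(1) */

	printf("counter block size: %zu bytes\n", sizeof(icb));	/* expect 16 */
	return 0;
}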