author     Luca Boccassi <luca.boccassi@gmail.com>   2017-11-08 14:15:11 +0000
committer  Luca Boccassi <luca.boccassi@gmail.com>   2017-11-08 14:45:54 +0000
commit     055c52583a2794da8ba1e85a48cce3832372b12f
tree       8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /examples/ipsec-secgw/esp.c
parent     f239aed5e674965691846e8ce3f187dd47523689
New upstream version 17.11-rc3
Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'examples/ipsec-secgw/esp.c')
-rw-r--r--  examples/ipsec-secgw/esp.c | 144
1 file changed, 102 insertions(+), 42 deletions(-)
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 70bb81f7..c3efe52b 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -58,8 +58,11 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	struct rte_crypto_sym_op *sym_cop;
 	int32_t payload_len, ip_hdr_len;
 
-	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(sa != NULL);
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+		return 0;
+
+	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(cop != NULL);
 
 	ip4 = rte_pktmbuf_mtod(m, struct ip *);
@@ -103,12 +106,12 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		aad = get_aad(m);
 		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
 		sym_cop->aead.aad.data = aad;
-		sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
 				aad - rte_pktmbuf_mtod(m, uint8_t *));
 
 		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
-		sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
 	} else {
 		sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
@@ -154,7 +157,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
 		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
-		sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
 	}
 
@@ -175,29 +178,44 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 	RTE_ASSERT(sa != NULL);
 	RTE_ASSERT(cop != NULL);
 
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
+			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
+				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			else
+				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		} else
+			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	}
+
 	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
 		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
 		return -1;
 	}
 
-	nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
-			rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
-	pad_len = nexthdr - 1;
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+			sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
+		nexthdr = &m->inner_esp_next_proto;
+	} else {
+		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
+				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
+		pad_len = nexthdr - 1;
+
+		padding = pad_len - *pad_len;
+		for (i = 0; i < *pad_len; i++) {
+			if (padding[i] != i + 1) {
+				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
+				return -EINVAL;
+			}
+		}
 
-	padding = pad_len - *pad_len;
-	for (i = 0; i < *pad_len; i++) {
-		if (padding[i] != i + 1) {
-			RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
+		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
+			RTE_LOG(ERR, IPSEC_ESP,
+					"failed to remove pad_len + digest\n");
 			return -EINVAL;
 		}
 	}
 
-	if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
-		RTE_LOG(ERR, IPSEC_ESP,
-				"failed to remove pad_len + digest\n");
-		return -EINVAL;
-	}
-
 	if (unlikely(sa->flags == TRANSPORT)) {
 		ip = rte_pktmbuf_mtod(m, struct ip *);
 		ip4 = (struct ip *)rte_pktmbuf_adj(m,
@@ -211,7 +229,8 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 			/* XXX No option headers supported */
 			memmove(ip6, ip, sizeof(struct ip6_hdr));
 			ip6->ip6_nxt = *nexthdr;
-			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+					sizeof(struct ip6_hdr));
 		}
 	} else
 		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
@@ -226,14 +245,13 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	struct ip *ip4;
 	struct ip6_hdr *ip6;
 	struct esp_hdr *esp = NULL;
-	uint8_t *padding, *new_ip, nlp;
+	uint8_t *padding = NULL, *new_ip, nlp;
 	struct rte_crypto_sym_op *sym_cop;
 	int32_t i;
 	uint16_t pad_payload_len, pad_len, ip_hdr_len;
 
 	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(sa != NULL);
-	RTE_ASSERT(cop != NULL);
 
 	ip_hdr_len = 0;
 
@@ -283,12 +301,19 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		return -EINVAL;
 	}
 
-	padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
-	if (unlikely(padding == NULL)) {
-		RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
-		return -ENOSPC;
+	/* Add trailer padding if it is not constructed by HW */
+	if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+	    (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+	     !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
+		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
+				sa->digest_len);
+		if (unlikely(padding == NULL)) {
+			RTE_LOG(ERR, IPSEC_ESP,
+					"not enough mbuf trailing space\n");
+			return -ENOSPC;
+		}
+		rte_prefetch0(padding);
 	}
-	rte_prefetch0(padding);
 
 	switch (sa->flags) {
 	case IP4_TUNNEL:
@@ -306,14 +331,15 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 				sizeof(struct esp_hdr) + sa->iv_len);
 		memmove(new_ip, ip4, ip_hdr_len);
 		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+		ip4 = (struct ip *)new_ip;
 		if (likely(ip4->ip_v == IPVERSION)) {
-			ip4 = (struct ip *)new_ip;
 			ip4->ip_p = IPPROTO_ESP;
 			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
 		} else {
 			ip6 = (struct ip6_hdr *)new_ip;
 			ip6->ip6_nxt = IPPROTO_ESP;
-			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+					sizeof(struct ip6_hdr));
 		}
 	}
 
@@ -321,15 +347,46 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	esp->spi = rte_cpu_to_be_32(sa->spi);
 	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
 
+	/* set iv */
 	uint64_t *iv = (uint64_t *)(esp + 1);
+	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+		*iv = rte_cpu_to_be_64(sa->seq);
+	} else {
+		switch (sa->cipher_algo) {
+		case RTE_CRYPTO_CIPHER_NULL:
+		case RTE_CRYPTO_CIPHER_AES_CBC:
+			memset(iv, 0, sa->iv_len);
+			break;
+		case RTE_CRYPTO_CIPHER_AES_CTR:
+			*iv = rte_cpu_to_be_64(sa->seq);
+			break;
+		default:
+			RTE_LOG(ERR, IPSEC_ESP,
+				"unsupported cipher algorithm %u\n",
+				sa->cipher_algo);
+			return -EINVAL;
+		}
+	}
 
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+		if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
+			/* Set the inner esp next protocol for HW trailer */
+			m->inner_esp_next_proto = nlp;
+			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
+		} else {
+			padding[pad_len - 2] = pad_len - 2;
+			padding[pad_len - 1] = nlp;
+		}
+		goto done;
+	}
+
+	RTE_ASSERT(cop != NULL);
 	sym_cop = get_sym_cop(cop);
 	sym_cop->m_src = m;
 
 	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
 		uint8_t *aad;
 
-		*iv = sa->seq;
 		sym_cop->aead.data.offset = ip_hdr_len +
 			sizeof(struct esp_hdr) + sa->iv_len;
 		sym_cop->aead.data.length = pad_payload_len;
@@ -342,30 +399,28 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
 		struct cnt_blk *icb = get_cnt_blk(m);
 		icb->salt = sa->salt;
-		icb->iv = sa->seq;
+		icb->iv = rte_cpu_to_be_64(sa->seq);
 		icb->cnt = rte_cpu_to_be_32(1);
 
 		aad = get_aad(m);
 		memcpy(aad, esp, 8);
 		sym_cop->aead.aad.data = aad;
-		sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
 				aad - rte_pktmbuf_mtod(m, uint8_t *));
 
 		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
-		sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
 	} else {
 		switch (sa->cipher_algo) {
 		case RTE_CRYPTO_CIPHER_NULL:
 		case RTE_CRYPTO_CIPHER_AES_CBC:
-			memset(iv, 0, sa->iv_len);
 			sym_cop->cipher.data.offset = ip_hdr_len +
 				sizeof(struct esp_hdr);
 			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
 			break;
 		case RTE_CRYPTO_CIPHER_AES_CTR:
-			*iv = sa->seq;
 			sym_cop->cipher.data.offset = ip_hdr_len +
 				sizeof(struct esp_hdr) + sa->iv_len;
 			sym_cop->cipher.data.length = pad_payload_len;
@@ -384,7 +439,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
 		struct cnt_blk *icb = get_cnt_blk(m);
 		icb->salt = sa->salt;
-		icb->iv = sa->seq;
+		icb->iv = rte_cpu_to_be_64(sa->seq);
 		icb->cnt = rte_cpu_to_be_32(1);
 
 		switch (sa->auth_algo) {
@@ -403,25 +458,30 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
 	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 			rte_pktmbuf_pkt_len(m) - sa->digest_len);
-	sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+	sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 			rte_pktmbuf_pkt_len(m) - sa->digest_len);
 	}
 
+done:
 	return 0;
 }
 
 int
-esp_outbound_post(struct rte_mbuf *m __rte_unused,
-		struct ipsec_sa *sa __rte_unused,
-		struct rte_crypto_op *cop)
+esp_outbound_post(struct rte_mbuf *m,
+		struct ipsec_sa *sa,
+		struct rte_crypto_op *cop)
{
 	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(sa != NULL);
-	RTE_ASSERT(cop != NULL);
 
-	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
-		RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
-		return -1;
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
+	} else {
+		RTE_ASSERT(cop != NULL);
+		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+			RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+			return -1;
+		}
 	}
 
 	return 0;
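Note on the padding handling above: esp_inbound_post() validates the RFC 4303 ESP trailer, whose pad bytes carry the monotonic sequence 1, 2, 3, ..., followed by a one-byte Pad Length field and a one-byte Next Header field; esp_outbound() builds the same layout unless RTE_SECURITY_TX_HW_TRAILER_OFFLOAD leaves it to hardware. A minimal standalone sketch of that layout follows; the helper names are hypothetical and not part of examples/ipsec-secgw.

#include <stdint.h>

/* trailer_len counts the pad bytes plus the Pad Length and Next Header
 * fields, i.e. it must be >= 2, mirroring pad_len in esp_outbound(). */
static void
esp_trailer_fill(uint8_t *trailer, uint8_t trailer_len, uint8_t next_proto)
{
	uint8_t i;

	for (i = 0; i < trailer_len - 2; i++)
		trailer[i] = i + 1;                     /* pad: 1, 2, 3, ... */
	trailer[trailer_len - 2] = trailer_len - 2;     /* Pad Length */
	trailer[trailer_len - 1] = next_proto;          /* Next Header */
}

/* end points one byte past the Next Header field of the decrypted payload;
 * returns the inner protocol on success, -1 on malformed padding. */
static int
esp_trailer_check(const uint8_t *end)
{
	const uint8_t *nexthdr = end - 1;
	const uint8_t *pad_len = nexthdr - 1;
	const uint8_t *padding = pad_len - *pad_len;
	uint8_t i;

	for (i = 0; i < *pad_len; i++)
		if (padding[i] != i + 1)
			return -1;
	return *nexthdr;
}

When RTE_SECURITY_RX_HW_TRAILER_OFFLOAD is set, the inbound path above skips this check entirely and reads the inner protocol from m->inner_esp_next_proto instead.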
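The two ip6_plen changes fix the same off-by-a-header issue: the IPv4 ip_len field counts the whole datagram, while the IPv6 Payload Length field excludes the fixed 40-byte IPv6 header, hence the extra "- sizeof(struct ip6_hdr)". A small illustration, with a hypothetical helper and lengths chosen only for the example:

#include <stdint.h>
#include <arpa/inet.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

/* pkt_len is the full packet length measured from the start of the IP header */
static void
set_ip_lengths(struct ip *ip4, struct ip6_hdr *ip6, uint16_t pkt_len)
{
	ip4->ip_len = htons(pkt_len);                             /* whole datagram */
	ip6->ip6_plen = htons(pkt_len - sizeof(struct ip6_hdr));  /* header excluded */
}

For a 1480-byte packet, ip_len would be 1480 while ip6_plen would be 1440.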
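The rte_pktmbuf_mtophys_offset() to rte_pktmbuf_iova_offset() substitutions track the physical-address to IO virtual address (IOVA) renaming of the mbuf API in DPDK 17.11; the digest and AAD addressing itself is unchanged. A sketch under that assumption, using a hypothetical helper:

#include <rte_mbuf.h>

/* Bus address of the trailing ICV/digest region of a packet, equivalent to
 * the rte_pktmbuf_iova_offset() calls introduced above; digest_len would
 * come from the SA, as in esp.c. */
static rte_iova_t
digest_iova(struct rte_mbuf *m, uint16_t digest_len)
{
	return rte_pktmbuf_iova_offset(m, rte_pktmbuf_pkt_len(m) - digest_len);
}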