author     Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-07-06 09:22:35 +0200
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-07-06 16:09:40 +0200
commit     8b25d1ad5d2264bdfc2818c7bda74ee2697df6db (patch)
tree       8c3c769777f7e66a2d1ba7dd7651b563cfde370b /examples/ipsec-secgw/esp.c
parent     97f17497d162afdb82c8704bf097f0fee3724b2e (diff)
Imported Upstream version 16.07-rc1
Change-Id: I40a523e52f12e8496fdd69e902824b0226c303de
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'examples/ipsec-secgw/esp.c')
-rw-r--r--  examples/ipsec-secgw/esp.c  248
1 file changed, 167 insertions(+), 81 deletions(-)
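
The change below folds the separate esp4_tunnel_* handlers into generic esp_inbound/esp_outbound paths: instead of the fixed IP_ESP_HDR_SZ offset, the IP header length is derived per packet from the version field (IPv4 via ip_hl, IPv6 as a fixed sizeof(struct ip6_hdr) with no extension-header support), and transport mode is handled alongside IPv4/IPv6 tunnels. A minimal standalone sketch of that version dispatch follows, for orientation only; the esp_ip_hdr_len helper is hypothetical and not part of the patch, which performs the same test inline:

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <stdint.h>

/* Hypothetical helper mirroring the dispatch this patch adds inline in
 * esp_inbound()/esp_outbound(): return the IP header length in bytes,
 * or -1 for an unrecognised version.  IPv6 extension headers are not
 * handled, matching the patch's "No option headers supported" note. */
static inline int32_t
esp_ip_hdr_len(const void *pkt)
{
	const struct ip *ip4 = pkt;

	if (ip4->ip_v == IPVERSION)   /* IPv4: IHL is in 32-bit words */
		return ip4->ip_hl * 4;
	if (ip4->ip_v == 6)           /* the patch uses an IP6_VERSION macro here */
		return sizeof(struct ip6_hdr);
	return -1;
}

In the patch, the crypto offsets are then built from this per-packet value, e.g. cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len.
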
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 19273807..05caa77a 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -37,11 +37,11 @@
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>
#include <rte_common.h>
-#include <rte_memcpy.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>
@@ -50,15 +50,13 @@
#include "esp.h"
#include "ipip.h"
-#define IP_ESP_HDR_SZ (sizeof(struct ip) + sizeof(struct esp_hdr))
-
static inline void
random_iv_u64(uint64_t *buf, uint16_t n)
{
- unsigned left = n & 0x7;
- unsigned i;
+ uint32_t left = n & 0x7;
+ uint32_t i;
- IPSEC_ASSERT((n & 0x3) == 0);
+ RTE_ASSERT((n & 0x3) == 0);
for (i = 0; i < (n >> 3); i++)
buf[i] = rte_rand();
@@ -67,23 +65,35 @@ random_iv_u64(uint64_t *buf, uint16_t n)
*((uint32_t *)&buf[i]) = (uint32_t)lrand48();
}
-/* IPv4 Tunnel */
int
-esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
- int32_t payload_len;
+ struct ip *ip4;
struct rte_crypto_sym_op *sym_cop;
+ int32_t payload_len, ip_hdr_len;
+
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
+
+ ip4 = rte_pktmbuf_mtod(m, struct ip *);
+ if (likely(ip4->ip_v == IPVERSION))
+ ip_hdr_len = ip4->ip_hl * 4;
+ else if (ip4->ip_v == IP6_VERSION)
+ /* XXX No option headers supported */
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ else {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
+ ip4->ip_v);
+ return -EINVAL;
+ }
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
-
- payload_len = rte_pktmbuf_pkt_len(m) - IP_ESP_HDR_SZ - sa->iv_len -
- sa->digest_len;
+ payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
+ sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
- IPSEC_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
+ RTE_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
payload_len, sa->block_size);
return -EINVAL;
}
@@ -91,21 +101,19 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
sym_cop->m_src = m;
- sym_cop->cipher.data.offset = IP_ESP_HDR_SZ + sa->iv_len;
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
sym_cop->cipher.data.length = payload_len;
sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, void*,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.length = sa->iv_len;
- sym_cop->auth.data.offset = sizeof(struct ip);
- if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
- sym_cop->auth.data.length = sizeof(struct esp_hdr);
- else
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
- sa->iv_len + payload_len;
+ sym_cop->auth.data.offset = ip_hdr_len;
+ sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sa->iv_len + payload_len;
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
@@ -117,19 +125,21 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
}
int
-esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
+ struct ip *ip4, *ip;
+ struct ip6_hdr *ip6;
uint8_t *nexthdr, *pad_len;
uint8_t *padding;
uint16_t i;
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- IPSEC_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
return -1;
}
@@ -139,111 +149,187 @@ esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
padding = pad_len - *pad_len;
for (i = 0; i < *pad_len; i++) {
- if (padding[i] != i) {
- IPSEC_LOG(ERR, IPSEC_ESP, "invalid pad_len field\n");
+ if (padding[i] != i + 1) {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
return -EINVAL;
}
}
if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
- IPSEC_LOG(ERR, IPSEC_ESP,
+ RTE_LOG(ERR, IPSEC_ESP,
"failed to remove pad_len + digest\n");
return -EINVAL;
}
- return ip4ip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+ if (unlikely(sa->flags == TRANSPORT)) {
+ ip = rte_pktmbuf_mtod(m, struct ip *);
+ ip4 = (struct ip *)rte_pktmbuf_adj(m,
+ sizeof(struct esp_hdr) + sa->iv_len);
+ if (likely(ip->ip_v == IPVERSION)) {
+ memmove(ip4, ip, ip->ip_hl * 4);
+ ip4->ip_p = *nexthdr;
+ ip4->ip_len = htons(rte_pktmbuf_data_len(m));
+ } else {
+ ip6 = (struct ip6_hdr *)ip4;
+ /* XXX No option headers supported */
+ memmove(ip6, ip, sizeof(struct ip6_hdr));
+ ip6->ip6_nxt = *nexthdr;
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ }
+ } else
+ ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+
+ return 0;
}
int
-esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
- uint16_t pad_payload_len, pad_len;
- struct ip *ip;
- struct esp_hdr *esp;
- int i;
- char *padding;
+ struct ip *ip4;
+ struct ip6_hdr *ip6;
+ struct esp_hdr *esp = NULL;
+ uint8_t *padding, *new_ip, nlp;
struct rte_crypto_sym_op *sym_cop;
+ int32_t i;
+ uint16_t pad_payload_len, pad_len, ip_hdr_len;
+
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
+
+ ip_hdr_len = 0;
+
+ ip4 = rte_pktmbuf_mtod(m, struct ip *);
+ if (likely(ip4->ip_v == IPVERSION)) {
+ if (unlikely(sa->flags == TRANSPORT)) {
+ ip_hdr_len = ip4->ip_hl * 4;
+ nlp = ip4->ip_p;
+ } else
+ nlp = IPPROTO_IPIP;
+ } else if (ip4->ip_v == IP6_VERSION) {
+ if (unlikely(sa->flags == TRANSPORT)) {
+ /* XXX No option headers supported */
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ ip6 = (struct ip6_hdr *)ip4;
+ nlp = ip6->ip6_nxt;
+ } else
+ nlp = IPPROTO_IPV6;
+ } else {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
+ ip4->ip_v);
+ return -EINVAL;
+ }
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
-
- /* Payload length */
- pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) + 2,
- sa->block_size);
- pad_len = pad_payload_len - rte_pktmbuf_pkt_len(m);
-
- rte_prefetch0(rte_pktmbuf_mtod_offset(m, void *,
- rte_pktmbuf_pkt_len(m)));
+ /* Padded payload length */
+ pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
+ ip_hdr_len + 2, sa->block_size);
+ pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
+
+ RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
+ sa->flags == TRANSPORT);
+
+ if (likely(sa->flags == IP4_TUNNEL))
+ ip_hdr_len = sizeof(struct ip);
+ else if (sa->flags == IP6_TUNNEL)
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ else if (sa->flags != TRANSPORT) {
+ RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
+ sa->flags);
+ return -EINVAL;
+ }
/* Check maximum packet size */
- if (unlikely(IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len +
- sa->digest_len > IP_MAXPACKET)) {
- IPSEC_LOG(DEBUG, IPSEC_ESP, "ipsec packet is too big\n");
+ if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
+ pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
+ RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
return -EINVAL;
}
- padding = rte_pktmbuf_append(m, pad_len + sa->digest_len);
-
- IPSEC_ASSERT(padding != NULL);
-
- ip = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
- sa->src, sa->dst);
+ padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
+ if (unlikely(padding == NULL)) {
+ RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
+ return -ENOSPC;
+ }
+ rte_prefetch0(padding);
+
+ switch (sa->flags) {
+ case IP4_TUNNEL:
+ ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ &sa->src, &sa->dst);
+ esp = (struct esp_hdr *)(ip4 + 1);
+ break;
+ case IP6_TUNNEL:
+ ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ &sa->src, &sa->dst);
+ esp = (struct esp_hdr *)(ip6 + 1);
+ break;
+ case TRANSPORT:
+ new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
+ sizeof(struct esp_hdr) + sa->iv_len);
+ memmove(new_ip, ip4, ip_hdr_len);
+ esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+ if (likely(ip4->ip_v == IPVERSION)) {
+ ip4 = (struct ip *)new_ip;
+ ip4->ip_p = IPPROTO_ESP;
+ ip4->ip_len = htons(rte_pktmbuf_data_len(m));
+ } else {
+ ip6 = (struct ip6_hdr *)new_ip;
+ ip6->ip6_nxt = IPPROTO_ESP;
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ }
+ }
- esp = (struct esp_hdr *)(ip + 1);
- esp->spi = sa->spi;
- esp->seq = htonl(sa->seq++);
+ sa->seq++;
+ esp->spi = rte_cpu_to_be_32(sa->spi);
+ esp->seq = rte_cpu_to_be_32(sa->seq);
- IPSEC_LOG(DEBUG, IPSEC_ESP, "pktlen %u\n", rte_pktmbuf_pkt_len(m));
+ if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
+ random_iv_u64((uint64_t *)(esp + 1), sa->iv_len);
/* Fill pad_len using default sequential scheme */
for (i = 0; i < pad_len - 2; i++)
padding[i] = i + 1;
-
padding[pad_len - 2] = pad_len - 2;
- padding[pad_len - 1] = IPPROTO_IPIP;
+ padding[pad_len - 1] = nlp;
sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
sym_cop->m_src = m;
- sym_cop->cipher.data.offset = IP_ESP_HDR_SZ + sa->iv_len;
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
sym_cop->cipher.data.length = pad_payload_len;
sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.length = sa->iv_len;
- sym_cop->auth.data.offset = sizeof(struct ip);
+ sym_cop->auth.data.offset = ip_hdr_len;
sym_cop->auth.data.length = sizeof(struct esp_hdr) + sa->iv_len +
pad_payload_len;
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
- IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len);
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len);
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.length = sa->digest_len;
- if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
- random_iv_u64((uint64_t *)sym_cop->cipher.iv.data,
- sym_cop->cipher.iv.length);
-
return 0;
}
int
-esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m __rte_unused,
+esp_outbound_post(struct rte_mbuf *m __rte_unused,
struct ipsec_sa *sa __rte_unused,
struct rte_crypto_op *cop)
{
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- IPSEC_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
return -1;
}