author     Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-07-06 09:22:35 +0200
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-07-06 16:09:40 +0200
commit     8b25d1ad5d2264bdfc2818c7bda74ee2697df6db (patch)
tree       8c3c769777f7e66a2d1ba7dd7651b563cfde370b /examples/ipsec-secgw
parent     97f17497d162afdb82c8704bf097f0fee3724b2e (diff)
Imported Upstream version 16.07-rc1
Change-Id: I40a523e52f12e8496fdd69e902824b0226c303de
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'examples/ipsec-secgw')
-rw-r--r--  examples/ipsec-secgw/Makefile                                       |   8
-rw-r--r--  examples/ipsec-secgw/esp.c                                          | 248
-rw-r--r--  examples/ipsec-secgw/esp.h                                          |   9
-rw-r--r--  examples/ipsec-secgw/ipip.h                                         | 149
-rw-r--r--  examples/ipsec-secgw/ipsec-secgw.c                                  | 338
-rw-r--r--  examples/ipsec-secgw/ipsec.c                                        |  60
-rw-r--r--  examples/ipsec-secgw/ipsec.h                                        |  75
-rw-r--r--  examples/ipsec-secgw/rt.c                                           | 229
-rw-r--r--  examples/ipsec-secgw/sa.c                                           | 466
-rw-r--r--  examples/ipsec-secgw/sp4.c (renamed from examples/ipsec-secgw/sp.c) | 171
-rw-r--r--  examples/ipsec-secgw/sp6.c                                          | 448
11 files changed, 1617 insertions(+), 584 deletions(-)
diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile
index f9b59c22..06b6db1e 100644
--- a/examples/ipsec-secgw/Makefile
+++ b/examples/ipsec-secgw/Makefile
@@ -46,15 +46,17 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
CFLAGS_sa.o += -diag-disable=vec
endif
-
-VPATH += $(SRCDIR)/librte_ipsec
+ifeq ($(DEBUG),1)
+CFLAGS += -DIPSEC_DEBUG -fstack-protector-all -O0
+endif
#
# all source are stored in SRCS-y
#
SRCS-y += ipsec.c
SRCS-y += esp.c
-SRCS-y += sp.c
+SRCS-y += sp4.c
+SRCS-y += sp6.c
SRCS-y += sa.c
SRCS-y += rt.c
SRCS-y += ipsec-secgw.c
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 19273807..05caa77a 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -37,11 +37,11 @@
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>
#include <rte_common.h>
-#include <rte_memcpy.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>
@@ -50,15 +50,13 @@
#include "esp.h"
#include "ipip.h"
-#define IP_ESP_HDR_SZ (sizeof(struct ip) + sizeof(struct esp_hdr))
-
static inline void
random_iv_u64(uint64_t *buf, uint16_t n)
{
- unsigned left = n & 0x7;
- unsigned i;
+ uint32_t left = n & 0x7;
+ uint32_t i;
- IPSEC_ASSERT((n & 0x3) == 0);
+ RTE_ASSERT((n & 0x3) == 0);
for (i = 0; i < (n >> 3); i++)
buf[i] = rte_rand();
@@ -67,23 +65,35 @@ random_iv_u64(uint64_t *buf, uint16_t n)
*((uint32_t *)&buf[i]) = (uint32_t)lrand48();
}
-/* IPv4 Tunnel */
int
-esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
- int32_t payload_len;
+ struct ip *ip4;
struct rte_crypto_sym_op *sym_cop;
+ int32_t payload_len, ip_hdr_len;
+
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
+
+ ip4 = rte_pktmbuf_mtod(m, struct ip *);
+ if (likely(ip4->ip_v == IPVERSION))
+ ip_hdr_len = ip4->ip_hl * 4;
+ else if (ip4->ip_v == IP6_VERSION)
+ /* XXX No option headers supported */
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ else {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
+ ip4->ip_v);
+ return -EINVAL;
+ }
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
-
- payload_len = rte_pktmbuf_pkt_len(m) - IP_ESP_HDR_SZ - sa->iv_len -
- sa->digest_len;
+ payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
+ sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
- IPSEC_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
+ RTE_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
payload_len, sa->block_size);
return -EINVAL;
}
@@ -91,21 +101,19 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
sym_cop->m_src = m;
- sym_cop->cipher.data.offset = IP_ESP_HDR_SZ + sa->iv_len;
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
sym_cop->cipher.data.length = payload_len;
sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, void*,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.length = sa->iv_len;
- sym_cop->auth.data.offset = sizeof(struct ip);
- if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
- sym_cop->auth.data.length = sizeof(struct esp_hdr);
- else
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
- sa->iv_len + payload_len;
+ sym_cop->auth.data.offset = ip_hdr_len;
+ sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sa->iv_len + payload_len;
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
@@ -117,19 +125,21 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
}
int
-esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
+ struct ip *ip4, *ip;
+ struct ip6_hdr *ip6;
uint8_t *nexthdr, *pad_len;
uint8_t *padding;
uint16_t i;
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- IPSEC_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
return -1;
}
@@ -139,111 +149,187 @@ esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
padding = pad_len - *pad_len;
for (i = 0; i < *pad_len; i++) {
- if (padding[i] != i) {
- IPSEC_LOG(ERR, IPSEC_ESP, "invalid pad_len field\n");
+ if (padding[i] != i + 1) {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
return -EINVAL;
}
}
if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
- IPSEC_LOG(ERR, IPSEC_ESP,
+ RTE_LOG(ERR, IPSEC_ESP,
"failed to remove pad_len + digest\n");
return -EINVAL;
}
- return ip4ip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+ if (unlikely(sa->flags == TRANSPORT)) {
+ ip = rte_pktmbuf_mtod(m, struct ip *);
+ ip4 = (struct ip *)rte_pktmbuf_adj(m,
+ sizeof(struct esp_hdr) + sa->iv_len);
+ if (likely(ip->ip_v == IPVERSION)) {
+ memmove(ip4, ip, ip->ip_hl * 4);
+ ip4->ip_p = *nexthdr;
+ ip4->ip_len = htons(rte_pktmbuf_data_len(m));
+ } else {
+ ip6 = (struct ip6_hdr *)ip4;
+ /* XXX No option headers supported */
+ memmove(ip6, ip, sizeof(struct ip6_hdr));
+ ip6->ip6_nxt = *nexthdr;
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ }
+ } else
+ ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+
+ return 0;
}
int
-esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
- uint16_t pad_payload_len, pad_len;
- struct ip *ip;
- struct esp_hdr *esp;
- int i;
- char *padding;
+ struct ip *ip4;
+ struct ip6_hdr *ip6;
+ struct esp_hdr *esp = NULL;
+ uint8_t *padding, *new_ip, nlp;
struct rte_crypto_sym_op *sym_cop;
+ int32_t i;
+ uint16_t pad_payload_len, pad_len, ip_hdr_len;
+
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
+
+ ip_hdr_len = 0;
+
+ ip4 = rte_pktmbuf_mtod(m, struct ip *);
+ if (likely(ip4->ip_v == IPVERSION)) {
+ if (unlikely(sa->flags == TRANSPORT)) {
+ ip_hdr_len = ip4->ip_hl * 4;
+ nlp = ip4->ip_p;
+ } else
+ nlp = IPPROTO_IPIP;
+ } else if (ip4->ip_v == IP6_VERSION) {
+ if (unlikely(sa->flags == TRANSPORT)) {
+ /* XXX No option headers supported */
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ ip6 = (struct ip6_hdr *)ip4;
+ nlp = ip6->ip6_nxt;
+ } else
+ nlp = IPPROTO_IPV6;
+ } else {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
+ ip4->ip_v);
+ return -EINVAL;
+ }
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
-
- /* Payload length */
- pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) + 2,
- sa->block_size);
- pad_len = pad_payload_len - rte_pktmbuf_pkt_len(m);
-
- rte_prefetch0(rte_pktmbuf_mtod_offset(m, void *,
- rte_pktmbuf_pkt_len(m)));
+ /* Padded payload length */
+ pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
+ ip_hdr_len + 2, sa->block_size);
+ pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
+
+ RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
+ sa->flags == TRANSPORT);
+
+ if (likely(sa->flags == IP4_TUNNEL))
+ ip_hdr_len = sizeof(struct ip);
+ else if (sa->flags == IP6_TUNNEL)
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ else if (sa->flags != TRANSPORT) {
+ RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
+ sa->flags);
+ return -EINVAL;
+ }
/* Check maximum packet size */
- if (unlikely(IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len +
- sa->digest_len > IP_MAXPACKET)) {
- IPSEC_LOG(DEBUG, IPSEC_ESP, "ipsec packet is too big\n");
+ if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
+ pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
+ RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
return -EINVAL;
}
- padding = rte_pktmbuf_append(m, pad_len + sa->digest_len);
-
- IPSEC_ASSERT(padding != NULL);
-
- ip = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
- sa->src, sa->dst);
+ padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
+ if (unlikely(padding == NULL)) {
+ RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
+ return -ENOSPC;
+ }
+ rte_prefetch0(padding);
+
+ switch (sa->flags) {
+ case IP4_TUNNEL:
+ ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ &sa->src, &sa->dst);
+ esp = (struct esp_hdr *)(ip4 + 1);
+ break;
+ case IP6_TUNNEL:
+ ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ &sa->src, &sa->dst);
+ esp = (struct esp_hdr *)(ip6 + 1);
+ break;
+ case TRANSPORT:
+ new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
+ sizeof(struct esp_hdr) + sa->iv_len);
+ memmove(new_ip, ip4, ip_hdr_len);
+ esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+ if (likely(ip4->ip_v == IPVERSION)) {
+ ip4 = (struct ip *)new_ip;
+ ip4->ip_p = IPPROTO_ESP;
+ ip4->ip_len = htons(rte_pktmbuf_data_len(m));
+ } else {
+ ip6 = (struct ip6_hdr *)new_ip;
+ ip6->ip6_nxt = IPPROTO_ESP;
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ }
+ }
- esp = (struct esp_hdr *)(ip + 1);
- esp->spi = sa->spi;
- esp->seq = htonl(sa->seq++);
+ sa->seq++;
+ esp->spi = rte_cpu_to_be_32(sa->spi);
+ esp->seq = rte_cpu_to_be_32(sa->seq);
- IPSEC_LOG(DEBUG, IPSEC_ESP, "pktlen %u\n", rte_pktmbuf_pkt_len(m));
+ if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
+ random_iv_u64((uint64_t *)(esp + 1), sa->iv_len);
/* Fill pad_len using default sequential scheme */
for (i = 0; i < pad_len - 2; i++)
padding[i] = i + 1;
-
padding[pad_len - 2] = pad_len - 2;
- padding[pad_len - 1] = IPPROTO_IPIP;
+ padding[pad_len - 1] = nlp;
sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
sym_cop->m_src = m;
- sym_cop->cipher.data.offset = IP_ESP_HDR_SZ + sa->iv_len;
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
sym_cop->cipher.data.length = pad_payload_len;
sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.length = sa->iv_len;
- sym_cop->auth.data.offset = sizeof(struct ip);
+ sym_cop->auth.data.offset = ip_hdr_len;
sym_cop->auth.data.length = sizeof(struct esp_hdr) + sa->iv_len +
pad_payload_len;
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
- IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len);
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len);
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.length = sa->digest_len;
- if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
- random_iv_u64((uint64_t *)sym_cop->cipher.iv.data,
- sym_cop->cipher.iv.length);
-
return 0;
}
int
-esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m __rte_unused,
+esp_outbound_post(struct rte_mbuf *m __rte_unused,
struct ipsec_sa *sa __rte_unused,
struct rte_crypto_op *cop)
{
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- IPSEC_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
return -1;
}
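
The inbound check above now expects the same sequential padding bytes 1, 2, 3, ... that the outbound path writes, followed by the pad-length and next-header bytes. A minimal standalone sketch of that trailer scheme; build_esp_trailer() and check_esp_padding() are hypothetical helper names with no mbuf or cryptodev dependencies, they only mirror the padding logic of esp_outbound()/esp_inbound_post():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the outbound path: pad the payload so that
 * payload + 2 trailer bytes is a multiple of the cipher block size, fill the
 * pad bytes with 1, 2, 3, ..., then write the pad-length and next-header
 * bytes. Returns the number of trailer bytes written. */
static int
build_esp_trailer(uint8_t *buf, uint16_t payload_len, uint16_t block_size,
		uint8_t next_proto)
{
	uint16_t padded = (payload_len + 2 + block_size - 1) & ~(block_size - 1);
	uint16_t pad_len = padded - payload_len; /* includes the 2 trailer bytes */
	int i;

	for (i = 0; i < pad_len - 2; i++)
		buf[i] = i + 1;			/* same sequential scheme as the patch */
	buf[pad_len - 2] = pad_len - 2;		/* pad-length field */
	buf[pad_len - 1] = next_proto;		/* next-header field, e.g. IPPROTO_IPIP */
	return pad_len;
}

/* Mirror of the inbound check: pad byte i must equal i + 1. pad_end points at
 * the next-header byte, i.e. the last byte before the ICV. */
static int
check_esp_padding(const uint8_t *pad_end)
{
	uint8_t pad_len = pad_end[-1];
	const uint8_t *padding = pad_end - 1 - pad_len;
	int i;

	for (i = 0; i < pad_len; i++)
		if (padding[i] != i + 1)
			return -1;
	return 0;
}

int main(void)
{
	uint8_t trailer[64];
	int n = build_esp_trailer(trailer, 37, 16, 4 /* IPPROTO_IPIP */);

	printf("trailer bytes: %d, pad-length field: %d\n", n, trailer[n - 2]);
	printf("padding %s\n",
		check_esp_padding(&trailer[n - 1]) == 0 ? "valid" : "invalid");
	return 0;
}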
diff --git a/examples/ipsec-secgw/esp.h b/examples/ipsec-secgw/esp.h
index 31018823..fa5cc8af 100644
--- a/examples/ipsec-secgw/esp.h
+++ b/examples/ipsec-secgw/esp.h
@@ -46,21 +46,20 @@ struct esp_hdr {
/* Integrity Check Value - ICV */
};
-/* IPv4 Tunnel */
int
-esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
int
-esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
int
-esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
int
-esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_outbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
#endif /* __RTE_IPSEC_XFORM_ESP_H__ */
diff --git a/examples/ipsec-secgw/ipip.h b/examples/ipsec-secgw/ipip.h
index 322076ce..ce25a2e2 100644
--- a/examples/ipsec-secgw/ipip.h
+++ b/examples/ipsec-secgw/ipip.h
@@ -37,67 +37,144 @@
#include <stdint.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <rte_mbuf.h>
-#define IPV6_VERSION (6)
-
-static inline struct ip *
-ip4ip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t src, uint32_t dst)
+static inline void *
+ipip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t is_ipv6,
+ struct ip_addr *src, struct ip_addr *dst)
{
- struct ip *inip, *outip;
+ struct ip *inip4, *outip4;
+ struct ip6_hdr *inip6, *outip6;
+ uint8_t ds_ecn;
- inip = rte_pktmbuf_mtod(m, struct ip*);
+ inip4 = rte_pktmbuf_mtod(m, struct ip *);
- IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+ RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION);
- offset += sizeof(struct ip);
+ if (inip4->ip_v == IPVERSION) {
+ /* XXX This should be done by the forwarding engine instead */
+ inip4->ip_ttl -= 1;
+ ds_ecn = inip4->ip_tos;
+ } else {
+ inip6 = (struct ip6_hdr *)inip4;
+ /* XXX This should be done by the forwarding engine instead */
+ inip6->ip6_hops -= 1;
+ ds_ecn = ntohl(inip6->ip6_flow) >> 20;
+ }
+
+ if (is_ipv6) {
+ offset += sizeof(struct ip6_hdr);
+ outip6 = (struct ip6_hdr *)rte_pktmbuf_prepend(m, offset);
+
+ RTE_ASSERT(outip6 != NULL);
+
+ /* Per RFC4301 5.1.2.1 */
+ outip6->ip6_flow = htonl(IP6_VERSION << 28 | ds_ecn << 20);
+ outip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+
+ outip6->ip6_nxt = IPPROTO_ESP;
+ outip6->ip6_hops = IPDEFTTL;
- outip = (struct ip *)rte_pktmbuf_prepend(m, offset);
+ memcpy(&outip6->ip6_src.s6_addr, src, 16);
+ memcpy(&outip6->ip6_dst.s6_addr, dst, 16);
- IPSEC_ASSERT(outip != NULL);
+ return outip6;
+ }
+
+ offset += sizeof(struct ip);
+ outip4 = (struct ip *)rte_pktmbuf_prepend(m, offset);
+
+ RTE_ASSERT(outip4 != NULL);
/* Per RFC4301 5.1.2.1 */
- outip->ip_v = IPVERSION;
- outip->ip_hl = 5;
- outip->ip_tos = inip->ip_tos;
- outip->ip_len = htons(rte_pktmbuf_data_len(m));
+ outip4->ip_v = IPVERSION;
+ outip4->ip_hl = 5;
+ outip4->ip_tos = ds_ecn;
+ outip4->ip_len = htons(rte_pktmbuf_data_len(m));
+
+ outip4->ip_id = 0;
+ outip4->ip_off = 0;
+
+ outip4->ip_ttl = IPDEFTTL;
+ outip4->ip_p = IPPROTO_ESP;
+
+ outip4->ip_src.s_addr = src->ip4;
+ outip4->ip_dst.s_addr = dst->ip4;
- outip->ip_id = 0;
- outip->ip_off = 0;
+ return outip4;
+}
+
+static inline struct ip *
+ip4ip_outbound(struct rte_mbuf *m, uint32_t offset,
+ struct ip_addr *src, struct ip_addr *dst)
+{
+ return ipip_outbound(m, offset, 0, src, dst);
+}
- outip->ip_ttl = IPDEFTTL;
- outip->ip_p = IPPROTO_ESP;
+static inline struct ip6_hdr *
+ip6ip_outbound(struct rte_mbuf *m, uint32_t offset,
+ struct ip_addr *src, struct ip_addr *dst)
+{
+ return ipip_outbound(m, offset, 1, src, dst);
+}
- outip->ip_src.s_addr = src;
- outip->ip_dst.s_addr = dst;
+static inline void
+ip4_ecn_setup(struct ip *ip4)
+{
+ if (ip4->ip_tos & IPTOS_ECN_MASK)
+ ip4->ip_tos |= IPTOS_ECN_CE;
+}
- return outip;
+static inline void
+ip6_ecn_setup(struct ip6_hdr *ip6)
+{
+ if ((ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK)
+ ip6->ip6_flow = htonl(ntohl(ip6->ip6_flow) |
+ (IPTOS_ECN_CE << 20));
}
-static inline int
-ip4ip_inbound(struct rte_mbuf *m, uint32_t offset)
+static inline void
+ipip_inbound(struct rte_mbuf *m, uint32_t offset)
{
- struct ip *inip;
- struct ip *outip;
+ struct ip *inip4, *outip4;
+ struct ip6_hdr *inip6, *outip6;
+ uint32_t ip_len, set_ecn;
- outip = rte_pktmbuf_mtod(m, struct ip*);
+ outip4 = rte_pktmbuf_mtod(m, struct ip*);
- IPSEC_ASSERT(outip->ip_v == IPVERSION);
+ RTE_ASSERT(outip4->ip_v == IPVERSION || outip4->ip_v == IP6_VERSION);
- offset += sizeof(struct ip);
- inip = (struct ip *)rte_pktmbuf_adj(m, offset);
- IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+ if (outip4->ip_v == IPVERSION) {
+ ip_len = sizeof(struct ip);
+ set_ecn = ((outip4->ip_tos & IPTOS_ECN_CE) == IPTOS_ECN_CE);
+ } else {
+ outip6 = (struct ip6_hdr *)outip4;
+ ip_len = sizeof(struct ip6_hdr);
+ set_ecn = ntohl(outip6->ip6_flow) >> 20;
+ set_ecn = ((set_ecn & IPTOS_ECN_CE) == IPTOS_ECN_CE);
+ }
+
+ inip4 = (struct ip *)rte_pktmbuf_adj(m, offset + ip_len);
+ RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION);
/* Check packet is still bigger than IP header (inner) */
- IPSEC_ASSERT(rte_pktmbuf_pkt_len(m) > sizeof(struct ip));
+ RTE_ASSERT(rte_pktmbuf_pkt_len(m) > ip_len);
/* RFC4301 5.1.2.1 Note 6 */
- if ((inip->ip_tos & htons(IPTOS_ECN_ECT0 | IPTOS_ECN_ECT1)) &&
- ((outip->ip_tos & htons(IPTOS_ECN_CE)) == IPTOS_ECN_CE))
- inip->ip_tos |= htons(IPTOS_ECN_CE);
-
- return 0;
+ if (inip4->ip_v == IPVERSION) {
+ if (set_ecn)
+ ip4_ecn_setup(inip4);
+ /* XXX This should be done by the forwarding engine instead */
+ inip4->ip_ttl -= 1;
+ } else {
+ inip6 = (struct ip6_hdr *)inip4;
+ if (set_ecn)
+ ip6_ecn_setup(inip6);
+ /* XXX This should be done by the forwarding engine instead */
+ inip6->ip6_hops -= 1;
+ }
}
#endif /* __IPIP_H__ */
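
The encapsulation/decapsulation code above treats bits 27-20 of the IPv6 ip6_flow word as the traffic-class byte (DSCP + ECN), the same layout as the IPv4 TOS byte, to propagate ECN per RFC 4301. A small sketch of that bit manipulation, with local copies of the ECN constants and illustrative helper names:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl/ntohl */

#define ECN_MASK 0x03	/* local copy of IPTOS_ECN_MASK */
#define ECN_CE   0x03	/* local copy of IPTOS_ECN_CE   */

/* ip6_flow (network byte order) packs: version in bits 31-28, traffic class
 * in bits 27-20, flow label in bits 19-0. */
static uint8_t
ip6_get_ds_ecn(uint32_t ip6_flow_be)
{
	return (uint8_t)(ntohl(ip6_flow_be) >> 20);
}

/* RFC 4301 decapsulation rule as applied by ip6_ecn_setup(): mark CE only if
 * the inner packet already has an ECN bit set (i.e. is ECN-capable). */
static uint32_t
ip6_set_ce(uint32_t ip6_flow_be)
{
	uint32_t host = ntohl(ip6_flow_be);

	if ((host >> 20) & ECN_MASK)
		host |= (uint32_t)ECN_CE << 20;
	return htonl(host);
}

int main(void)
{
	/* version 6, traffic class 0xba (DSCP EF + ECT(0)), flow label 0 */
	uint32_t flow = htonl((6u << 28) | (0xbau << 20));

	printf("ds/ecn before: 0x%02x\n", ip6_get_ds_ecn(flow));
	flow = ip6_set_ce(flow);
	printf("ds/ecn after:  0x%02x\n", ip6_get_ds_ecn(flow));
	return 0;
}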
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 00ab2d84..f78743d0 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -38,6 +38,7 @@
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
@@ -65,6 +66,7 @@
#include <rte_mbuf.h>
#include <rte_acl.h>
#include <rte_lpm.h>
+#include <rte_lpm6.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
@@ -192,7 +194,8 @@ struct lcore_conf {
struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
struct ipsec_ctx inbound;
struct ipsec_ctx outbound;
- struct rt_ctx *rt_ctx;
+ struct rt_ctx *rt4_ctx;
+ struct rt_ctx *rt6_ctx;
} __rte_cache_aligned;
static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
@@ -230,27 +233,39 @@ struct traffic_type {
};
struct ipsec_traffic {
- struct traffic_type ipsec4;
- struct traffic_type ipv4;
+ struct traffic_type ipsec;
+ struct traffic_type ip4;
+ struct traffic_type ip6;
};
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
uint8_t *nlp;
+ struct ether_hdr *eth;
- if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
- rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
- nlp = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
- offsetof(struct ip, ip_p));
+ eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+ nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
if (*nlp == IPPROTO_ESP)
- t->ipsec4.pkts[(t->ipsec4.num)++] = pkt;
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
else {
- t->ipv4.data[t->ipv4.num] = nlp;
- t->ipv4.pkts[(t->ipv4.num)++] = pkt;
+ t->ip4.data[t->ip4.num] = nlp;
+ t->ip4.pkts[(t->ip4.num)++] = pkt;
+ }
+ } else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+ nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+ nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
+ if (*nlp == IPPROTO_ESP)
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ else {
+ t->ip6.data[t->ip6.num] = nlp;
+ t->ip6.pkts[(t->ip6.num)++] = pkt;
}
} else {
/* Unknown/Unsupported type, drop the packet */
+ RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
rte_pktmbuf_free(pkt);
}
}
@@ -261,8 +276,9 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
{
int32_t i;
- t->ipsec4.num = 0;
- t->ipv4.num = 0;
+ t->ipsec.num = 0;
+ t->ip4.num = 0;
+ t->ip6.num = 0;
for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
@@ -277,14 +293,27 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
static inline void
prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
{
- pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = ETHER_HDR_LEN;
+ struct ip *ip;
+ struct ether_hdr *ethhdr;
+
+ ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+ ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
- struct ether_hdr *ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt,
- ETHER_HDR_LEN);
+ if (ip->ip_v == IPVERSION) {
+ pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
+ pkt->l3_len = sizeof(struct ip);
+ pkt->l2_len = ETHER_HDR_LEN;
+
+ ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ } else {
+ pkt->ol_flags |= PKT_TX_IPV6;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = ETHER_HDR_LEN;
+
+ ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ }
- ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
sizeof(struct ether_addr));
memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
@@ -298,7 +327,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
const int32_t prefetch_offset = 2;
for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_prefetch0(pkts[i + prefetch_offset]->cacheline1);
+ rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
prepare_tx_pkt(pkts[i], port);
}
/* Process left packets */
@@ -355,94 +384,133 @@ send_single_packet(struct rte_mbuf *m, uint8_t port)
}
static inline void
-process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
- struct ipsec_traffic *traffic)
+inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip)
{
struct rte_mbuf *m;
- uint16_t idx, nb_pkts_in, i, j;
- uint32_t sa_idx, res;
-
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
- traffic->ipsec4.num, MAX_PKT_BURST);
+ uint32_t i, j, res, sa_idx;
- /* SP/ACL Inbound check ipsec and ipv4 */
- for (i = 0; i < nb_pkts_in; i++) {
- idx = traffic->ipv4.num++;
- m = traffic->ipsec4.pkts[i];
- traffic->ipv4.pkts[idx] = m;
- traffic->ipv4.data[idx] = rte_pktmbuf_mtod_offset(m,
- uint8_t *, offsetof(struct ip, ip_p));
- }
+ if (ip->num == 0)
+ return;
- rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
- traffic->ipv4.data, traffic->ipv4.res,
- traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
j = 0;
- for (i = 0; i < traffic->ipv4.num - nb_pkts_in; i++) {
- m = traffic->ipv4.pkts[i];
- res = traffic->ipv4.res[i];
- if (res & ~BYPASS) {
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ res = ip->res[i];
+ if (res & DISCARD) {
rte_pktmbuf_free(m);
continue;
}
- traffic->ipv4.pkts[j++] = m;
- }
- /* Check return SA SPI matches pkt SPI */
- for ( ; i < traffic->ipv4.num; i++) {
- m = traffic->ipv4.pkts[i];
- sa_idx = traffic->ipv4.res[i] & PROTECT_MASK;
- if (sa_idx == 0 || !inbound_sa_check(ipsec_ctx->sa_ctx,
- m, sa_idx)) {
+ if (res & BYPASS) {
+ ip->pkts[j++] = m;
+ continue;
+ }
+ /* Check return SA SPI matches pkt SPI */
+ sa_idx = ip->res[i] & PROTECT_MASK;
+ if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
rte_pktmbuf_free(m);
continue;
}
- traffic->ipv4.pkts[j++] = m;
+ ip->pkts[j++] = m;
}
- traffic->ipv4.num = j;
+ ip->num = j;
}
static inline void
-process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
+process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
struct rte_mbuf *m;
- uint16_t idx, nb_pkts_out, i, j;
- uint32_t sa_idx, res;
+ uint16_t idx, nb_pkts_in, i;
- rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
- traffic->ipv4.data, traffic->ipv4.res,
- traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
- /* Drop any IPsec traffic from protected ports */
- for (i = 0; i < traffic->ipsec4.num; i++)
- rte_pktmbuf_free(traffic->ipsec4.pkts[i]);
+ /* SP/ACL Inbound check ipsec and ip4 */
+ for (i = 0; i < nb_pkts_in; i++) {
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
+ uint8_t *, offsetof(struct ip, ip_p));
+ } else if (ip->ip_v == IP6_VERSION) {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
+ uint8_t *,
+ offsetof(struct ip6_hdr, ip6_nxt));
+ } else
+ rte_pktmbuf_free(m);
+ }
+
+ inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4);
+
+ inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6);
+}
+
+static inline void
+outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
+ struct traffic_type *ipsec)
+{
+ struct rte_mbuf *m;
+ uint32_t i, j, sa_idx;
+
+ if (ip->num == 0)
+ return;
- traffic->ipsec4.num = 0;
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
j = 0;
- for (i = 0; i < traffic->ipv4.num; i++) {
- m = traffic->ipv4.pkts[i];
- res = traffic->ipv4.res[i];
- sa_idx = res & PROTECT_MASK;
- if ((res == 0) || (res & DISCARD))
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ sa_idx = ip->res[i] & PROTECT_MASK;
+ if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
rte_pktmbuf_free(m);
else if (sa_idx != 0) {
- traffic->ipsec4.res[traffic->ipsec4.num] = sa_idx;
- traffic->ipsec4.pkts[traffic->ipsec4.num++] = m;
+ ipsec->res[ipsec->num] = sa_idx;
+ ipsec->pkts[ipsec->num++] = m;
} else /* BYPASS */
- traffic->ipv4.pkts[j++] = m;
+ ip->pkts[j++] = m;
}
- traffic->ipv4.num = j;
+ ip->num = j;
+}
+
+static inline void
+process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
+ struct ipsec_traffic *traffic)
+{
+ struct rte_mbuf *m;
+ uint16_t idx, nb_pkts_out, i;
+
+ /* Drop any IPsec traffic from protected ports */
+ for (i = 0; i < traffic->ipsec.num; i++)
+ rte_pktmbuf_free(traffic->ipsec.pkts[i]);
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec4.pkts,
- traffic->ipsec4.res, traffic->ipsec4.num,
+ traffic->ipsec.num = 0;
+
+ outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
+
+ outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
+
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.res, traffic->ipsec.num,
MAX_PKT_BURST);
for (i = 0; i < nb_pkts_out; i++) {
- idx = traffic->ipv4.num++;
- m = traffic->ipsec4.pkts[i];
- traffic->ipv4.pkts[idx] = m;
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ } else {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ }
}
}
@@ -450,47 +518,72 @@ static inline void
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
- uint16_t nb_pkts_in, i;
+ struct rte_mbuf *m;
+ uint32_t nb_pkts_in, i, idx;
/* Drop any IPv4 traffic from unprotected ports */
- for (i = 0; i < traffic->ipv4.num; i++)
- rte_pktmbuf_free(traffic->ipv4.pkts[i]);
+ for (i = 0; i < traffic->ip4.num; i++)
+ rte_pktmbuf_free(traffic->ip4.pkts[i]);
- traffic->ipv4.num = 0;
+ traffic->ip4.num = 0;
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
- traffic->ipsec4.num, MAX_PKT_BURST);
+ /* Drop any IPv6 traffic from unprotected ports */
+ for (i = 0; i < traffic->ip6.num; i++)
+ rte_pktmbuf_free(traffic->ip6.pkts[i]);
- for (i = 0; i < nb_pkts_in; i++)
- traffic->ipv4.pkts[i] = traffic->ipsec4.pkts[i];
+ traffic->ip6.num = 0;
- traffic->ipv4.num = nb_pkts_in;
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
+
+ for (i = 0; i < nb_pkts_in; i++) {
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ } else {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ }
+ }
}
static inline void
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
- uint16_t nb_pkts_out, i;
+ struct rte_mbuf *m;
+ uint32_t nb_pkts_out, i;
+ struct ip *ip;
/* Drop any IPsec traffic from protected ports */
- for (i = 0; i < traffic->ipsec4.num; i++)
- rte_pktmbuf_free(traffic->ipsec4.pkts[i]);
+ for (i = 0; i < traffic->ipsec.num; i++)
+ rte_pktmbuf_free(traffic->ipsec.pkts[i]);
- traffic->ipsec4.num = 0;
+ traffic->ipsec.num = 0;
- for (i = 0; i < traffic->ipv4.num; i++)
- traffic->ipv4.res[i] = single_sa_idx;
+ for (i = 0; i < traffic->ip4.num; i++)
+ traffic->ip4.res[i] = single_sa_idx;
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipv4.pkts,
- traffic->ipv4.res, traffic->ipv4.num,
+ for (i = 0; i < traffic->ip6.num; i++)
+ traffic->ip6.res[i] = single_sa_idx;
+
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts,
+ traffic->ip4.res, traffic->ip4.num,
MAX_PKT_BURST);
- traffic->ipv4.num = nb_pkts_out;
+ /* They all use the same SA (ip4 or ip6 tunnel) */
+ m = traffic->ipsec.pkts[i];
+ ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION)
+ traffic->ip4.num = nb_pkts_out;
+ else
+ traffic->ip6.num = nb_pkts_out;
}
static inline void
-route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
uint32_t hop[MAX_PKT_BURST * 2];
uint32_t dst_ip[MAX_PKT_BURST * 2];
@@ -518,6 +611,35 @@ route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
static inline void
+route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ int16_t hop[MAX_PKT_BURST * 2];
+ uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ uint8_t *ip6_dst;
+ uint16_t i, offset;
+
+ if (nb_pkts == 0)
+ return;
+
+ for (i = 0; i < nb_pkts; i++) {
+ offset = offsetof(struct ip6_hdr, ip6_dst);
+ ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, offset);
+ memcpy(&dst_ip[i][0], ip6_dst, 16);
+ }
+
+ rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip,
+ hop, nb_pkts);
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (hop[i] == -1) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+ send_single_packet(pkts[i], hop[i] & 0xff);
+ }
+}
+
+static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
uint8_t nb_pkts, uint8_t portid)
{
@@ -525,7 +647,7 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
prepare_traffic(pkts, &traffic, nb_pkts);
- if (single_sa) {
+ if (unlikely(single_sa)) {
if (UNPROTECTED_PORT(portid))
process_pkts_inbound_nosp(&qconf->inbound, &traffic);
else
@@ -537,7 +659,8 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
process_pkts_outbound(&qconf->outbound, &traffic);
}
- route_pkts(qconf->rt_ctx, traffic.ipv4.pkts, traffic.ipv4.num);
+ route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
+ route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
static inline void
@@ -576,12 +699,15 @@ main_loop(__attribute__((unused)) void *dummy)
rxql = qconf->rx_queue_list;
socket_id = rte_lcore_to_socket_id(lcore_id);
- qconf->rt_ctx = socket_ctx[socket_id].rt_ipv4;
- qconf->inbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_in;
- qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_in;
+ qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
+ qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
+ qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
+ qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
+ qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
qconf->inbound.cdev_map = cdev_map_in;
- qconf->outbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_out;
- qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_out;
+ qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
+ qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
+ qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
qconf->outbound.cdev_map = cdev_map_out;
if (qconf->nb_rx_queue == 0) {
@@ -636,8 +762,6 @@ check_params(void)
}
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -762,7 +886,7 @@ parse_config(const char *q_arg)
FLD_LCORE,
_NUM_FLD
};
- int long int_fld[_NUM_FLD];
+ unsigned long int_fld[_NUM_FLD];
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
@@ -1286,8 +1410,6 @@ main(int32_t argc, char **argv)
unprotected_port_mask);
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_params() < 0)
rte_exit(EXIT_FAILURE, "check_params failed\n");
@@ -1313,7 +1435,9 @@ main(int32_t argc, char **argv)
sa_init(&socket_ctx[socket_id], socket_id, ep);
- sp_init(&socket_ctx[socket_id], socket_id, ep);
+ sp4_init(&socket_ctx[socket_id], socket_id, ep);
+
+ sp6_init(&socket_ctx[socket_id], socket_id, ep);
rt_init(&socket_ctx[socket_id], socket_id, ep);
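
prepare_one_packet() now classifies each frame by its EtherType and then by the L3 protocol/next-header field, steering it into the ip4, ip6 or ipsec traffic type. The following self-contained sketch mirrors that decision tree on a plain byte buffer instead of an rte_mbuf; classify_frame() and the constants are illustrative stand-ins, not part of the application:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETHERTYPE_IP4	0x0800
#define ETHERTYPE_IP6	0x86DD
#define PROTO_ESP	50

enum pkt_class { CLASS_IP4, CLASS_IP6, CLASS_IPSEC, CLASS_DROP };

static enum pkt_class
classify_frame(const uint8_t *frame, size_t len)
{
	uint16_t ethertype;

	if (len < 14 + 20)		/* Ethernet + minimal IPv4 header */
		return CLASS_DROP;
	ethertype = (uint16_t)(frame[12] << 8 | frame[13]);

	if (ethertype == ETHERTYPE_IP4) {
		/* IPv4 protocol field at offset 9 of the IP header */
		return frame[14 + 9] == PROTO_ESP ? CLASS_IPSEC : CLASS_IP4;
	} else if (ethertype == ETHERTYPE_IP6) {
		if (len < 14 + 40)	/* Ethernet + fixed IPv6 header */
			return CLASS_DROP;
		/* IPv6 next-header field at offset 6 of the fixed header */
		return frame[14 + 6] == PROTO_ESP ? CLASS_IPSEC : CLASS_IP6;
	}
	return CLASS_DROP;		/* unknown EtherType, as in the patch */
}

int main(void)
{
	uint8_t frame[60] = { 0 };

	frame[12] = 0x08;		/* EtherType 0x0800 (IPv4) */
	frame[13] = 0x00;
	frame[14 + 9] = PROTO_ESP;	/* IPv4 protocol = ESP */
	printf("class = %d (2 == CLASS_IPSEC)\n",
		classify_frame(frame, sizeof(frame)));
	return 0;
}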
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index baf30d4b..1e87d0df 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -42,11 +42,12 @@
#include <rte_hash.h>
#include "ipsec.h"
+#include "esp.h"
static inline int
create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
{
- uint32_t cdev_id_qp = 0;
+ unsigned long cdev_id_qp = 0;
int32_t ret;
struct cdev_key key = { 0 };
@@ -58,14 +59,15 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
(void **)&cdev_id_qp);
if (ret < 0) {
- IPSEC_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
+ RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
"auth_algo %u\n", key.lcore_id, key.cipher_algo,
key.auth_algo);
return -1;
}
- IPSEC_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
- "%u qp %u\n", sa->spi, ipsec_ctx->tbl[cdev_id_qp].id,
+ RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
+ "%u qp %u\n", sa->spi,
+ ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp);
sa->crypto_session = rte_cryptodev_sym_session_create(
@@ -79,7 +81,7 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
- int ret, i;
+ int32_t ret, i;
cqp->buf[cqp->len++] = cop;
@@ -87,7 +89,7 @@ enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
cqp->buf, cqp->len);
if (ret < cqp->len) {
- IPSEC_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
+ RTE_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
" enqueued %u crypto ops out of %u\n",
cqp->id, cqp->qp,
ret, cqp->len);
@@ -99,17 +101,21 @@ enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
}
}
-static inline uint16_t
-ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
- struct ipsec_sa *sas[], uint16_t nb_pkts, uint16_t max_pkts)
+static inline void
+ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+ struct rte_mbuf *pkts[], struct ipsec_sa *sas[],
+ uint16_t nb_pkts)
{
- int ret = 0, i, j, nb_cops;
+ int32_t ret = 0, i;
struct ipsec_mbuf_metadata *priv;
- struct rte_crypto_op *cops[max_pkts];
struct ipsec_sa *sa;
- struct rte_mbuf *pkt;
for (i = 0; i < nb_pkts; i++) {
+ if (unlikely(sas[i] == NULL)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
rte_prefetch0(sas[i]);
rte_prefetch0(pkts[i]);
@@ -117,8 +123,6 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
sa = sas[i];
priv->sa = sa;
- IPSEC_ASSERT(sa != NULL);
-
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
rte_prefetch0(&priv->sym_cop);
@@ -133,17 +137,27 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
rte_crypto_op_attach_sym_session(&priv->cop,
sa->crypto_session);
- ret = sa->pre_crypto(pkts[i], sa, &priv->cop);
+ ret = xform_func(pkts[i], sa, &priv->cop);
if (unlikely(ret)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
- IPSEC_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
+ RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
}
+}
+
+static inline int
+ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+ struct rte_mbuf *pkts[], uint16_t max_pkts)
+{
+ int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
+ struct ipsec_mbuf_metadata *priv;
+ struct rte_crypto_op *cops[max_pkts];
+ struct ipsec_sa *sa;
+ struct rte_mbuf *pkt;
- nb_pkts = 0;
for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
struct cdev_qp *cqp;
@@ -166,9 +180,9 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
priv = get_priv(pkt);
sa = priv->sa;
- IPSEC_ASSERT(sa != NULL);
+ RTE_ASSERT(sa != NULL);
- ret = sa->post_crypto(pkt, sa, cops[j]);
+ ret = xform_func(pkt, sa, cops[j]);
if (unlikely(ret))
rte_pktmbuf_free(pkt);
else
@@ -188,7 +202,9 @@ ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);
- return ipsec_processing(ctx, pkts, sas, nb_pkts, len);
+ ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);
+
+ return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}
uint16_t
@@ -199,5 +215,7 @@ ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);
- return ipsec_processing(ctx, pkts, sas, nb_pkts, len);
+ ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);
+
+ return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}
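
ipsec_processing() is split into ipsec_enqueue() and ipsec_dequeue(), with the ESP pre- and post-crypto handlers passed in as ipsec_xform_fn callbacks so the same driver serves both directions. A toy model of that split, using a ring of plain ints in place of crypto ops; every name here is illustrative and there is no full-queue handling since the burst is tiny:

#include <stddef.h>
#include <stdio.h>

typedef int (*xform_fn)(int *pkt);	/* stand-in for ipsec_xform_fn */

#define QSZ 8
static int queue[QSZ];
static unsigned int q_head, q_tail;

/* Apply the pre-crypto transform and queue what survives. */
static void
pipeline_enqueue(xform_fn pre, int *pkts, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (pre(&pkts[i]) != 0)
			continue;		/* drop on transform failure */
		queue[q_tail++ % QSZ] = pkts[i];
	}
}

/* Drain completed ops, apply the post-crypto transform, keep the survivors. */
static size_t
pipeline_dequeue(xform_fn post, int *out, size_t max)
{
	size_t n = 0;

	while (q_head != q_tail && n < max) {
		int pkt = queue[q_head++ % QSZ];

		if (post(&pkt) == 0)
			out[n++] = pkt;
	}
	return n;
}

/* Stand-ins for esp_inbound()/esp_inbound_post(). */
static int pre_crypto(int *pkt)  { *pkt += 1;  return 0; }
static int post_crypto(int *pkt) { *pkt *= 10; return 0; }

int main(void)
{
	int in[3] = { 1, 2, 3 }, out[3];
	size_t n;

	pipeline_enqueue(pre_crypto, in, 3);
	n = pipeline_dequeue(post_crypto, out, 3);
	printf("%zu packets: %d %d %d\n", n, out[0], out[1], out[2]);
	return 0;
}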
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index a13fdef9..0d2ee254 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -37,7 +37,6 @@
#include <stdint.h>
#include <rte_byteorder.h>
-#include <rte_ip.h>
#include <rte_crypto.h>
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
@@ -47,30 +46,18 @@
#define MAX_PKT_BURST 32
#define MAX_QP_PER_LCORE 256
-#ifdef IPSEC_DEBUG
-#define IPSEC_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
-#define IPSEC_LOG RTE_LOG
-#else
-#define IPSEC_ASSERT(exp) do {} while (0)
-#define IPSEC_LOG(...) do {} while (0)
-#endif /* IPSEC_DEBUG */
-
#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */
#define uint32_t_to_char(ip, a, b, c, d) do {\
- *a = (unsigned char)(ip >> 24 & 0xff);\
- *b = (unsigned char)(ip >> 16 & 0xff);\
- *c = (unsigned char)(ip >> 8 & 0xff);\
- *d = (unsigned char)(ip & 0xff);\
+ *a = (uint8_t)(ip >> 24 & 0xff);\
+ *b = (uint8_t)(ip >> 16 & 0xff);\
+ *c = (uint8_t)(ip >> 8 & 0xff);\
+ *d = (uint8_t)(ip & 0xff);\
} while (0)
#define DEFAULT_MAX_CATEGORIES 1
-#define IPSEC_SA_MAX_ENTRIES (64) /* must be power of 2, max 2 power 30 */
+#define IPSEC_SA_MAX_ENTRIES (128) /* must be power of 2, max 2 power 30 */
#define SPI2IDX(spi) (spi & (IPSEC_SA_MAX_ENTRIES - 1))
#define INVALID_SPI (0)
@@ -81,6 +68,8 @@ if (!(exp)) { \
#define IPSEC_XFORM_MAX 2
+#define IP6_VERSION (6)
+
struct rte_crypto_xform;
struct ipsec_xform;
struct rte_cryptodev_session;
@@ -88,25 +77,36 @@ struct rte_mbuf;
struct ipsec_sa;
-typedef int (*ipsec_xform_fn)(struct rte_mbuf *m, struct ipsec_sa *sa,
+typedef int32_t (*ipsec_xform_fn)(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
+struct ip_addr {
+ union {
+ uint32_t ip4;
+ union {
+ uint64_t ip6[2];
+ uint8_t ip6_b[16];
+ };
+ };
+};
+
struct ipsec_sa {
uint32_t spi;
uint32_t cdev_id_qp;
- uint32_t src;
- uint32_t dst;
struct rte_cryptodev_sym_session *crypto_session;
- struct rte_crypto_sym_xform *xforms;
- ipsec_xform_fn pre_crypto;
- ipsec_xform_fn post_crypto;
+ uint32_t seq;
enum rte_crypto_cipher_algorithm cipher_algo;
enum rte_crypto_auth_algorithm auth_algo;
uint16_t digest_len;
uint16_t iv_len;
uint16_t block_size;
uint16_t flags;
- uint32_t seq;
+#define IP4_TUNNEL (1 << 0)
+#define IP6_TUNNEL (1 << 1)
+#define TRANSPORT (1 << 2)
+ struct ip_addr src;
+ struct ip_addr dst;
+ struct rte_crypto_sym_xform *xforms;
} __rte_cache_aligned;
struct ipsec_mbuf_metadata {
@@ -125,7 +125,8 @@ struct cdev_qp {
struct ipsec_ctx {
struct rte_hash *cdev_map;
- struct sp_ctx *sp_ctx;
+ struct sp_ctx *sp4_ctx;
+ struct sp_ctx *sp6_ctx;
struct sa_ctx *sa_ctx;
uint16_t nb_qps;
uint16_t last_qp;
@@ -139,11 +140,14 @@ struct cdev_key {
};
struct socket_ctx {
- struct sa_ctx *sa_ipv4_in;
- struct sa_ctx *sa_ipv4_out;
- struct sp_ctx *sp_ipv4_in;
- struct sp_ctx *sp_ipv4_out;
- struct rt_ctx *rt_ipv4;
+ struct sa_ctx *sa_in;
+ struct sa_ctx *sa_out;
+ struct sp_ctx *sp_ip4_in;
+ struct sp_ctx *sp_ip4_out;
+ struct sp_ctx *sp_ip6_in;
+ struct sp_ctx *sp_ip6_out;
+ struct rt_ctx *rt_ip4;
+ struct rt_ctx *rt_ip6;
struct rte_mempool *mbuf_pool;
};
@@ -179,12 +183,15 @@ outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
struct ipsec_sa *sa[], uint16_t nb_pkts);
void
-sp_init(struct socket_ctx *ctx, int socket_id, unsigned ep);
+sp4_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
+
+void
+sp6_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
void
-sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep);
+sa_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
void
-rt_init(struct socket_ctx *ctx, int socket_id, unsigned ep);
+rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
#endif /* __IPSEC_H__ */
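
ipsec.h drops the separate uint32_t src/dst members in favour of a struct ip_addr union plus the IP4_TUNNEL/IP6_TUNNEL/TRANSPORT flags, so a single SA entry can carry either address family. A minimal sketch of how such a union is read, using a local copy of the definitions and an illustrative print helper (network byte order is assumed for the stored addresses):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define IP4_TUNNEL (1 << 0)
#define IP6_TUNNEL (1 << 1)
#define TRANSPORT  (1 << 2)

/* Local copy of the new ip_addr union from ipsec.h. */
struct ip_addr {
	union {
		uint32_t ip4;
		union {
			uint64_t ip6[2];
			uint8_t ip6_b[16];
		};
	};
};

/* Illustrative helper: the SA flags decide how the union is interpreted. */
static void
print_addr(const struct ip_addr *a, uint16_t flags)
{
	char buf[INET6_ADDRSTRLEN];

	if (flags & IP4_TUNNEL)
		inet_ntop(AF_INET, &a->ip4, buf, sizeof(buf));
	else if (flags & IP6_TUNNEL)
		inet_ntop(AF_INET6, a->ip6_b, buf, sizeof(buf));
	else
		snprintf(buf, sizeof(buf), "(transport: no tunnel endpoints)");
	printf("%s\n", buf);
}

int main(void)
{
	struct ip_addr a4 = { .ip4 = htonl(0xac100205) };	/* 172.16.2.5 */
	struct ip_addr a6;

	memset(&a6, 0, sizeof(a6));
	a6.ip6_b[0] = 0x22; a6.ip6_b[1] = 0x22;
	a6.ip6_b[14] = 0x55; a6.ip6_b[15] = 0x55;

	print_addr(&a4, IP4_TUNNEL);
	print_addr(&a6, IP6_TUNNEL);
	print_addr(&a4, TRANSPORT);
	return 0;
}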
diff --git a/examples/ipsec-secgw/rt.c b/examples/ipsec-secgw/rt.c
index a6d0866a..fa5f0420 100644
--- a/examples/ipsec-secgw/rt.c
+++ b/examples/ipsec-secgw/rt.c
@@ -36,110 +36,237 @@
*/
#include <sys/types.h>
#include <rte_lpm.h>
+#include <rte_lpm6.h>
#include <rte_errno.h>
+#include <rte_ip.h>
#include "ipsec.h"
-#define RT_IPV4_MAX_RULES 64
+#define RT_IPV4_MAX_RULES 1024
+#define RT_IPV6_MAX_RULES 1024
-struct ipv4_route {
+struct ip4_route {
uint32_t ip;
- uint8_t depth;
- uint8_t if_out;
+ uint8_t depth;
+ uint8_t if_out;
};
-/* In the default routing table we have:
- * ep0 protected ports 0 and 1, and unprotected ports 2 and 3.
- */
-static struct ipv4_route rt_ipv4_ep0[] = {
+struct ip6_route {
+ uint8_t ip[16];
+ uint8_t depth;
+ uint8_t if_out;
+};
+
+static struct ip4_route rt_ip4_ep0[] = {
+ /* Outbound */
+ /* Tunnels */
{ IPv4(172, 16, 2, 5), 32, 0 },
- { IPv4(172, 16, 2, 6), 32, 0 },
- { IPv4(172, 16, 2, 7), 32, 1 },
- { IPv4(172, 16, 2, 8), 32, 1 },
+ { IPv4(172, 16, 2, 6), 32, 1 },
+ /* Transport */
+ { IPv4(192, 168, 175, 0), 24, 0 },
+ { IPv4(192, 168, 176, 0), 24, 1 },
+ /* Bypass */
+ { IPv4(192, 168, 240, 0), 24, 0 },
+ { IPv4(192, 168, 241, 0), 24, 1 },
+ /* Inbound */
+ /* Tunnels */
{ IPv4(192, 168, 115, 0), 24, 2 },
- { IPv4(192, 168, 116, 0), 24, 2 },
- { IPv4(192, 168, 117, 0), 24, 3 },
- { IPv4(192, 168, 118, 0), 24, 3 },
-
+ { IPv4(192, 168, 116, 0), 24, 3 },
+ { IPv4(192, 168, 65, 0), 24, 2 },
+ { IPv4(192, 168, 66, 0), 24, 3 },
+ /* Transport */
+ { IPv4(192, 168, 185, 0), 24, 2 },
+ { IPv4(192, 168, 186, 0), 24, 3 },
+ /* NULL */
{ IPv4(192, 168, 210, 0), 24, 2 },
+ { IPv4(192, 168, 211, 0), 24, 3 },
+ /* Bypass */
+ { IPv4(192, 168, 245, 0), 24, 2 },
+ { IPv4(192, 168, 246, 0), 24, 3 },
+};
- { IPv4(192, 168, 240, 0), 24, 2 },
- { IPv4(192, 168, 250, 0), 24, 0 }
+static struct ip6_route rt_ip6_ep0[] = {
+ /* Outbound */
+ /* Tunnels */
+ { { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 }, 116, 0 },
+ { { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 }, 116, 1 },
+ /* Transport */
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 0 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 1 },
+ /* Inbound */
+ /* Tunnels */
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb,
+ 0xbb, 0xbb, 0xbb, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55,
+ 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66,
+ 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ /* Transport */
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
};
-/* In the default routing table we have:
- * ep1 protected ports 0 and 1, and unprotected ports 2 and 3.
- */
-static struct ipv4_route rt_ipv4_ep1[] = {
- { IPv4(172, 16, 1, 5), 32, 2 },
- { IPv4(172, 16, 1, 6), 32, 2 },
- { IPv4(172, 16, 1, 7), 32, 3 },
- { IPv4(172, 16, 1, 8), 32, 3 },
+static struct ip4_route rt_ip4_ep1[] = {
+ /* Outbound */
+ /* Tunnels */
+ { IPv4(172, 16, 1, 5), 32, 0 },
+ { IPv4(172, 16, 1, 6), 32, 1 },
+ /* Transport */
+ { IPv4(192, 168, 185, 0), 24, 0 },
+ { IPv4(192, 168, 186, 0), 24, 1 },
+ /* Bypass */
+ { IPv4(192, 168, 245, 0), 24, 0 },
+ { IPv4(192, 168, 246, 0), 24, 1 },
- { IPv4(192, 168, 105, 0), 24, 0 },
- { IPv4(192, 168, 106, 0), 24, 0 },
- { IPv4(192, 168, 107, 0), 24, 1 },
- { IPv4(192, 168, 108, 0), 24, 1 },
+ /* Inbound */
+ /* Tunnels */
+ { IPv4(192, 168, 105, 0), 24, 2 },
+ { IPv4(192, 168, 106, 0), 24, 3 },
+ { IPv4(192, 168, 55, 0), 24, 2 },
+ { IPv4(192, 168, 56, 0), 24, 3 },
+ /* Transport */
+ { IPv4(192, 168, 175, 0), 24, 2 },
+ { IPv4(192, 168, 176, 0), 24, 3 },
+ /* NULL */
+ { IPv4(192, 168, 200, 0), 24, 2 },
+ { IPv4(192, 168, 201, 0), 24, 3 },
+ /* Bypass */
+ { IPv4(192, 168, 240, 0), 24, 2 },
+ { IPv4(192, 168, 241, 0), 24, 3 },
+};
- { IPv4(192, 168, 200, 0), 24, 0 },
+static struct ip6_route rt_ip6_ep1[] = {
+ /* Outbound */
+ /* Tunnels */
+ { { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 }, 116, 0 },
+ { { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 }, 116, 1 },
+ /* Transport */
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 0 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 1 },
- { IPv4(192, 168, 240, 0), 24, 2 },
- { IPv4(192, 168, 250, 0), 24, 0 }
+ /* Inbound */
+ /* Tunnels */
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb,
+ 0xbb, 0xbb, 0xbb, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55,
+ 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66,
+ 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ /* Transport */
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
};
void
-rt_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
+rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
{
char name[PATH_MAX];
- unsigned i;
- int ret;
+ uint32_t i;
+ int32_t ret;
struct rte_lpm *lpm;
- struct ipv4_route *rt;
+ struct rte_lpm6 *lpm6;
+ struct ip4_route *rt;
+ struct ip6_route *rt6;
char a, b, c, d;
- unsigned nb_routes;
+ uint32_t nb_routes, nb_routes6;
struct rte_lpm_config conf = { 0 };
+ struct rte_lpm6_config conf6 = { 0 };
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
- if (ctx->rt_ipv4 != NULL)
- rte_exit(EXIT_FAILURE, "Routing Table for socket %u already "
- "initialized\n", socket_id);
+ if (ctx->rt_ip4 != NULL)
+ rte_exit(EXIT_FAILURE, "IPv4 Routing Table for socket %u "
+ "already initialized\n", socket_id);
+
+ if (ctx->rt_ip6 != NULL)
+ rte_exit(EXIT_FAILURE, "IPv6 Routing Table for socket %u "
+ "already initialized\n", socket_id);
- printf("Creating Routing Table (RT) context with %u max routes\n",
+ printf("Creating IPv4 Routing Table (RT) context with %u max routes\n",
RT_IPV4_MAX_RULES);
if (ep == 0) {
- rt = rt_ipv4_ep0;
- nb_routes = RTE_DIM(rt_ipv4_ep0);
+ rt = rt_ip4_ep0;
+ nb_routes = RTE_DIM(rt_ip4_ep0);
+ rt6 = rt_ip6_ep0;
+ nb_routes6 = RTE_DIM(rt_ip6_ep0);
} else if (ep == 1) {
- rt = rt_ipv4_ep1;
- nb_routes = RTE_DIM(rt_ipv4_ep1);
+ rt = rt_ip4_ep1;
+ nb_routes = RTE_DIM(rt_ip4_ep1);
+ rt6 = rt_ip6_ep1;
+ nb_routes6 = RTE_DIM(rt_ip6_ep1);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. Only 0 or 1 "
"supported.\n", ep);
/* create the LPM table */
- snprintf(name, sizeof(name), "%s_%u", "rt_ipv4", socket_id);
+ snprintf(name, sizeof(name), "%s_%u", "rt_ip4", socket_id);
conf.max_rules = RT_IPV4_MAX_RULES;
conf.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES;
lpm = rte_lpm_create(name, socket_id, &conf);
if (lpm == NULL)
- rte_exit(EXIT_FAILURE, "Unable to create LPM table "
- "on socket %d\n", socket_id);
+ rte_exit(EXIT_FAILURE, "Unable to create %s LPM table "
+ "on socket %d\n", name, socket_id);
/* populate the LPM table */
for (i = 0; i < nb_routes; i++) {
ret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].if_out);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Unable to add entry num %u to "
- "LPM table on socket %d\n", i, socket_id);
+ rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s "
+ "LPM table on socket %d\n", i, name, socket_id);
uint32_t_to_char(rt[i].ip, &a, &b, &c, &d);
printf("LPM: Adding route %hhu.%hhu.%hhu.%hhu/%hhu (%hhu)\n",
a, b, c, d, rt[i].depth, rt[i].if_out);
}
- ctx->rt_ipv4 = (struct rt_ctx *)lpm;
+ snprintf(name, sizeof(name), "%s_%u", "rt_ip6", socket_id);
+ conf6.max_rules = RT_IPV6_MAX_RULES;
+ conf6.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES;
+ lpm6 = rte_lpm6_create(name, socket_id, &conf6);
+ if (lpm6 == NULL)
+ rte_exit(EXIT_FAILURE, "Unable to create %s LPM table "
+ "on socket %d\n", name, socket_id);
+
+ /* populate the LPM table */
+ for (i = 0; i < nb_routes6; i++) {
+ ret = rte_lpm6_add(lpm6, rt6[i].ip, rt6[i].depth,
+ rt6[i].if_out);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s "
+ "LPM table on socket %d\n", i, name, socket_id);
+
+ printf("LPM6: Adding route "
+ " %hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx/%hhx (%hhx)\n",
+ (uint16_t)((rt6[i].ip[0] << 8) | rt6[i].ip[1]),
+ (uint16_t)((rt6[i].ip[2] << 8) | rt6[i].ip[3]),
+ (uint16_t)((rt6[i].ip[4] << 8) | rt6[i].ip[5]),
+ (uint16_t)((rt6[i].ip[6] << 8) | rt6[i].ip[7]),
+ (uint16_t)((rt6[i].ip[8] << 8) | rt6[i].ip[9]),
+ (uint16_t)((rt6[i].ip[10] << 8) | rt6[i].ip[11]),
+ (uint16_t)((rt6[i].ip[12] << 8) | rt6[i].ip[13]),
+ (uint16_t)((rt6[i].ip[14] << 8) | rt6[i].ip[15]),
+ rt6[i].depth, rt6[i].if_out);
+ }
+
+ ctx->rt_ip4 = (struct rt_ctx *)lpm;
+ ctx->rt_ip6 = (struct rt_ctx *)lpm6;
}
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index b6260ede..ab18b811 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -37,170 +37,200 @@
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
+#include <rte_ip.h>
#include "ipsec.h"
#include "esp.h"
-/* SAs EP0 Outbound */
-const struct ipsec_sa sa_ep0_out[] = {
- { 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 1, 8), IPv4(172, 16, 2, 8),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
+/* SAs Outbound */
+const struct ipsec_sa sa_out[] = {
+ {
+ .spi = 5,
+ .src.ip4 = IPv4(172, 16, 1, 5),
+ .dst.ip4 = IPv4(172, 16, 2, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 6,
+ .src.ip4 = IPv4(172, 16, 1, 6),
+ .dst.ip4 = IPv4(172, 16, 2, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 10,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 11,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 15,
+ .src.ip4 = IPv4(172, 16, 1, 5),
+ .dst.ip4 = IPv4(172, 16, 2, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 16,
+ .src.ip4 = IPv4(172, 16, 1, 6),
+ .dst.ip4 = IPv4(172, 16, 2, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 25,
+ .src.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 },
+ .dst.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
+ {
+ .spi = 26,
+ .src.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 },
+ .dst.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
};
-/* SAs EP0 Inbound */
-const struct ipsec_sa sa_ep0_in[] = {
- { 5, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 2, 6), IPv4(172, 16, 1, 6),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 2, 7), IPv4(172, 16, 1, 7),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 2, 8), IPv4(172, 16, 1, 8),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
-};
-
-/* SAs EP1 Outbound */
-const struct ipsec_sa sa_ep1_out[] = {
- { 5, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 2, 6), IPv4(172, 16, 1, 6),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 2, 7), IPv4(172, 16, 1, 7),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 2, 8), IPv4(172, 16, 1, 8),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
-};
-
-/* SAs EP1 Inbound */
-const struct ipsec_sa sa_ep1_in[] = {
- { 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 1, 8), IPv4(172, 16, 2, 8),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
+/* SAs Inbound */
+const struct ipsec_sa sa_in[] = {
+ {
+ .spi = 105,
+ .src.ip4 = IPv4(172, 16, 2, 5),
+ .dst.ip4 = IPv4(172, 16, 1, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 106,
+ .src.ip4 = IPv4(172, 16, 2, 6),
+ .dst.ip4 = IPv4(172, 16, 1, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 110,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 111,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 115,
+ .src.ip4 = IPv4(172, 16, 2, 5),
+ .dst.ip4 = IPv4(172, 16, 1, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 116,
+ .src.ip4 = IPv4(172, 16, 2, 6),
+ .dst.ip4 = IPv4(172, 16, 1, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 125,
+ .src.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 },
+ .dst.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
+ {
+ .spi = 126,
+ .src.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 },
+ .dst.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
};
static uint8_t cipher_key[256] = "sixteenbytes key";
@@ -265,11 +295,11 @@ struct sa_ctx {
};
static struct sa_ctx *
-sa_ipv4_create(const char *name, int socket_id)
+sa_create(const char *name, int32_t socket_id)
{
char s[PATH_MAX];
struct sa_ctx *sa_ctx;
- unsigned mz_size;
+ uint32_t mz_size;
const struct rte_memzone *mz;
snprintf(s, sizeof(s), "%s_%u", name, socket_id);
@@ -294,10 +324,10 @@ sa_ipv4_create(const char *name, int socket_id)
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- unsigned nb_entries, unsigned inbound)
+ uint32_t nb_entries, uint32_t inbound)
{
struct ipsec_sa *sa;
- unsigned i, idx;
+ uint32_t i, idx;
for (i = 0; i < nb_entries; i++) {
idx = SPI2IDX(entries[i].spi);
@@ -308,8 +338,14 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
return -EINVAL;
}
*sa = entries[i];
- sa->src = rte_cpu_to_be_32(sa->src);
- sa->dst = rte_cpu_to_be_32(sa->dst);
+ sa->seq = 0;
+
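+ /* Tunnel endpoints are configured in host byte order; convert the
+ * IPv4 addresses to network order here so they can be compared
+ * directly against the outer header during inbound lookup. */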
+ switch (sa->flags) {
+ case IP4_TUNNEL:
+ sa->src.ip4 = rte_cpu_to_be_32(sa->src.ip4);
+ sa->dst.ip4 = rte_cpu_to_be_32(sa->dst.ip4);
+ }
+
if (inbound) {
if (sa->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
sa_ctx->xf[idx].a = null_auth_xf;
@@ -337,65 +373,65 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- unsigned nb_entries)
+ uint32_t nb_entries)
{
return sa_add_rules(sa_ctx, entries, nb_entries, 0);
}
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- unsigned nb_entries)
+ uint32_t nb_entries)
{
return sa_add_rules(sa_ctx, entries, nb_entries, 1);
}
void
-sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
+sa_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
{
const struct ipsec_sa *sa_out_entries, *sa_in_entries;
- unsigned nb_out_entries, nb_in_entries;
+ uint32_t nb_out_entries, nb_in_entries;
const char *name;
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
- if (ctx->sa_ipv4_in != NULL)
+ if (ctx->sa_in != NULL)
rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
"initialized\n", socket_id);
- if (ctx->sa_ipv4_out != NULL)
+ if (ctx->sa_out != NULL)
rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
"initialized\n", socket_id);
if (ep == 0) {
- sa_out_entries = sa_ep0_out;
- nb_out_entries = RTE_DIM(sa_ep0_out);
- sa_in_entries = sa_ep0_in;
- nb_in_entries = RTE_DIM(sa_ep0_in);
+ sa_out_entries = sa_out;
+ nb_out_entries = RTE_DIM(sa_out);
+ sa_in_entries = sa_in;
+ nb_in_entries = RTE_DIM(sa_in);
} else if (ep == 1) {
- sa_out_entries = sa_ep1_out;
- nb_out_entries = RTE_DIM(sa_ep1_out);
- sa_in_entries = sa_ep1_in;
- nb_in_entries = RTE_DIM(sa_ep1_in);
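+ /* Endpoint 1 mirrors endpoint 0: its outbound SAs are the peer's
+ * inbound set and vice versa. */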
+ sa_out_entries = sa_in;
+ nb_out_entries = RTE_DIM(sa_in);
+ sa_in_entries = sa_out;
+ nb_in_entries = RTE_DIM(sa_out);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
"Only 0 or 1 supported.\n", ep);
- name = "sa_ipv4_in";
- ctx->sa_ipv4_in = sa_ipv4_create(name, socket_id);
- if (ctx->sa_ipv4_in == NULL)
+ name = "sa_in";
+ ctx->sa_in = sa_create(name, socket_id);
+ if (ctx->sa_in == NULL)
rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
"in socket %d\n", rte_errno, name, socket_id);
- name = "sa_ipv4_out";
- ctx->sa_ipv4_out = sa_ipv4_create(name, socket_id);
- if (ctx->sa_ipv4_out == NULL)
+ name = "sa_out";
+ ctx->sa_out = sa_create(name, socket_id);
+ if (ctx->sa_out == NULL)
rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
"in socket %d\n", rte_errno, name, socket_id);
- sa_in_add_rules(ctx->sa_ipv4_in, sa_in_entries, nb_in_entries);
+ sa_in_add_rules(ctx->sa_in, sa_in_entries, nb_in_entries);
- sa_out_add_rules(ctx->sa_ipv4_out, sa_out_entries, nb_out_entries);
+ sa_out_add_rules(ctx->sa_out, sa_out_entries, nb_out_entries);
}
int
@@ -408,38 +444,66 @@ inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
return (sa_ctx->sa[sa_idx].spi == priv->sa->spi);
}
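+/*
+ * Match one inbound packet against the SA database: index by SPI and,
+ * for tunnel SAs, verify that the outer source/destination addresses
+ * match the configured endpoints. *sa_ret is left NULL when no SA matches.
+ */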
+static inline void
+single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
+ struct ipsec_sa **sa_ret)
+{
+ struct esp_hdr *esp;
+ struct ip *ip;
+ uint32_t *src4_addr;
+ uint8_t *src6_addr;
+ struct ipsec_sa *sa;
+
+ *sa_ret = NULL;
+
+ ip = rte_pktmbuf_mtod(pkt, struct ip *);
+ if (ip->ip_v == IPVERSION)
+ esp = (struct esp_hdr *)(ip + 1);
+ else
+ esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);
+
+ if (esp->spi == INVALID_SPI)
+ return;
+
+ sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
+ if (rte_be_to_cpu_32(esp->spi) != sa->spi)
+ return;
+
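+ /* ip_dst immediately follows ip_src in both the IPv4 and IPv6
+ * headers, so one pointer into the header covers both comparisons. */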
+ switch (sa->flags) {
+ case IP4_TUNNEL:
+ src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
+ if ((ip->ip_v == IPVERSION) &&
+ (sa->src.ip4 == *src4_addr) &&
+ (sa->dst.ip4 == *(src4_addr + 1)))
+ *sa_ret = sa;
+ break;
+ case IP6_TUNNEL:
+ src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
+ if ((ip->ip_v == IP6_VERSION) &&
+ !memcmp(&sa->src.ip6, src6_addr, 16) &&
+ !memcmp(&sa->dst.ip6, src6_addr + 16, 16))
+ *sa_ret = sa;
+ break;
+ case TRANSPORT:
+ *sa_ret = sa;
+ }
+}
+
void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
struct ipsec_sa *sa[], uint16_t nb_pkts)
{
- unsigned i;
- uint32_t *src, spi;
-
- for (i = 0; i < nb_pkts; i++) {
- spi = rte_pktmbuf_mtod_offset(pkts[i], struct esp_hdr *,
- sizeof(struct ip))->spi;
-
- if (spi == INVALID_SPI)
- continue;
+ uint32_t i;
- sa[i] = &sa_ctx->sa[SPI2IDX(spi)];
- if (spi != sa[i]->spi) {
- sa[i] = NULL;
- continue;
- }
-
- src = rte_pktmbuf_mtod_offset(pkts[i], uint32_t *,
- offsetof(struct ip, ip_src));
- if ((sa[i]->src != *src) || (sa[i]->dst != *(src + 1)))
- sa[i] = NULL;
- }
+ for (i = 0; i < nb_pkts; i++)
+ single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
}
void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
struct ipsec_sa *sa[], uint16_t nb_pkts)
{
- unsigned i;
+ uint32_t i;
for (i = 0; i < nb_pkts; i++)
sa[i] = &sa_ctx->sa[sa_idx[i]];
diff --git a/examples/ipsec-secgw/sp.c b/examples/ipsec-secgw/sp4.c
index 4f167301..9c4b256b 100644
--- a/examples/ipsec-secgw/sp.c
+++ b/examples/ipsec-secgw/sp4.c
@@ -39,6 +39,7 @@
#include <netinet/ip.h>
#include <rte_acl.h>
+#include <rte_ip.h>
#include "ipsec.h"
@@ -71,7 +72,7 @@ enum {
RTE_ACL_IPV4_NUM
};
-struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
+struct rte_acl_field_def ip4_defs[NUM_FIELDS_IPV4] = {
{
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint8_t),
@@ -110,9 +111,9 @@ struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
},
};
-RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ipv4_defs));
+RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ip4_defs));
-const struct acl4_rules acl4_rules_in[] = {
+const struct acl4_rules acl4_rules_out[] = {
{
.data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},
/* destination IPv4 */
@@ -124,7 +125,7 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 2},
+ .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 106, 0),
.mask_range.u32 = 24,},
@@ -134,9 +135,9 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(7), .category_mask = 1, .priority = 3},
+ .data = {.userdata = PROTECT(10), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 107, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 175, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -144,9 +145,9 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(8), .category_mask = 1, .priority = 4},
+ .data = {.userdata = PROTECT(11), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 108, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 176, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -154,7 +155,7 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(9), .category_mask = 1, .priority = 5},
+ .data = {.userdata = PROTECT(15), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 200, 0),
.mask_range.u32 = 24,},
@@ -164,9 +165,49 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = BYPASS, .category_mask = 1, .priority = 6},
+ .data = {.userdata = PROTECT(16), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 250, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 201, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(25), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 55, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(26), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 56, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 240, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 241, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -175,9 +216,9 @@ const struct acl4_rules acl4_rules_in[] = {
}
};
-const struct acl4_rules acl4_rules_out[] = {
+const struct acl4_rules acl4_rules_in[] = {
{
- .data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},
+ .data = {.userdata = PROTECT(105), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 115, 0),
.mask_range.u32 = 24,},
@@ -187,7 +228,7 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 2},
+ .data = {.userdata = PROTECT(106), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 116, 0),
.mask_range.u32 = 24,},
@@ -197,9 +238,9 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(7), .category_mask = 1, .priority = 3},
+ .data = {.userdata = PROTECT(110), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 117, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 185, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -207,9 +248,9 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(8), .category_mask = 1, .priority = 4},
+ .data = {.userdata = PROTECT(111), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 118, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 186, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -217,7 +258,7 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(9), .category_mask = 1, .priority = 5},
+ .data = {.userdata = PROTECT(115), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 210, 0),
.mask_range.u32 = 24,},
@@ -227,9 +268,49 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = BYPASS, .category_mask = 1, .priority = 6},
+ .data = {.userdata = PROTECT(116), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 240, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 211, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(125), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 65, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(126), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 66, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 245, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 246, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -239,9 +320,9 @@ const struct acl4_rules acl4_rules_out[] = {
};
static void
-print_one_ipv4_rule(const struct acl4_rules *rule, int extra)
+print_one_ip4_rule(const struct acl4_rules *rule, int32_t extra)
{
- unsigned char a, b, c, d;
+ uint8_t a, b, c, d;
uint32_t_to_char(rule->field[SRC_FIELD_IPV4].value.u32,
&a, &b, &c, &d);
@@ -266,20 +347,20 @@ print_one_ipv4_rule(const struct acl4_rules *rule, int extra)
}
static inline void
-dump_ipv4_rules(const struct acl4_rules *rule, int num, int extra)
+dump_ip4_rules(const struct acl4_rules *rule, int32_t num, int32_t extra)
{
- int i;
+ int32_t i;
for (i = 0; i < num; i++, rule++) {
printf("\t%d:", i + 1);
- print_one_ipv4_rule(rule, extra);
+ print_one_ip4_rule(rule, extra);
printf("\n");
}
}
static struct rte_acl_ctx *
-acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
- unsigned rules_nb)
+acl4_init(const char *name, int32_t socketid, const struct acl4_rules *rules,
+ uint32_t rules_nb)
{
char s[PATH_MAX];
struct rte_acl_param acl_param;
@@ -294,11 +375,11 @@ acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
snprintf(s, sizeof(s), "%s_%d", name, socketid);
printf("IPv4 %s entries [%u]:\n", s, rules_nb);
- dump_ipv4_rules(rules, rules_nb, 1);
+ dump_ip4_rules(rules, rules_nb, 1);
acl_param.name = s;
acl_param.socket_id = socketid;
- acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ipv4_defs));
+ acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip4_defs));
acl_param.max_rule_num = MAX_ACL_RULE_NUM;
ctx = rte_acl_create(&acl_param);
@@ -313,8 +394,8 @@ acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
memset(&acl_build_param, 0, sizeof(acl_build_param));
acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
- acl_build_param.num_fields = RTE_DIM(ipv4_defs);
- memcpy(&acl_build_param.defs, ipv4_defs, sizeof(ipv4_defs));
+ acl_build_param.num_fields = RTE_DIM(ip4_defs);
+ memcpy(&acl_build_param.defs, ip4_defs, sizeof(ip4_defs));
if (rte_acl_build(ctx, &acl_build_param) != 0)
rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n");
@@ -325,42 +406,42 @@ acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
}
void
-sp_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
+sp4_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
{
const char *name;
const struct acl4_rules *rules_out, *rules_in;
- unsigned nb_out_rules, nb_in_rules;
+ uint32_t nb_out_rules, nb_in_rules;
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
- if (ctx->sp_ipv4_in != NULL)
+ if (ctx->sp_ip4_in != NULL)
rte_exit(EXIT_FAILURE, "Inbound SP DB for socket %u already "
"initialized\n", socket_id);
- if (ctx->sp_ipv4_out != NULL)
+ if (ctx->sp_ip4_out != NULL)
rte_exit(EXIT_FAILURE, "Outbound SP DB for socket %u already "
"initialized\n", socket_id);
if (ep == 0) {
- rules_out = acl4_rules_in;
- nb_out_rules = RTE_DIM(acl4_rules_in);
- rules_in = acl4_rules_out;
- nb_in_rules = RTE_DIM(acl4_rules_out);
- } else if (ep == 1) {
rules_out = acl4_rules_out;
nb_out_rules = RTE_DIM(acl4_rules_out);
rules_in = acl4_rules_in;
nb_in_rules = RTE_DIM(acl4_rules_in);
+ } else if (ep == 1) {
+ rules_out = acl4_rules_in;
+ nb_out_rules = RTE_DIM(acl4_rules_in);
+ rules_in = acl4_rules_out;
+ nb_in_rules = RTE_DIM(acl4_rules_out);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
"Only 0 or 1 supported.\n", ep);
- name = "sp_ipv4_in";
- ctx->sp_ipv4_in = (struct sp_ctx *)acl4_init(name, socket_id,
+ name = "sp_ip4_in";
+ ctx->sp_ip4_in = (struct sp_ctx *)acl4_init(name, socket_id,
rules_in, nb_in_rules);
- name = "sp_ipv4_out";
- ctx->sp_ipv4_out = (struct sp_ctx *)acl4_init(name, socket_id,
+ name = "sp_ip4_out";
+ ctx->sp_ip4_out = (struct sp_ctx *)acl4_init(name, socket_id,
rules_out, nb_out_rules);
}
diff --git a/examples/ipsec-secgw/sp6.c b/examples/ipsec-secgw/sp6.c
new file mode 100644
index 00000000..1dda11a4
--- /dev/null
+++ b/examples/ipsec-secgw/sp6.c
@@ -0,0 +1,448 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Security Policies
+ */
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <netinet/ip6.h>
+
+#include <rte_acl.h>
+#include <rte_ip.h>
+
+#include "ipsec.h"
+
+#define MAX_ACL_RULE_NUM 1000
+
+enum {
+ IP6_PROTO,
+ IP6_SRC0,
+ IP6_SRC1,
+ IP6_SRC2,
+ IP6_SRC3,
+ IP6_DST0,
+ IP6_DST1,
+ IP6_DST2,
+ IP6_DST3,
+ IP6_SRCP,
+ IP6_DSTP,
+ IP6_NUM
+};
+
+#define IP6_ADDR_SIZE 16
+
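+/*
+ * ACL field layout, with offsets relative to the IPv6 next-header byte:
+ * the protocol at offset 0, the 128-bit source and destination addresses
+ * split into four 32-bit chunks each, then the transport source and
+ * destination ports.
+ */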
+struct rte_acl_field_def ip6_defs[IP6_NUM] = {
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = IP6_PROTO,
+ .input_index = IP6_PROTO,
+ .offset = 0,
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC0,
+ .input_index = IP6_SRC0,
+ .offset = 2
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC1,
+ .input_index = IP6_SRC1,
+ .offset = 6
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC2,
+ .input_index = IP6_SRC2,
+ .offset = 10
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC3,
+ .input_index = IP6_SRC3,
+ .offset = 14
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST0,
+ .input_index = IP6_DST0,
+ .offset = 18
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST1,
+ .input_index = IP6_DST1,
+ .offset = 22
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST2,
+ .input_index = IP6_DST2,
+ .offset = 26
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST3,
+ .input_index = IP6_DST3,
+ .offset = 30
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = IP6_SRCP,
+ .input_index = IP6_SRCP,
+ .offset = 34
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = IP6_DSTP,
+ .input_index = IP6_SRCP,
+ .offset = 36
+ }
+};
+
+RTE_ACL_RULE_DEF(acl6_rules, RTE_DIM(ip6_defs));
+
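+/* Each rule's userdata, PROTECT(spi), binds matching traffic to the SA
+ * with that SPI. */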
+const struct acl6_rules acl6_rules_out[] = {
+ {
+ .data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x55555555, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x66666666, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(10), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x00000000, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(11), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(25), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xaaaaaaaa, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(26), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xbbbbbbbb, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ }
+};
+
+const struct acl6_rules acl6_rules_in[] = {
+ {
+ .data = {.userdata = PROTECT(15), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x55555555, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(16), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x66666666, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(110), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x00000000, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(111), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(125), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xaaaaaaaa, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(126), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xbbbbbbbb, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ }
+};
+
+static inline void
+print_one_ip6_rule(const struct acl6_rules *rule, int32_t extra)
+{
+ uint8_t a, b, c, d;
+
+ uint32_t_to_char(rule->field[IP6_SRC0].value.u32,
+ &a, &b, &c, &d);
+ printf("%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_SRC1].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_SRC2].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_SRC3].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d,
+ rule->field[IP6_SRC0].mask_range.u32
+ + rule->field[IP6_SRC1].mask_range.u32
+ + rule->field[IP6_SRC2].mask_range.u32
+ + rule->field[IP6_SRC3].mask_range.u32);
+
+ uint32_t_to_char(rule->field[IP6_DST0].value.u32,
+ &a, &b, &c, &d);
+ printf("%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_DST1].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_DST2].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_DST3].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d,
+ rule->field[IP6_DST0].mask_range.u32
+ + rule->field[IP6_DST1].mask_range.u32
+ + rule->field[IP6_DST2].mask_range.u32
+ + rule->field[IP6_DST3].mask_range.u32);
+
+ printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ",
+ rule->field[IP6_SRCP].value.u16,
+ rule->field[IP6_SRCP].mask_range.u16,
+ rule->field[IP6_DSTP].value.u16,
+ rule->field[IP6_DSTP].mask_range.u16,
+ rule->field[IP6_PROTO].value.u8,
+ rule->field[IP6_PROTO].mask_range.u8);
+ if (extra)
+ printf("0x%x-0x%x-0x%x ",
+ rule->data.category_mask,
+ rule->data.priority,
+ rule->data.userdata);
+}
+
+static inline void
+dump_ip6_rules(const struct acl6_rules *rule, int32_t num, int32_t extra)
+{
+ int32_t i;
+
+ for (i = 0; i < num; i++, rule++) {
+ printf("\t%d:", i + 1);
+ print_one_ip6_rule(rule, extra);
+ printf("\n");
+ }
+}
+
+static struct rte_acl_ctx *
+acl6_init(const char *name, int32_t socketid, const struct acl6_rules *rules,
+ uint32_t rules_nb)
+{
+ char s[PATH_MAX];
+ struct rte_acl_param acl_param;
+ struct rte_acl_config acl_build_param;
+ struct rte_acl_ctx *ctx;
+
+ printf("Creating SP context with %u max rules\n", MAX_ACL_RULE_NUM);
+
+ memset(&acl_param, 0, sizeof(acl_param));
+
+ /* Create ACL contexts */
+ snprintf(s, sizeof(s), "%s_%d", name, socketid);
+
+ printf("IPv4 %s entries [%u]:\n", s, rules_nb);
+ dump_ip6_rules(rules, rules_nb, 1);
+
+ acl_param.name = s;
+ acl_param.socket_id = socketid;
+ acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip6_defs));
+ acl_param.max_rule_num = MAX_ACL_RULE_NUM;
+
+ ctx = rte_acl_create(&acl_param);
+ if (ctx == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to create ACL context\n");
+
+ if (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules,
+ rules_nb) < 0)
+ rte_exit(EXIT_FAILURE, "add rules failed\n");
+
+ /* Perform builds */
+ memset(&acl_build_param, 0, sizeof(acl_build_param));
+
+ acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
+ acl_build_param.num_fields = RTE_DIM(ip6_defs);
+ memcpy(&acl_build_param.defs, ip6_defs, sizeof(ip6_defs));
+
+ if (rte_acl_build(ctx, &acl_build_param) != 0)
+ rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n");
+
+ rte_acl_dump(ctx);
+
+ return ctx;
+}
+
+void
+sp6_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
+{
+ const char *name;
+ const struct acl6_rules *rules_out, *rules_in;
+ uint32_t nb_out_rules, nb_in_rules;
+
+ if (ctx == NULL)
+ rte_exit(EXIT_FAILURE, "NULL context.\n");
+
+ if (ctx->sp_ip6_in != NULL)
+ rte_exit(EXIT_FAILURE, "Inbound IPv6 SP DB for socket %u "
+ "already initialized\n", socket_id);
+
+ if (ctx->sp_ip6_out != NULL)
+ rte_exit(EXIT_FAILURE, "Outbound IPv6 SP DB for socket %u "
+ "already initialized\n", socket_id);
+
+ if (ep == 0) {
+ rules_out = acl6_rules_out;
+ nb_out_rules = RTE_DIM(acl6_rules_out);
+ rules_in = acl6_rules_in;
+ nb_in_rules = RTE_DIM(acl6_rules_in);
+ } else if (ep == 1) {
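+ /* Endpoint 1 swaps the rule sets so two instances of the
+ * application can act as peers of each other. */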
+ rules_out = acl6_rules_in;
+ nb_out_rules = RTE_DIM(acl6_rules_in);
+ rules_in = acl6_rules_out;
+ nb_in_rules = RTE_DIM(acl6_rules_out);
+ } else
+ rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
+ "Only 0 or 1 supported.\n", ep);
+
+ name = "sp_ip6_in";
+ ctx->sp_ip6_in = (struct sp_ctx *)acl6_init(name, socket_id,
+ rules_in, nb_in_rules);
+
+ name = "sp_ip6_out";
+ ctx->sp_ip6_out = (struct sp_ctx *)acl6_init(name, socket_id,
+ rules_out, nb_out_rules);
+}