From b63264c8342e6a1b6971c79550d2af2024b6a4de Mon Sep 17 00:00:00 2001
From: Luca Boccassi
Date: Tue, 14 Aug 2018 18:52:30 +0100
Subject: New upstream version 18.08

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi
---
 app/test-pmd/csumonly.c | 108 ++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 96 insertions(+), 12 deletions(-)

(limited to 'app/test-pmd/csumonly.c')

diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 5f5ab64a..49482926 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -49,9 +49,12 @@
 #define IP_HDRLEN  0x05 /* default IP header length == five 32-bits words. */
 #define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
 
-#define GRE_KEY_PRESENT 0x2000
-#define GRE_KEY_LEN     4
-#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT
+#define GRE_CHECKSUM_PRESENT	0x8000
+#define GRE_KEY_PRESENT		0x2000
+#define GRE_SEQUENCE_PRESENT	0x1000
+#define GRE_EXT_LEN		4
+#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
+				 GRE_SEQUENCE_PRESENT)
 
 /* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
@@ -60,6 +63,8 @@
 #define _htons(x) (x)
 #endif
 
+uint16_t vxlan_gpe_udp_port = 4790;
+
 /* structure that caches offload info for the current packet */
 struct testpmd_offload_info {
 	uint16_t ethertype;
@@ -194,6 +199,70 @@ parse_vxlan(struct udp_hdr *udp_hdr,
 	info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
 }
 
+/* Parse a vxlan-gpe header */
+static void
+parse_vxlan_gpe(struct udp_hdr *udp_hdr,
+		struct testpmd_offload_info *info)
+{
+	struct ether_hdr *eth_hdr;
+	struct ipv4_hdr *ipv4_hdr;
+	struct ipv6_hdr *ipv6_hdr;
+	struct vxlan_gpe_hdr *vxlan_gpe_hdr;
+	uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != _htons(vxlan_gpe_udp_port))
+		return;
+
+	vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)((char *)udp_hdr +
+				sizeof(struct udp_hdr));
+
+	if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
+	    VXLAN_GPE_TYPE_IPV4) {
+		info->is_tunnel = 1;
+		info->outer_ethertype = info->ethertype;
+		info->outer_l2_len = info->l2_len;
+		info->outer_l3_len = info->l3_len;
+		info->outer_l4_proto = info->l4_proto;
+
+		ipv4_hdr = (struct ipv4_hdr *)((char *)vxlan_gpe_hdr +
+			   vxlan_gpe_len);
+
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = _htons(ETHER_TYPE_IPv4);
+		info->l2_len = 0;
+
+	} else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_IPV6) {
+		info->is_tunnel = 1;
+		info->outer_ethertype = info->ethertype;
+		info->outer_l2_len = info->l2_len;
+		info->outer_l3_len = info->l3_len;
+		info->outer_l4_proto = info->l4_proto;
+
+		ipv6_hdr = (struct ipv6_hdr *)((char *)vxlan_gpe_hdr +
+			   vxlan_gpe_len);
+
+		info->ethertype = _htons(ETHER_TYPE_IPv6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_ETH) {
+		info->is_tunnel = 1;
+		info->outer_ethertype = info->ethertype;
+		info->outer_l2_len = info->l2_len;
+		info->outer_l3_len = info->l3_len;
+		info->outer_l4_proto = info->l4_proto;
+
+		eth_hdr = (struct ether_hdr *)((char *)vxlan_gpe_hdr +
+			  vxlan_gpe_len);
+
+		parse_ethernet(eth_hdr, info);
+	} else
+		return;
+
+	info->l2_len += ETHER_VXLAN_GPE_HLEN;
+}
+
 /* Parse a gre header */
 static void
 parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
@@ -203,14 +272,14 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
 	struct ipv6_hdr *ipv6_hdr;
 	uint8_t gre_len = 0;
 
-	/* check which fields are supported */
-	if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0)
-		return;
-
 	gre_len += sizeof(struct simple_gre_hdr);
 
 	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
-		gre_len += GRE_KEY_LEN;
+		gre_len += GRE_EXT_LEN;
+	if (gre_hdr->flags & _htons(GRE_SEQUENCE_PRESENT))
+		gre_len += GRE_EXT_LEN;
+	if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))
+		gre_len += GRE_EXT_LEN;
 
 	if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
 		info->is_tunnel = 1;
@@ -342,6 +411,8 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 					info->ethertype);
 			}
 		}
+		if (info->gso_enable)
+			ol_flags |= PKT_TX_UDP_SEG;
 	} else if (info->l4_proto == IPPROTO_TCP) {
 		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
 		tcp_hdr->cksum = 0;
@@ -588,6 +659,10 @@ pkt_copy_split(const struct rte_mbuf *pkt)
  * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
  * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
  *           UDP|TCP|SCTP
+ * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
+ *           UDP|TCP|SCTP
+ * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
+ *           UDP|TCP|SCTP
  * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
  * Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
  * Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
@@ -664,7 +739,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		m = pkts_burst[i];
 		info.is_tunnel = 0;
 		info.pkt_len = rte_pktmbuf_pkt_len(m);
-		tx_ol_flags = 0;
+		tx_ol_flags = m->ol_flags &
+			      (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF);
 		rx_ol_flags = m->ol_flags;
 
 		/* Update the L3/L4 checksum error packet statistics */
@@ -691,9 +767,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 
 				udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
 					info.l3_len);
-				parse_vxlan(udp_hdr, &info, m->packet_type);
-				if (info.is_tunnel)
-					tx_ol_flags |= PKT_TX_TUNNEL_VXLAN;
+				parse_vxlan_gpe(udp_hdr, &info);
+				if (info.is_tunnel) {
+					tx_ol_flags |= PKT_TX_TUNNEL_VXLAN_GPE;
+				} else {
+					parse_vxlan(udp_hdr, &info,
+						    m->packet_type);
+					if (info.is_tunnel)
+						tx_ol_flags |=
+							PKT_TX_TUNNEL_VXLAN;
+				}
 			} else if (info.l4_proto == IPPROTO_GRE) {
 				struct simple_gre_hdr *gre_hdr;
 
@@ -738,6 +821,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 
 		/* step 3: fill the mbuf meta data (flags and header lengths) */
 
+		m->tx_offload = 0;
 		if (info.is_tunnel == 1) {
 			if (info.tunnel_tso_segsz ||
 			    (tx_offloads &
--
cgit 1.2.3-korg