Diffstat (limited to 'app/test-pmd/csumonly.c')
 app/test-pmd/csumonly.c | 167 ++++++++++++++++++++++++--------------------
 1 file changed, 88 insertions(+), 79 deletions(-)
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index ac4bd8f4..57e6ae27 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -56,7 +56,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
@@ -102,6 +101,8 @@ struct testpmd_offload_info {
uint16_t outer_l3_len;
uint8_t outer_l4_proto;
uint16_t tso_segsz;
+ uint16_t tunnel_tso_segsz;
+ uint32_t pkt_len;
};
/* simplified GRE header */
@@ -318,21 +319,6 @@ parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
info->l2_len = 0;
}
-/* modify the IPv4 or IPv4 source address of a packet */
-static void
-change_ip_addresses(void *l3_hdr, uint16_t ethertype)
-{
- struct ipv4_hdr *ipv4_hdr = l3_hdr;
- struct ipv6_hdr *ipv6_hdr = l3_hdr;
-
- if (ethertype == _htons(ETHER_TYPE_IPv4)) {
- ipv4_hdr->src_addr =
- rte_cpu_to_be_32(rte_be_to_cpu_32(ipv4_hdr->src_addr) + 1);
- } else if (ethertype == _htons(ETHER_TYPE_IPv6)) {
- ipv6_hdr->src_addr[15] = ipv6_hdr->src_addr[15] + 1;
- }
-}
-
/* if possible, calculate the checksum of a packet in hw or sw,
* depending on the testpmd command line configuration */
static uint64_t
@@ -344,13 +330,28 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
struct tcp_hdr *tcp_hdr;
struct sctp_hdr *sctp_hdr;
uint64_t ol_flags = 0;
+ uint32_t max_pkt_len, tso_segsz = 0;
+
+ /* ensure the packet is large enough to require TSO */
+ if (!info->is_tunnel) {
+ max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
+ info->tso_segsz;
+ if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
+ tso_segsz = info->tso_segsz;
+ } else {
+ max_pkt_len = info->outer_l2_len + info->outer_l3_len +
+ info->l2_len + info->l3_len + info->l4_len +
+ info->tunnel_tso_segsz;
+ if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
+ tso_segsz = info->tunnel_tso_segsz;
+ }
if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
ipv4_hdr = l3_hdr;
ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_IPV4;
- if (info->tso_segsz != 0 && info->l4_proto == IPPROTO_TCP) {
+ if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
ol_flags |= PKT_TX_IP_CKSUM;
} else {
if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
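
A minimal standalone sketch of the size gate added in the hunk above (the struct below is a simplified stand-in, not the real struct testpmd_offload_info): TSO is only requested when the packet carries more payload than fits in a single segment, i.e. when pkt_len exceeds all headers plus the segment size.

#include <stdint.h>
#include <stdio.h>

struct offload_info {			/* simplified stand-in for testpmd_offload_info */
	uint16_t l2_len, l3_len, l4_len;
	uint16_t outer_l2_len, outer_l3_len;
	uint16_t tso_segsz, tunnel_tso_segsz;
	uint32_t pkt_len;
	int is_tunnel;
};

/* return the segment size to use, or 0 when TSO should not be flagged */
static uint32_t
effective_tso_segsz(const struct offload_info *info)
{
	uint32_t max_pkt_len;

	if (!info->is_tunnel) {
		max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
			info->tso_segsz;
		if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
			return info->tso_segsz;
	} else {
		max_pkt_len = info->outer_l2_len + info->outer_l3_len +
			info->l2_len + info->l3_len + info->l4_len +
			info->tunnel_tso_segsz;
		if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
			return info->tunnel_tso_segsz;
	}
	return 0;
}

int
main(void)
{
	/* 14+20+20 bytes of headers, 800-byte MSS: a 600-byte packet fits in
	 * one segment (prints 0), a 1500-byte packet does not (prints 800). */
	struct offload_info small = { .l2_len = 14, .l3_len = 20, .l4_len = 20,
		.tso_segsz = 800, .pkt_len = 600 };
	struct offload_info big = small;

	big.pkt_len = 1500;
	printf("%u %u\n", effective_tso_segsz(&small), effective_tso_segsz(&big));
	return 0;
}
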
@@ -382,7 +383,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
} else if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
tcp_hdr->cksum = 0;
- if (info->tso_segsz != 0) {
+ if (tso_segsz) {
ol_flags |= PKT_TX_TCP_SEG;
tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
ol_flags);
@@ -412,12 +413,10 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
return ol_flags;
}
-/* Calculate the checksum of outer header (only vxlan is supported,
- * meaning IP + UDP). The caller already checked that it's a vxlan
- * packet */
+/* Calculate the checksum of the outer header */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
- uint16_t testpmd_ol_flags)
+ uint16_t testpmd_ol_flags, int tso_enabled)
{
struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
@@ -438,10 +437,20 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
if (info->outer_l4_proto != IPPROTO_UDP)
return ol_flags;
- /* outer UDP checksum is always done in software as we have no
- * hardware supporting it today, and no API for it. */
-
udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);
+
+ /* outer UDP checksum is done in software as we have no hardware
+ * supporting it today, and no API for it. On the other hand, for
+ * UDP tunneling such as VXLAN or Geneve, the outer UDP checksum
+ * can be set to zero.
+ *
+ * If a packet will be TSOed into small packets by the NIC, we
+ * cannot set/calculate a non-zero checksum, because it would be
+ * wrong after the packet is split into several small packets.
+ */
+ if (tso_enabled)
+ udp_hdr->dgram_cksum = 0;
+
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
udp_hdr->dgram_cksum = 0;
@@ -609,7 +618,6 @@ pkt_copy_split(const struct rte_mbuf *pkt)
* Receive a burst of packets, and for each packet:
* - parse packet, and try to recognize a supported packet type (1)
* - if it's not a supported packet type, don't touch the packet, else:
- * - modify the IPs in inner headers and in outer headers if any
* - reprocess the checksum of all supported layers. This is done in SW
* or HW, depending on testpmd command line configuration
* - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
@@ -641,7 +649,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint16_t nb_rx;
uint16_t nb_tx;
uint16_t i;
- uint64_t ol_flags;
+ uint64_t rx_ol_flags, tx_ol_flags;
uint16_t testpmd_ol_flags;
uint32_t retry;
uint32_t rx_bad_ip_csum;
@@ -675,19 +683,24 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
testpmd_ol_flags = txp->tx_ol_flags;
memset(&info, 0, sizeof(info));
info.tso_segsz = txp->tso_segsz;
+ info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
void *));
- ol_flags = 0;
- info.is_tunnel = 0;
m = pkts_burst[i];
+ info.is_tunnel = 0;
+ info.pkt_len = rte_pktmbuf_pkt_len(m);
+ tx_ol_flags = 0;
+ rx_ol_flags = m->ol_flags;
/* Update the L3/L4 checksum error packet statistics */
- rx_bad_ip_csum += ((m->ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
- rx_bad_l4_csum += ((m->ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);
+ if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
+ rx_bad_ip_csum += 1;
+ if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
+ rx_bad_l4_csum += 1;
/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
* and inner headers */
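
A standalone sketch of why the statistics test above compares the masked field instead of testing one bit. The bit values below are illustrative only, not the real PKT_RX_IP_CKSUM_* definitions from rte_mbuf.h, but they keep the key property: the "checksum not computed" status reuses both bits, so a bare bit test would also count such packets as bad.

#include <stdint.h>
#include <stdio.h>

/* illustrative encoding of a 2-bit RX checksum status (UNKNOWN/BAD/GOOD/NONE) */
#define IP_CKSUM_BAD	(1ULL << 4)
#define IP_CKSUM_GOOD	(1ULL << 7)
#define IP_CKSUM_NONE	(IP_CKSUM_BAD | IP_CKSUM_GOOD)	/* not computed by HW */
#define IP_CKSUM_MASK	(IP_CKSUM_BAD | IP_CKSUM_GOOD)

int
main(void)
{
	uint64_t flags = IP_CKSUM_NONE;

	/* plain bit test (old style): wrongly reports this packet as bad */
	printf("bit test:  %d\n", (flags & IP_CKSUM_BAD) != 0);
	/* masked comparison (as in the hunk above): correctly reports not-bad */
	printf("mask test: %d\n", (flags & IP_CKSUM_MASK) == IP_CKSUM_BAD);
	return 0;
}
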
@@ -704,18 +717,27 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) {
if (info.l4_proto == IPPROTO_UDP) {
struct udp_hdr *udp_hdr;
+
udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
info.l3_len);
parse_vxlan(udp_hdr, &info, m->packet_type);
+ if (info.is_tunnel)
+ tx_ol_flags |= PKT_TX_TUNNEL_VXLAN;
} else if (info.l4_proto == IPPROTO_GRE) {
struct simple_gre_hdr *gre_hdr;
+
gre_hdr = (struct simple_gre_hdr *)
((char *)l3_hdr + info.l3_len);
parse_gre(gre_hdr, &info);
+ if (info.is_tunnel)
+ tx_ol_flags |= PKT_TX_TUNNEL_GRE;
} else if (info.l4_proto == IPPROTO_IPIP) {
void *encap_ip_hdr;
+
encap_ip_hdr = (char *)l3_hdr + info.l3_len;
parse_encap_ip(encap_ip_hdr, &info);
+ if (info.is_tunnel)
+ tx_ol_flags |= PKT_TX_TUNNEL_IPIP;
}
}
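
A standalone sketch of the dispatch in the hunk above: the outer L4 protocol selects the tunnel parser, and a matching tunnel TX flag is recorded so the driver later knows which header layout the TSO/checksum offload applies to. The flag values below are placeholders, not the real PKT_TX_TUNNEL_* bits.

#include <stdint.h>
#include <netinet/in.h>		/* IPPROTO_UDP, IPPROTO_GRE, IPPROTO_IPIP */

#define TX_TUNNEL_VXLAN	(1ULL << 0)	/* placeholders for PKT_TX_TUNNEL_* */
#define TX_TUNNEL_GRE	(1ULL << 1)
#define TX_TUNNEL_IPIP	(1ULL << 2)

/* pick the tunnel TX flag from the outer L4 protocol; in csumonly.c the
 * flag is only set once the corresponding parser confirms the tunnel
 * (e.g. parse_vxlan() checks the UDP destination port) */
uint64_t
tunnel_tx_flag(uint8_t outer_l4_proto)
{
	switch (outer_l4_proto) {
	case IPPROTO_UDP:
		return TX_TUNNEL_VXLAN;
	case IPPROTO_GRE:
		return TX_TUNNEL_GRE;
	case IPPROTO_IPIP:
		return TX_TUNNEL_IPIP;
	default:
		return 0;	/* not a recognized tunnel */
	}
}
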
@@ -725,38 +747,35 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
}
- /* step 2: change all source IPs (v4 or v6) so we need
- * to recompute the chksums even if they were correct */
-
- change_ip_addresses(l3_hdr, info.ethertype);
- if (info.is_tunnel == 1)
- change_ip_addresses(outer_l3_hdr, info.outer_ethertype);
-
- /* step 3: depending on user command line configuration,
+ /* step 2: depending on user command line configuration,
* recompute checksum either in software or flag the
* mbuf to offload the calculation to the NIC. If TSO
* is configured, prepare the mbuf for TCP segmentation. */
/* process checksums of inner headers first */
- ol_flags |= process_inner_cksums(l3_hdr, &info, testpmd_ol_flags);
+ tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
+ testpmd_ol_flags);
/* Then process outer headers if any. Note that the software
* checksum will be wrong if one of the inner checksums is
* processed in hardware. */
if (info.is_tunnel == 1) {
- ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
- testpmd_ol_flags);
+ tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
+ testpmd_ol_flags,
+ !!(tx_ol_flags & PKT_TX_TCP_SEG));
}
- /* step 4: fill the mbuf meta data (flags and header lengths) */
+ /* step 3: fill the mbuf meta data (flags and header lengths) */
if (info.is_tunnel == 1) {
- if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) {
+ if (info.tunnel_tso_segsz ||
+ testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) {
m->outer_l2_len = info.outer_l2_len;
m->outer_l3_len = info.outer_l3_len;
m->l2_len = info.l2_len;
m->l3_len = info.l3_len;
m->l4_len = info.l4_len;
+ m->tso_segsz = info.tunnel_tso_segsz;
}
else {
/* if there is an outer UDP cksum
@@ -776,9 +795,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
m->l2_len = info.l2_len;
m->l3_len = info.l3_len;
m->l4_len = info.l4_len;
+ m->tso_segsz = info.tso_segsz;
}
- m->tso_segsz = info.tso_segsz;
- m->ol_flags = ol_flags;
+ m->ol_flags = tx_ol_flags;
/* Do split & copy for the packet. */
if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
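
A worked example, with illustrative header sizes, of the length metadata filled in by the tunnel branch above. For an untagged Ether/IPv4/UDP/VXLAN/Ether/IPv4/TCP packet, csumonly.c counts the outer UDP and VXLAN headers as part of the inner l2_len, so the offsets below are what a driver doing tunnel TSO works with.

#include <stdio.h>

int
main(void)
{
	unsigned int outer_l2 = 14;		/* outer Ethernet */
	unsigned int outer_l3 = 20;		/* outer IPv4 */
	unsigned int l2 = 8 + 8 + 14;		/* outer UDP + VXLAN + inner Ethernet */
	unsigned int l3 = 20;			/* inner IPv4 */
	unsigned int l4 = 20;			/* inner TCP */

	/* where the inner TCP payload starts: the NIC needs all of these
	 * lengths to rebuild the headers on every TSO segment */
	printf("inner payload offset = %u\n",
	       outer_l2 + outer_l3 + l2 + l3 + l4);	/* prints 104 */
	return 0;
}
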
@@ -792,32 +811,19 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
/* if verbose mode is enabled, dump debug info */
if (verbose_level > 0) {
- struct {
- uint64_t flag;
- uint64_t mask;
- } tx_flags[] = {
- { PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM },
- { PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK },
- { PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK },
- { PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK },
- { PKT_TX_IPV4, PKT_TX_IPV4 },
- { PKT_TX_IPV6, PKT_TX_IPV6 },
- { PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM },
- { PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4 },
- { PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6 },
- { PKT_TX_TCP_SEG, PKT_TX_TCP_SEG },
- };
- unsigned j;
- const char *name;
+ char buf[256];
printf("-----------------\n");
- printf("mbuf=%p, pkt_len=%u, nb_segs=%hhu:\n",
- m, m->pkt_len, m->nb_segs);
+ printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%hhu:\n",
+ fs->rx_port, m, m->pkt_len, m->nb_segs);
/* dump rx parsed packet info */
+ rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
printf("rx: l2_len=%d ethertype=%x l3_len=%d "
- "l4_proto=%d l4_len=%d\n",
+ "l4_proto=%d l4_len=%d flags=%s\n",
info.l2_len, rte_be_to_cpu_16(info.ethertype),
- info.l3_len, info.l4_proto, info.l4_len);
+ info.l3_len, info.l4_proto, info.l4_len, buf);
+ if (rx_ol_flags & PKT_RX_LRO)
+ printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
if (info.is_tunnel == 1)
printf("rx: outer_l2_len=%d outer_ethertype=%x "
"outer_l3_len=%d\n", info.outer_l2_len,
@@ -832,19 +838,22 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
printf("tx: m->l2_len=%d m->l3_len=%d "
"m->l4_len=%d\n",
m->l2_len, m->l3_len, m->l4_len);
- if ((info.is_tunnel == 1) &&
- (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM))
- printf("tx: m->outer_l2_len=%d m->outer_l3_len=%d\n",
- m->outer_l2_len, m->outer_l3_len);
- if (info.tso_segsz != 0)
+ if (info.is_tunnel == 1) {
+ if (testpmd_ol_flags &
+ TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
+ printf("tx: m->outer_l2_len=%d "
+ "m->outer_l3_len=%d\n",
+ m->outer_l2_len,
+ m->outer_l3_len);
+ if (info.tunnel_tso_segsz != 0 &&
+ (m->ol_flags & PKT_TX_TCP_SEG))
+ printf("tx: m->tso_segsz=%d\n",
+ m->tso_segsz);
+ } else if (info.tso_segsz != 0 &&
+ (m->ol_flags & PKT_TX_TCP_SEG))
printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
- printf("tx: flags=");
- for (j = 0; j < sizeof(tx_flags)/sizeof(*tx_flags); j++) {
- name = rte_get_tx_ol_flag_name(tx_flags[j].flag);
- if ((m->ol_flags & tx_flags[j].mask) ==
- tx_flags[j].flag)
- printf("%s ", name);
- }
+ rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
+ printf("tx: flags=%s", buf);
printf("\n");
}
}
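
A brief usage sketch of the rte_get_rx_ol_flag_list()/rte_get_tx_ol_flag_list() helpers from rte_mbuf.h that replace the hand-rolled flag table above; it assumes a DPDK build environment, and error handling is omitted as in the hunk itself.

#include <stdio.h>
#include <rte_mbuf.h>

/* dump the RX flags seen on the received mbuf and the TX flags that were
 * just set on it, in the same style as the verbose output above */
void
dump_ol_flags(uint64_t rx_ol_flags, uint64_t tx_ol_flags)
{
	char buf[256];

	rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
	printf("rx: flags=%s\n", buf);
	rte_get_tx_ol_flag_list(tx_ol_flags, buf, sizeof(buf));
	printf("tx: flags=%s\n", buf);
}
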