Diffstat (limited to 'app/test-pmd/csumonly.c')
-rw-r--r--  app/test-pmd/csumonly.c | 75
1 file changed, 65 insertions(+), 10 deletions(-)
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 90c81198..aa29f5fc 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -49,7 +49,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -70,6 +69,8 @@
#include <rte_string_fns.h>
#include <rte_flow.h>
#include <rte_gro.h>
+#include <rte_gso.h>
+
#include "testpmd.h"
#define IP_DEFTTL 64 /* from RFC 1340. */
@@ -91,6 +92,7 @@
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
uint16_t ethertype;
+ uint8_t gso_enable;
uint16_t l2_len;
uint16_t l3_len;
uint16_t l4_len;
@@ -381,6 +383,8 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
get_udptcp_checksum(l3_hdr, tcp_hdr,
info->ethertype);
}
+ if (info->gso_enable)
+ ol_flags |= PKT_TX_TCP_SEG;
} else if (info->l4_proto == IPPROTO_SCTP) {
sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len);
sctp_hdr->cksum = 0;
@@ -627,10 +631,16 @@ static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST];
+ struct rte_gso_ctx *gso_ctx;
+ struct rte_mbuf **tx_pkts_burst;
struct rte_port *txp;
struct rte_mbuf *m, *p;
struct ether_hdr *eth_hdr;
void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
+ void **gro_ctx;
+ uint16_t gro_pkts_num;
+ uint8_t gro_enable;
uint16_t nb_rx;
uint16_t nb_tx;
uint16_t nb_prep;
@@ -641,6 +651,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint32_t rx_bad_ip_csum;
uint32_t rx_bad_l4_csum;
struct testpmd_offload_info info;
+ uint16_t nb_segments = 0;
+ int ret;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
@@ -657,23 +669,21 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
nb_pkt_per_burst);
if (unlikely(nb_rx == 0))
return;
- if (unlikely(gro_ports[fs->rx_port].enable))
- nb_rx = rte_gro_reassemble_burst(pkts_burst,
- nb_rx,
- &(gro_ports[fs->rx_port].param));
-
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
fs->rx_packets += nb_rx;
rx_bad_ip_csum = 0;
rx_bad_l4_csum = 0;
+ gro_enable = gro_ports[fs->rx_port].enable;

txp = &ports[fs->tx_port];
testpmd_ol_flags = txp->tx_ol_flags;
memset(&info, 0, sizeof(info));
info.tso_segsz = txp->tso_segsz;
info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
+ if (gso_ports[fs->tx_port].enable)
+ info.gso_enable = 1;

for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
@@ -851,13 +861,57 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
}

+ if (unlikely(gro_enable)) {
+ if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
+ nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
+ &(gro_ports[fs->rx_port].param));
+ } else {
+ gro_ctx = current_fwd_lcore()->gro_ctx;
+ nb_rx = rte_gro_reassemble(pkts_burst, nb_rx, gro_ctx);
+
+ if (++fs->gro_times >= gro_flush_cycles) {
+ gro_pkts_num = rte_gro_get_pkt_count(gro_ctx);
+ if (gro_pkts_num > MAX_PKT_BURST - nb_rx)
+ gro_pkts_num = MAX_PKT_BURST - nb_rx;
+
+ nb_rx += rte_gro_timeout_flush(gro_ctx, 0,
+ RTE_GRO_TCP_IPV4,
+ &pkts_burst[nb_rx],
+ gro_pkts_num);
+ fs->gro_times = 0;
+ }
+ }
+ }
+
+ if (gso_ports[fs->tx_port].enable == 0)
+ tx_pkts_burst = pkts_burst;
+ else {
+ gso_ctx = &(current_fwd_lcore()->gso_ctx);
+ gso_ctx->gso_size = gso_max_segment_size;
+ for (i = 0; i < nb_rx; i++) {
+ ret = rte_gso_segment(pkts_burst[i], gso_ctx,
+ &gso_segments[nb_segments],
+ GSO_MAX_PKT_BURST - nb_segments);
+ if (ret >= 0)
+ nb_segments += ret;
+ else {
+ RTE_LOG(DEBUG, USER1,
+ "Unable to segment packet");
+ rte_pktmbuf_free(pkts_burst[i]);
+ }
+ }
+
+ tx_pkts_burst = gso_segments;
+ nb_rx = nb_segments;
+ }
+
nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
- pkts_burst, nb_rx);
+ tx_pkts_burst, nb_rx);
if (nb_prep != nb_rx)
printf("Preparing packet burst to transmit failed: %s\n",
rte_strerror(rte_errno));

- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
nb_prep);

/*
@@ -868,7 +922,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
rte_delay_us(burst_tx_delay_time);
nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- &pkts_burst[nb_tx], nb_rx - nb_tx);
+ &tx_pkts_burst[nb_tx], nb_rx - nb_tx);
}
}
fs->tx_packets += nb_tx;
@@ -881,9 +935,10 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (unlikely(nb_tx < nb_rx)) {
fs->fwd_dropped += (nb_rx - nb_tx);
do {
- rte_pktmbuf_free(pkts_burst[nb_tx]);
+ rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
} while (++nb_tx < nb_rx);
}
+
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
end_tsc = rte_rdtsc();
core_cycles = (end_tsc - start_tsc);
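
For reference, the following is a minimal sketch (not part of the commit) of the rte_gso_segment() call pattern that the new GSO branch above relies on. The helper name segment_burst and the GSO_MAX_SEGS bound are illustrative stand-ins for testpmd's GSO_MAX_PKT_BURST, and the sketch assumes that 'ctx' has already been initialized elsewhere (direct/indirect mempools, gso_types, gso_size), as testpmd does per forwarding lcore.

/*
 * Sketch: segment a received burst with an already-initialized
 * rte_gso_ctx, following the same error handling as the hunk above.
 */
#include <rte_gso.h>
#include <rte_mbuf.h>

#define GSO_MAX_SEGS 512	/* assumed output array size */

static uint16_t
segment_burst(struct rte_mbuf **pkts, uint16_t nb_pkts,
	      struct rte_gso_ctx *ctx, struct rte_mbuf **segs)
{
	uint16_t nb_segs = 0;
	uint16_t i;
	int ret;

	for (i = 0; i < nb_pkts; i++) {
		/*
		 * rte_gso_segment() writes the resulting mbufs into segs[]
		 * and returns their count; a negative return means the
		 * packet could not be segmented, in which case the input
		 * mbuf is freed here, as in the patch.
		 */
		ret = rte_gso_segment(pkts[i], ctx, &segs[nb_segs],
				GSO_MAX_SEGS - nb_segs);
		if (ret >= 0)
			nb_segs += ret;
		else
			rte_pktmbuf_free(pkts[i]);
	}
	return nb_segs;
}

The segmented array then takes the place of the original burst for rte_eth_tx_prepare()/rte_eth_tx_burst(), which is why the patch introduces tx_pkts_burst rather than transmitting pkts_burst directly.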