Diffstat (limited to 'examples/packet_ordering/main.c')
-rw-r--r--	examples/packet_ordering/main.c	21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 3c88b86e..49ae35b8 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -216,7 +216,7 @@ parse_args(int argc, char **argv)
 	}
 
 	argv[optind-1] = prgname;
 
-	optind = 0; /* reset getopt lib */
+	optind = 1; /* reset getopt lib */
 	return 0;
 }
@@ -229,7 +229,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
 
 	/* free the mbufs which failed from transmit */
 	app_stats.tx.ro_tx_failed_pkts += count;
-	RTE_LOG(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
+	RTE_LOG_DP(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
 	pktmbuf_free_bulk(unsent, count);
 }
@@ -410,7 +410,7 @@ rx_thread(struct rte_ring *ring_out)
 			nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
 				pkts, MAX_PKTS_BURST);
 			if (nb_rx_pkts == 0) {
-				RTE_LOG(DEBUG, REORDERAPP,
+				RTE_LOG_DP(DEBUG, REORDERAPP,
 				"%s():Received zero packets\n", __func__);
 				continue;
 			}
@@ -421,8 +421,8 @@ rx_thread(struct rte_ring *ring_out)
 				pkts[i++]->seqn = seqn++;
 
 			/* enqueue to rx_to_workers ring */
-			ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
-					nb_rx_pkts);
+			ret = rte_ring_enqueue_burst(ring_out,
+					(void *)pkts, nb_rx_pkts, NULL);
 			app_stats.rx.enqueue_pkts += ret;
 			if (unlikely(ret < nb_rx_pkts)) {
 				app_stats.rx.enqueue_failed_pkts +=
@@ -462,7 +462,7 @@ worker_thread(void *args_ptr)
 
 		/* dequeue the mbufs from rx_to_workers ring */
 		burst_size = rte_ring_dequeue_burst(ring_in,
-				(void *)burst_buffer, MAX_PKTS_BURST);
+				(void *)burst_buffer, MAX_PKTS_BURST, NULL);
 		if (unlikely(burst_size == 0))
 			continue;
@@ -473,7 +473,8 @@ worker_thread(void *args_ptr)
 			burst_buffer[i++]->port ^= xor_val;
 
 		/* enqueue the modified mbufs to workers_to_tx ring */
-		ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
+		ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
+				burst_size, NULL);
 		__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
 		if (unlikely(ret < burst_size)) {
 			/* Return the mbufs to their respective pool, dropping packets */
@@ -509,7 +510,7 @@ send_thread(struct send_thread_args *args)
 
 		/* deque the mbufs from workers_to_tx ring */
 		nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
-				(void *)mbufs, MAX_PKTS_BURST);
+				(void *)mbufs, MAX_PKTS_BURST, NULL);
 		if (unlikely(nb_dq_mbufs == 0))
 			continue;
@@ -522,7 +523,7 @@ send_thread(struct send_thread_args *args)
 
 			if (ret == -1 && rte_errno == ERANGE) {
 				/* Too early pkts should be transmitted out directly */
-				RTE_LOG(DEBUG, REORDERAPP,
+				RTE_LOG_DP(DEBUG, REORDERAPP,
 						"%s():Cannot reorder early packet "
 						"direct enqueuing to TX\n", __func__);
 				outp = mbufs[i]->port;
@@ -594,7 +595,7 @@ tx_thread(struct rte_ring *ring_in)
 
 		/* deque the mbufs from workers_to_tx ring */
 		dqnum = rte_ring_dequeue_burst(ring_in,
-				(void *)mbufs, MAX_PKTS_BURST);
+				(void *)mbufs, MAX_PKTS_BURST, NULL);
 		if (unlikely(dqnum == 0))
 			continue;
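Note on the parse_args() hunk: POSIX specifies setting optind to 1 to restart getopt() scanning, while optind = 0 is a glibc extension that additionally reinitialises getopt's hidden internal state, so 1 is the portable reset value. A minimal sketch of the pattern, assuming a hypothetical option string "p:" that is not taken from the example app:

	#include <getopt.h>

	/* Hypothetical second-stage parser: scans argv after an earlier
	 * getopt() pass (e.g. EAL argument parsing) has advanced optind. */
	static int
	parse_app_args(int argc, char **argv)
	{
		int opt;

		while ((opt = getopt(argc, argv, "p:")) != -1) {
			switch (opt) {
			case 'p':
				/* handle -p <value>; illustration only */
				break;
			default:
				return -1;
			}
		}

		optind = 1; /* POSIX-portable reset so a later pass starts clean */
		return 0;
	}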
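Note on the RTE_LOG to RTE_LOG_DP hunks: RTE_LOG_DP() is the data-path variant of the logging macro. Messages whose level is below the compile-time RTE_LOG_DP_LEVEL threshold (RTE_LOG_INFO by default, so DEBUG is dropped) are removed at build time, which makes per-packet debug logging free in release builds, whereas RTE_LOG() is always evaluated at run time. A minimal sketch, reusing the REORDERAPP log type the example app defines as RTE_LOGTYPE_USER1:

	#include <rte_log.h>

	#define RTE_LOGTYPE_REORDERAPP RTE_LOGTYPE_USER1

	static void
	log_sketch(void)
	{
		/* Always compiled in; filtered only by the runtime log level. */
		RTE_LOG(INFO, REORDERAPP, "control-path message\n");

		/* Compiled out entirely when RTE_LOG_DEBUG > RTE_LOG_DP_LEVEL,
		 * so it costs nothing on the fast path in default builds. */
		RTE_LOG_DP(DEBUG, REORDERAPP, "per-packet message\n");
	}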
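Note on the ring hunks: DPDK 17.05 added a fourth parameter to rte_ring_enqueue_burst() and rte_ring_dequeue_burst(). It is an optional out-parameter that reports the ring's free space (enqueue) or the number of entries still available (dequeue) after the call; passing NULL, as every call site above does, ignores it. A minimal sketch of the new calling convention, with an illustrative burst size and names not taken from the example app:

	#include <rte_ring.h>

	#define BURST 32 /* illustrative burst size */

	static void
	ring_burst_sketch(struct rte_ring *r, void *objs[BURST])
	{
		unsigned int n, free_space, avail;

		/* Enqueue up to BURST pointers; on return, free_space holds
		 * the room left in the ring. Pass NULL to ignore it. */
		n = rte_ring_enqueue_burst(r, objs, BURST, &free_space);

		/* Dequeue up to n pointers; avail reports how many entries
		 * remain in the ring after the call. */
		n = rte_ring_dequeue_burst(r, objs, n, &avail);
		(void)n;
	}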