Diffstat (limited to 'drivers/net/e1000/igb_rxtx.c')
-rw-r--r--  drivers/net/e1000/igb_rxtx.c  239
1 file changed, 231 insertions, 8 deletions
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index dbd37acc..b3b601b7 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,18 +65,28 @@
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
+#include <rte_net.h>
#include <rte_string_fns.h>
#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IGB_TX_IEEE1588_TMST 0
+#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IGB_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
+ PKT_TX_TCP_SEG | \
+ IGB_TX_IEEE1588_TMST)
+
+#define IGB_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
@@ -605,7 +615,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/*
* Set the Transmit Descriptor Tail (TDT).
*/
- E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
@@ -616,6 +626,52 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/*********************************************************************
*
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ /* Check some limitations for TSO in hardware */
+ if (m->ol_flags & PKT_TX_TCP_SEG)
+ if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
+ (m->l2_len + m->l3_len + m->l4_len >
+ IGB_TSO_MAX_HDRLEN)) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
* RX functions
*
**********************************************************************/
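
The eth_igb_prep_pkts() handler added above is wired to dev->tx_pkt_prepare later in this patch, so applications reach it through the generic rte_eth_tx_prepare() call. A minimal usage sketch, not part of this commit (port_id, queue_id and the pkts array are assumed to be set up elsewhere; send_burst() is a hypothetical helper):

#include <rte_ethdev.h>
#include <rte_errno.h>

/* Hypothetical helper: run the PMD's tx_prepare checks, then transmit. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* For igb ports this lands in eth_igb_prep_pkts(): TSO limits,
	 * unsupported offload flags and checksum fields are checked here.
	 */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep != nb_pkts) {
		/* pkts[nb_prep] was rejected and rte_errno says why; a real
		 * application would drop or fix up pkts[nb_prep..] here.
		 */
	}

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}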
@@ -1227,6 +1283,132 @@ eth_igb_tx_queue_release(void *txq)
igb_tx_queue_release(txq);
}
+static int
+igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
+{
+ struct igb_tx_entry *sw_ring;
+ volatile union e1000_adv_tx_desc *txr;
+ uint16_t tx_first; /* First segment analyzed. */
+ uint16_t tx_id; /* Current segment being processed. */
+ uint16_t tx_last; /* Last segment in the current packet. */
+ uint16_t tx_next; /* First segment of the next packet. */
+ int count;
+
+ if (txq != NULL) {
+ count = 0;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+
+ /*
+ * tx_tail is the last sent packet on the sw_ring. Go to the end
+ * of that packet (the last segment in the packet chain) and
+ * then the next segment will be the start of the oldest segment
+ * in the sw_ring. This is the first packet we will attempt
+ * to free.
+ */
+
+ /* Get last segment in most recently added packet. */
+ tx_first = sw_ring[txq->tx_tail].last_id;
+
+ /* Get the next segment, which is the oldest segment in ring. */
+ tx_first = sw_ring[tx_first].next_id;
+
+ /* Set the current index to the first. */
+ tx_id = tx_first;
+
+ /*
+ * Loop through each packet. For each packet, verify that an
+ * mbuf exists and that the last segment is free. If so, free
+ * it and move on.
+ */
+ while (1) {
+ tx_last = sw_ring[tx_id].last_id;
+
+ if (sw_ring[tx_last].mbuf) {
+ if (txr[tx_last].wb.status &
+ E1000_TXD_STAT_DD) {
+ /*
+ * Increment the number of packets
+ * freed.
+ */
+ count++;
+
+ /* Get the start of the next packet. */
+ tx_next = sw_ring[tx_last].next_id;
+
+ /*
+ * Loop through all segments in a
+ * packet.
+ */
+ do {
+ rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+ sw_ring[tx_id].mbuf = NULL;
+ sw_ring[tx_id].last_id = tx_id;
+
+ /* Move to the next segment. */
+ tx_id = sw_ring[tx_id].next_id;
+
+ } while (tx_id != tx_next);
+
+ if (unlikely(count == (int)free_cnt))
+ break;
+ } else
+ /*
+ * mbuf still in use, nothing left to
+ * free.
+ */
+ break;
+ } else {
+ /*
+ * There are multiple reasons to be here:
+ * 1) All the packets on the ring have been
+ * freed - tx_id is equal to tx_first
+ * and some packets have been freed.
+ * - Done, exit
+ * 2) The interface has not sent a ring's worth
+ * of packets yet, so the segment after tail is
+ * still empty. Or a previous call to this
+ * function freed some of the segments but not
+ * all of them, so there is a hole in the list.
+ * Hopefully this is a rare case.
+ * - Walk the list and find the next mbuf. If
+ * there isn't one, then done.
+ */
+ if (likely((tx_id == tx_first) && (count != 0)))
+ break;
+
+ /*
+ * Walk the list and find the next mbuf, if any.
+ */
+ do {
+ /* Move to the next segment. */
+ tx_id = sw_ring[tx_id].next_id;
+
+ if (sw_ring[tx_id].mbuf)
+ break;
+
+ } while (tx_id != tx_first);
+
+ /*
+ * Determine why the previous loop bailed. If there
+ * is no mbuf, we are done.
+ */
+ if (sw_ring[tx_id].mbuf == NULL)
+ break;
+ }
+ }
+ } else
+ count = -ENODEV;
+
+ return count;
+}
+
+int
+eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ return igb_tx_done_cleanup(txq, free_cnt);
+}
+
static void
igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
{
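
igb_tx_done_cleanup() and its eth_igb_tx_done_cleanup() wrapper above are meant to back the generic rte_eth_tx_done_cleanup() call (the dev_ops hookup lives outside this file). A minimal usage sketch, not part of this commit (port_id/queue_id assumed valid; reclaim_tx_mbufs() is a hypothetical helper):

#include <rte_ethdev.h>

/* Hypothetical helper: reclaim mbufs of already-transmitted packets. */
static void
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	/* Ask the PMD to free up to 32 completed packets on this queue;
	 * for igb this walks the ring in igb_tx_done_cleanup() above.
	 */
	int freed = rte_eth_tx_done_cleanup(port_id, queue_id, 32);

	if (freed < 0) {
		/* e.g. -ENOTSUP if the PMD has no callback,
		 * -ENODEV for an invalid port.
		 */
	}
}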
@@ -1364,6 +1546,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
igb_reset_tx_queue(txq, dev);
dev->tx_pkt_burst = eth_igb_xmit_pkts;
+ dev->tx_pkt_prepare = &eth_igb_prep_pkts;
dev->data->tx_queues[queue_idx] = txq;
return 0;
@@ -1512,11 +1695,6 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct igb_rx_queue *rxq;
uint32_t desc = 0;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
@@ -1549,6 +1727,51 @@ eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
}
+int
+eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct igb_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct igb_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
igb_dev_clear_queues(struct rte_eth_dev *dev)
{
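
eth_igb_rx_descriptor_status() and eth_igb_tx_descriptor_status() above provide the per-descriptor status checks exposed to applications as rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status(). A minimal polling sketch, not part of this commit (port_id/queue_id assumed valid; poll_queue_progress() is a hypothetical helper):

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper: peek at ring progress without receiving or sending. */
static void
poll_queue_progress(uint16_t port_id, uint16_t queue_id)
{
	/* DONE at offset 64 means at least that many received packets are
	 * already waiting in this queue's RX ring.
	 */
	if (rte_eth_rx_descriptor_status(port_id, queue_id, 64) ==
			RTE_ETH_RX_DESC_DONE)
		printf("port %u RX queue %u has a deep backlog\n",
		       (unsigned) port_id, (unsigned) queue_id);

	/* FULL means the TX descriptor at that offset is still owned by
	 * the NIC; DONE means it completed and its mbuf can be reclaimed.
	 */
	if (rte_eth_tx_descriptor_status(port_id, queue_id, 64) ==
			RTE_ETH_TX_DESC_FULL)
		printf("port %u TX queue %u is still draining\n",
		       (unsigned) port_id, (unsigned) queue_id);
}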