Diffstat (limited to 'drivers/net/e1000/em_rxtx.c')
-rw-r--r--  drivers/net/e1000/em_rxtx.c  |  113
1 file changed, 99 insertions, 14 deletions
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 02fae100..7d2ac4eb 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -85,6 +85,7 @@ struct em_rx_queue {
struct em_rx_entry *sw_ring; /**< address of RX software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t rx_tail; /**< current value of RDT register. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
@@ -163,6 +164,7 @@ struct em_tx_queue {
uint8_t wthresh; /**< Write-back threshold register. */
struct em_ctx_info ctx_cache;
/**< Hardware context history.*/
+ uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
};
#if 1
@@ -1151,6 +1153,36 @@ em_reset_tx_queue(struct em_tx_queue *txq)
memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
}
+uint64_t
+em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+
+ RTE_SET_USED(dev);
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ return tx_offload_capa;
+}
+
+uint64_t
+em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_queue_offload_capa;
+
+ /*
+ * As only one Tx queue can be used, let the per-queue offload
+ * capabilities be the same as the per-port offload capabilities,
+ * for convenience.
+ */
+ tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev);
+
+ return tx_queue_offload_capa;
+}
+
int
eth_em_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1163,9 +1195,12 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
struct e1000_hw *hw;
uint32_t tsize;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -1269,6 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
em_reset_tx_queue(txq);
dev->data->tx_queues[queue_idx] = txq;
+ txq->offloads = offloads;
return 0;
}
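
The effective per-queue Tx offload set is the union of the port-level txmode.offloads and the queue-level tx_conf->offloads, as computed at the top of eth_em_tx_queue_setup() above. A minimal application-side sketch of that split, assuming port id 0 and purely illustrative descriptor counts and flags (none of this is part of the patch):

    struct rte_eth_conf port_conf = { 0 };
    struct rte_eth_txconf tx_conf;
    struct rte_eth_dev_info dev_info;

    /* Port-level offload: applied to every Tx queue on the port. */
    port_conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM;

    rte_eth_dev_info_get(0, &dev_info);
    rte_eth_dev_configure(0, 1, 1, &port_conf);

    /* Queue-level offload: requested for this queue only. */
    tx_conf = dev_info.default_txconf;
    tx_conf.offloads = DEV_TX_OFFLOAD_VLAN_INSERT;

    /* txq->offloads ends up as IPV4_CKSUM | VLAN_INSERT. */
    rte_eth_tx_queue_setup(0, 0, 512, rte_eth_dev_socket_id(0), &tx_conf);
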
@@ -1313,6 +1349,44 @@ em_reset_rx_queue(struct em_rx_queue *rxq)
rxq->pkt_last_seg = NULL;
}
+uint64_t
+em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_offload_capa;
+ uint32_t max_rx_pktlen;
+
+ max_rx_pktlen = em_get_max_pktlen(dev);
+
+ rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER;
+ if (max_rx_pktlen > ETHER_MAX_LEN)
+ rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ return rx_offload_capa;
+}
+
+uint64_t
+em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_queue_offload_capa;
+
+ /*
+ * As only one Rx queue can be used, let the per-queue offload
+ * capabilities be the same as the per-port offload capabilities,
+ * for convenience.
+ */
+ rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
+
+ return rx_queue_offload_capa;
+}
+
int
eth_em_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1325,9 +1399,12 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
struct em_rx_queue *rxq;
struct e1000_hw *hw;
uint32_t rsize;
+ uint64_t offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -1382,8 +1459,10 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
@@ -1395,6 +1474,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
dev->data->rx_queues[queue_idx] = rxq;
em_reset_rx_queue(rxq);
+ rxq->offloads = offloads;
return 0;
}
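
With the flag-based API the CRC policy becomes an Rx offload rather than the old rxmode.hw_strip_crc bit field; rte_eth_dev_must_keep_crc() is the ethdev helper the driver uses above to decide whether the application asked for the CRC to be preserved. A hedged application-side sketch, with the port id and queue counts purely illustrative:

    struct rte_eth_conf port_conf = { 0 };

    /* Ask the PMD to leave the 4-byte Ethernet CRC on received frames. */
    port_conf.rxmode.offloads = DEV_RX_OFFLOAD_KEEP_CRC;
    rte_eth_dev_configure(0, 1, 1, &port_conf);

    /* eth_em_rx_queue_setup() will then set rxq->crc_len to ETHER_CRC_LEN,
     * and eth_em_rx_init() below leaves E1000_RCTL_SECRC cleared. */
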
@@ -1646,6 +1726,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
{
struct e1000_hw *hw;
struct em_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode;
uint32_t rctl;
uint32_t rfctl;
uint32_t rxcsum;
@@ -1654,6 +1735,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
int ret;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ rxmode = &dev->data->dev_conf.rxmode;
/*
* Make sure receives are disabled while setting
@@ -1713,9 +1795,10 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- rxq->crc_len =
- (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
- 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
bus_addr = rxq->rx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_RDLEN(i),
@@ -1745,7 +1828,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* to avoid splitting packets that don't fit into
* one buffer.
*/
- if (dev->data->dev_conf.rxmode.jumbo_frame ||
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
rctl_bsize < ETHER_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1755,7 +1838,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
}
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1768,7 +1851,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
- if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= E1000_RXCSUM_IPOFL;
else
rxcsum &= ~E1000_RXCSUM_IPOFL;
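
Note that DEV_RX_OFFLOAD_CHECKSUM is not a separate flag but a convenience mask defined in rte_ethdev.h, roughly as below, so the test above enables E1000_RXCSUM_IPOFL whenever any of the three checksum offloads was requested:

    #define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
                                     DEV_RX_OFFLOAD_UDP_CKSUM | \
                                     DEV_RX_OFFLOAD_TCP_CKSUM)
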
@@ -1780,24 +1863,24 @@ eth_em_rx_init(struct rte_eth_dev *dev)
if ((hw->mac.type == e1000_ich9lan ||
hw->mac.type == e1000_pch2lan ||
hw->mac.type == e1000_ich10lan) &&
- dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
}
if (hw->mac.type == e1000_pch2lan) {
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
else
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
}
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.hw_strip_crc)
- rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
- else
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+ else
+ rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
@@ -1814,7 +1897,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
/*
* Configure support of jumbo frames, if any.
*/
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
rctl |= E1000_RCTL_LPE;
else
rctl &= ~E1000_RCTL_LPE;
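
Jumbo support likewise moves from rxmode.jumbo_frame to an offload flag. A minimal application-side sketch, assuming port id 0 and an illustrative 9000-byte maximum frame size:

    struct rte_eth_conf port_conf = { 0 };

    port_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
    port_conf.rxmode.max_rx_pkt_len = 9000;
    rte_eth_dev_configure(0, 1, 1, &port_conf);

    /* eth_em_rx_init() then sets E1000_RCTL_LPE and, as shown earlier,
     * forces the scattered receive path when the configured mbuf buffer
     * size cannot hold a full frame. */
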
@@ -1894,6 +1977,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->scattered_rx = dev->data->scattered_rx;
qinfo->nb_desc = rxq->nb_rx_desc;
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -1911,4 +1995,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.offloads = txq->offloads;
}
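
The new em_get_rx/tx_port_offloads_capa() and em_get_rx/tx_queue_offloads_capa() helpers are presumably reported to applications through the device's dev_info callback in em_ethdev.c, which is outside this file's diff. A hedged sketch of how an application consumes those capabilities before configuring the port, with the port id and requested flags purely illustrative:

    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf port_conf = { 0 };
    uint64_t want_rx = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_VLAN_STRIP;

    rte_eth_dev_info_get(0, &dev_info);
    if ((want_rx & dev_info.rx_offload_capa) != want_rx)
            return -ENOTSUP; /* requested Rx offloads not supported */

    port_conf.rxmode.offloads = want_rx;
    rte_eth_dev_configure(0, 1, 1, &port_conf);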