author     Luca Boccassi <luca.boccassi@gmail.com>  2017-11-08 14:15:11 +0000
committer  Luca Boccassi <luca.boccassi@gmail.com>  2017-11-08 14:45:54 +0000
commit     055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree       8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /drivers/net/e1000
parent     f239aed5e674965691846e8ce3f187dd47523689 (diff)
New upstream version 17.11-rc3
Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/e1000')
 drivers/net/e1000/Makefile     |  5
 drivers/net/e1000/em_ethdev.c  | 28
 drivers/net/e1000/em_rxtx.c    | 22
 drivers/net/e1000/igb_ethdev.c | 56
 drivers/net/e1000/igb_flow.c   |  9
 drivers/net/e1000/igb_pf.c     |  1
 drivers/net/e1000/igb_rxtx.c   | 73
 7 files changed, 136 insertions(+), 58 deletions(-)
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index ffdf36d3..3f0344b4 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -38,6 +38,9 @@ LIB = librte_pmd_e1000.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
EXPORT_MAP := rte_pmd_e1000_version.map
@@ -68,7 +71,7 @@ endif
# Add extra flags for base driver files (also known as shared code)
# to disable warnings in them
#
-BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
VPATH += $(SRCDIR)/base
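Two independent build fixes land here: the new LDLIBS lines spell out the PMD's library dependencies explicitly (including rte_bus_pci, split out of EAL in 17.11), and wrapping the wildcard expansion in $(sort ...) makes the base-driver object list independent of filesystem readdir order, which keeps builds reproducible across machines.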
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 3d4ab936..a0c3b4dc 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -43,11 +43,11 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
@@ -72,7 +72,7 @@ static void eth_em_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_em_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_em_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
-static void eth_em_stats_get(struct rte_eth_dev *dev,
+static int eth_em_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static void eth_em_stats_reset(struct rte_eth_dev *dev);
static void eth_em_infos_get(struct rte_eth_dev *dev,
@@ -99,7 +99,7 @@ static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
-static void eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev);
@@ -341,7 +341,6 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->device_id = pci_dev->id.device_id;
@@ -432,7 +431,8 @@ static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_em_pmd = {
.id_table = pci_id_em_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_em_pci_probe,
.remove = eth_em_pci_remove,
};
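RTE_PCI_DRV_IOVA_AS_VA is new in 17.11: it tells EAL that the PMD can use process virtual addresses as IO addresses, so when every probed driver sets it, EAL may run in RTE_IOVA_VA mode and skip VA-to-PA translation. A minimal sketch, assuming 17.11's rte_eal_iova_mode() accessor, of how an application can observe the mode EAL picked:

    #include <stdio.h>
    #include <rte_eal.h>

    /* Sketch only: RTE_IOVA_VA is selectable only when all probed
     * drivers advertise RTE_PCI_DRV_IOVA_AS_VA. */
    static void report_iova_mode(void)
    {
        if (rte_eal_iova_mode() == RTE_IOVA_VA)
            printf("EAL maps IOVAs 1:1 to virtual addresses\n");
        else
            printf("EAL uses physical addresses as IOVAs\n");
    }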
@@ -668,7 +668,12 @@ eth_em_start(struct rte_eth_dev *dev)
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
ETH_VLAN_EXTEND_MASK;
- eth_em_vlan_offload_set(dev, mask);
+ ret = eth_em_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to update vlan offload");
+ em_dev_clear_queues(dev);
+ return ret;
+ }
/* Set Interrupt Throttling Rate to maximum allowed value. */
E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);
@@ -906,7 +911,7 @@ em_hardware_init(struct e1000_hw *hw)
}
/* This function is based on em_update_stats_counters() in e1000/if_em.c */
-static void
+static int
eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1006,7 +1011,7 @@ eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
}
if (rte_stats == NULL)
- return;
+ return -EINVAL;
/* Rx Errors */
rte_stats->imissed = stats->mpc;
@@ -1021,6 +1026,7 @@ eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
rte_stats->opackets = stats->gptc;
rte_stats->ibytes = stats->gorc;
rte_stats->obytes = stats->gotc;
+ return 0;
}
static void
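The stats_get op switching from void to int lets a failure reach the application through rte_eth_stats_get(), which itself returns int as of 17.11. A hedged caller-side sketch:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: check the return value instead of assuming the struct
     * was filled; this PMD now reports -EINVAL for a NULL pointer. */
    static void print_rx_packets(uint16_t port_id)
    {
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) != 0)
            printf("port %" PRIu16 ": stats unavailable\n", port_id);
        else
            printf("port %" PRIu16 ": %" PRIu64 " rx packets\n",
                   port_id, stats.ipackets);
    }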
@@ -1447,7 +1453,7 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_CTRL, reg);
}
-static void
+static int
eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if(mask & ETH_VLAN_STRIP_MASK){
@@ -1463,6 +1469,8 @@ eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
else
em_vlan_hw_filter_disable(dev);
}
+
+ return 0;
}
/*
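The same void-to-int conversion applies to the vlan_offload_set op, so an error can now propagate through rte_eth_dev_set_vlan_offload(); eth_em_start() above checks it and tears down its queues on failure. A minimal application-side sketch using the standard offload mask bits:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: request strip + filter offloads and surface any error. */
    static int enable_vlan_offloads(uint16_t port_id)
    {
        int ret = rte_eth_dev_set_vlan_offload(port_id,
                ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);

        if (ret < 0)
            printf("port %" PRIu16 ": vlan offload failed (%d)\n",
                   port_id, ret);
        return ret;
    }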
@@ -1624,7 +1632,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
rte_em_dev_atomic_read_link_status(dev, &link);
if (link.link_status) {
PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
- dev->data->port_id, (unsigned)link.link_speed,
+ dev->data->port_id, link.link_speed,
link.link_duplex == ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 31819c5b..1d8f0794 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -119,7 +119,7 @@ struct em_rx_queue {
uint16_t nb_rx_hold; /**< number of held free RX desc. */
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
@@ -186,7 +186,7 @@ struct em_tx_queue {
/** Total number of TX descriptors ready to be allocated. */
uint16_t nb_tx_free;
uint16_t queue_id; /**< TX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
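The port_id fields in both queue structures widen from uint8_t to uint16_t, following 17.11's ethdev change that made port identifiers 16-bit so more than 256 ports can be addressed. Any structure caching a port id has to widen with the API; a trivial sketch of a hypothetical application-side mirror:

    /* Sketch: hypothetical per-queue context an application might keep. */
    struct app_queue_ctx {
        uint16_t port_id;   /* was uint8_t before DPDK 17.11 */
        uint16_t queue_id;
    };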
@@ -577,7 +577,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
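rte_mbuf_data_dma_addr() and its variants were renamed to rte_mbuf_data_iova() in the 17.11 IOVA rework; the semantics are unchanged. A sketch of what the accessor is assumed to expand to, buf_iova being the renamed buf_physaddr union member:

    #include <rte_mbuf.h>

    /* Sketch: assumed equivalent of rte_mbuf_data_iova(mb) under the
     * 17.11 mbuf layout. */
    static inline rte_iova_t
    mbuf_data_iova_equiv(const struct rte_mbuf *mb)
    {
        return mb->buf_iova + mb->data_off;
    }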
@@ -675,7 +675,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
/* Check if VLAN present */
pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
return pkt_flags;
}
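PKT_RX_VLAN_PKT becomes PKT_RX_VLAN in 17.11, separating "a VLAN tag was detected" from "the tag was stripped into vlan_tci". A hedged receive-side sketch, where handle_vlan() is a hypothetical application callback:

    #include <rte_mbuf.h>

    static void handle_vlan(uint16_t tci); /* hypothetical callback */

    /* Sketch: vlan_tci is meaningful only when the tag was stripped. */
    static void check_vlan(const struct rte_mbuf *m)
    {
        const uint64_t vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;

        if ((m->ol_flags & vlan_flags) == vlan_flags)
            handle_vlan(m->vlan_tci);
    }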
@@ -799,7 +799,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->buffer_addr = dma_addr;
rxdp->status = 0;
@@ -830,7 +830,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->ol_flags = rxm->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors);
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/*
@@ -979,7 +979,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->buffer_addr = dma;
rxdp->status = 0;
@@ -1056,7 +1056,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->ol_flags = first_seg->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors);
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */
@@ -1289,7 +1289,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct e1000_data_desc *) tz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -1416,7 +1416,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
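rte_mem_phy2mch() drops out because the 17.11 memzone exposes its bus address directly: phys_addr became a union with iova, so the ring address is read straight from the zone with no per-memseg translation. A small sketch, the zone name being purely illustrative:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_memzone.h>

    /* Sketch: print both addresses of a zone; "example_ring" is a
     * hypothetical reserved-zone name. */
    static void show_ring_addrs(void)
    {
        const struct rte_memzone *mz = rte_memzone_lookup("example_ring");

        if (mz != NULL)
            printf("VA %p, IOVA 0x%" PRIx64 "\n", mz->addr, mz->iova);
    }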
@@ -1652,7 +1652,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
}
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
/* Clear HW ring memory */
rxq->rx_ring[i] = rxd_init;
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index e4f7a9fa..fdc139f3 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -43,11 +43,11 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
@@ -112,7 +112,7 @@ static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igb_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
-static void eth_igb_stats_get(struct rte_eth_dev *dev,
+static int eth_igb_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
@@ -157,7 +157,7 @@ static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
enum rte_vlan_type vlan_type,
uint16_t tpid_id);
-static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
@@ -188,7 +188,7 @@ static void igbvf_promiscuous_disable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_enable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
-static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
+static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
@@ -334,6 +334,8 @@ static const struct rte_pci_id pci_id_igb_map[] = {
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
@@ -816,7 +818,6 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
@@ -1051,7 +1052,6 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -1166,7 +1166,8 @@ static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_driver rte_igb_pmd = {
.id_table = pci_id_igb_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_igb_pci_probe,
.remove = eth_igb_pci_remove,
};
@@ -1189,7 +1190,7 @@ static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
*/
static struct rte_pci_driver rte_igbvf_pmd = {
.id_table = pci_id_igbvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_igbvf_pci_probe,
.remove = eth_igbvf_pci_remove,
};
@@ -1400,7 +1401,12 @@ eth_igb_start(struct rte_eth_dev *dev)
*/
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
ETH_VLAN_EXTEND_MASK;
- eth_igb_vlan_offload_set(dev, mask);
+ ret = eth_igb_vlan_offload_set(dev, mask);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to set vlan offload");
+ igb_dev_clear_queues(dev);
+ return ret;
+ }
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
/* Enable VLAN filter since VMDq always use VLAN filter */
@@ -1828,7 +1834,7 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
}
-static void
+static int
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1838,7 +1844,7 @@ eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
igb_read_stats_registers(hw, stats);
if (rte_stats == NULL)
- return;
+ return -EINVAL;
/* Rx Errors */
rte_stats->imissed = stats->mpc;
@@ -1853,6 +1859,7 @@ eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
rte_stats->opackets = stats->gptc;
rte_stats->ibytes = stats->gorc;
rte_stats->obytes = stats->gotc;
+ return 0;
}
static void
@@ -2093,7 +2100,7 @@ eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return IGBVF_NB_XSTATS;
}
-static void
+static int
eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2103,12 +2110,13 @@ eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
igbvf_read_stats_registers(hw, hw_stats);
if (rte_stats == NULL)
- return;
+ return -EINVAL;
rte_stats->ipackets = hw_stats->gprc;
rte_stats->ibytes = hw_stats->gorc;
rte_stats->opackets = hw_stats->gptc;
rte_stats->obytes = hw_stats->gotc;
+ return 0;
}
static void
@@ -2715,7 +2723,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2 * VLAN_TAG_SIZE);
}
-static void
+static int
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if(mask & ETH_VLAN_STRIP_MASK){
@@ -2738,6 +2746,8 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
else
igb_vlan_hw_extend_disable(dev);
}
+
+ return 0;
}
@@ -4094,7 +4104,7 @@ eth_igb_get_flex_filter(struct rte_eth_dev *dev,
flex_filter.filter_info.priority = filter->priority;
memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
memcpy(flex_filter.filter_info.mask, filter->mask,
- RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
+ RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);
it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
&flex_filter.filter_info);
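This fixes a sizing bug: sizeof(char) is 1 by definition, so the old expression degenerated to filter->len and copied one byte of mask per pattern byte. The flex-filter mask holds one bit per pattern byte, so the right size is the length rounded up to a multiple of CHAR_BIT, then divided by CHAR_BIT. A worked check:

    #include <limits.h>      /* CHAR_BIT */
    #include <rte_common.h>  /* RTE_ALIGN */

    /* Sketch: mask bytes needed for a pattern of `len` bytes.
     * len == 18: the old formula gave 18; this gives 24 / 8 == 3. */
    #define FLEX_MASK_BYTES(len) (RTE_ALIGN((len), CHAR_BIT) / CHAR_BIT)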
@@ -5382,7 +5392,14 @@ eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t mask = 1 << queue_id;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = E1000_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = E1000_RX_VEC_START;
+
+ uint32_t mask = 1 << (queue_id + vec);
E1000_WRITE_REG(hw, E1000_EIMC, mask);
E1000_WRITE_FLUSH(hw);
@@ -5397,7 +5414,12 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- uint32_t mask = 1 << queue_id;
+ uint32_t vec = E1000_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = E1000_RX_VEC_START;
+
+ uint32_t mask = 1 << (queue_id + vec);
uint32_t regval;
regval = E1000_READ_REG(hw, E1000_EIMS);
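Both interrupt helpers previously built the EIMS/EIMC mask as 1 << queue_id, which is off by one whenever a dedicated misc vector is in use: when rte_intr_allow_others() is true, vector 0 (E1000_MISC_VEC_ID) is reserved for link and other non-queue interrupts, and RX queue vectors start at E1000_RX_VEC_START (1). Queue 0 therefore maps to bit 1 (0x2), queue 1 to bit 2 (0x4), and so on; without the offset, toggling queue 0's interrupt would have hit the misc vector instead.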
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index ed2ecc40..22bad265 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -47,7 +47,6 @@
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
@@ -1346,7 +1345,7 @@ igb_flow_create(struct rte_eth_dev *dev,
if (!ret) {
ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
sizeof(struct igb_ntuple_filter_ele), 0);
- (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ rte_memcpy(&ntuple_filter_ptr->filter_info,
&ntuple_filter,
sizeof(struct rte_eth_ntuple_filter));
TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
@@ -1368,7 +1367,7 @@ igb_flow_create(struct rte_eth_dev *dev,
ethertype_filter_ptr = rte_zmalloc(
"igb_ethertype_filter",
sizeof(struct igb_ethertype_filter_ele), 0);
- (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+ rte_memcpy(&ethertype_filter_ptr->filter_info,
&ethertype_filter,
sizeof(struct rte_eth_ethertype_filter));
TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
@@ -1388,7 +1387,7 @@ igb_flow_create(struct rte_eth_dev *dev,
if (!ret) {
syn_filter_ptr = rte_zmalloc("igb_syn_filter",
sizeof(struct igb_eth_syn_filter_ele), 0);
- (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ rte_memcpy(&syn_filter_ptr->filter_info,
&syn_filter,
sizeof(struct rte_eth_syn_filter));
TAILQ_INSERT_TAIL(&igb_filter_syn_list,
@@ -1409,7 +1408,7 @@ igb_flow_create(struct rte_eth_dev *dev,
if (!ret) {
flex_filter_ptr = rte_zmalloc("igb_flex_filter",
sizeof(struct igb_flex_filter_ele), 0);
- (void)rte_memcpy(&flex_filter_ptr->filter_info,
+ rte_memcpy(&flex_filter_ptr->filter_info,
&flex_filter,
sizeof(struct rte_eth_flex_filter));
TAILQ_INSERT_TAIL(&igb_filter_flex_list,
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 6809d30c..cd6ae2fb 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -39,6 +39,7 @@
#include <stdarg.h>
#include <inttypes.h>
+#include <rte_bus_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 1c80a2a1..4ee12e9e 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -105,6 +105,13 @@ struct igb_tx_entry {
};
/**
+ * rx queue flags
+ */
+enum igb_rxq_flags {
+ IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
+};
+
+/**
* Structure associated with each RX queue.
*/
struct igb_rx_queue {
@@ -122,12 +129,13 @@ struct igb_rx_queue {
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint16_t reg_idx; /**< RX queue register index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+ uint32_t flags; /**< RX flags. */
};
/**
@@ -191,7 +199,7 @@ struct igb_tx_queue {
/**< Index of first used TX descriptor. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx; /**< TX queue register index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
@@ -589,7 +597,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* Set up transmit descriptor.
*/
slen = (uint16_t) m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
@@ -785,7 +793,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
/* Check if VLAN present */
pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
#if defined(RTE_LIBRTE_IEEE1588)
if (rx_status & E1000_RXD_STAT_TMST)
@@ -917,7 +925,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
@@ -946,9 +954,17 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ /*
+ * The vlan_tci field is only valid when PKT_RX_VLAN is
+ * set in the pkt_flags field and must be in CPU byte order.
+ */
+ if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
+ (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
+ rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
+ } else {
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ }
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
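The two-branch tag handling works around a hardware quirk that the rest of this patch wires up: on i350/i354 (and the i350 VF), VLAN tags of local-loopback packets, identified by E1000_RXDEXT_STATERR_LB in the descriptor status, arrive byte-swapped relative to normal traffic. Queues flagged with IGB_RXQ_FLAG_LB_BSWAP_VLAN therefore convert those tags with rte_be_to_cpu_16(), while all other tags keep the little-endian read; the flag itself is set per queue in eth_igb_rx_init() and eth_igbvf_rx_init() below, based on the MAC type.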
@@ -1103,7 +1119,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.pkt_addr = dma;
rxdp->read.hdr_addr = 0;
@@ -1180,10 +1196,17 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
/*
- * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
- * set in the pkt_flags field.
+ * The vlan_tci field is only valid when PKT_RX_VLAN is
+ * set in the pkt_flags field and must be in CPU byte order.
*/
- first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
+ (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
+ first_seg->vlan_tci =
+ rte_be_to_cpu_16(rxd.wb.upper.vlan);
+ } else {
+ first_seg->vlan_tci =
+ rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ }
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
@@ -1530,7 +1553,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
/* Allocate software ring */
@@ -1667,7 +1690,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
}
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rz->iova;
rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
/* Allocate software ring. */
@@ -2180,7 +2203,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
return -ENOMEM;
}
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
@@ -2278,6 +2301,17 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
+ rxq->flags = 0;
+ /*
+ * i350 and i354 vlan packets have vlan tags byte swapped.
+ */
+ if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
+ rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
+ } else {
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
+ }
+
/* Allocate buffers for descriptor rings and set up queue */
ret = igb_alloc_rx_queue_mbufs(rxq);
if (ret)
@@ -2557,6 +2591,17 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
+ rxq->flags = 0;
+ /*
+ * i350VF LB vlan packets have vlan tags byte swapped.
+ */
+ if (hw->mac.type == e1000_vfadapt_i350) {
+ rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
+ } else {
+ PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
+ }
+
/* Allocate buffers for descriptor rings and set up queue */
ret = igb_alloc_rx_queue_mbufs(rxq);
if (ret)