author    Luca Boccassi <luca.boccassi@gmail.com>    2017-07-03 15:11:03 +0100
committer Luca Boccassi <luca.boccassi@gmail.com>    2017-07-03 15:13:07 +0100
commit    bf7567fd2a5b0b28ab724046143c24561d38d015 (patch)
tree      d3fecf7bb6da55e6ee81f8d42110bd51c6e93631 /drivers
parent    7595afa4d30097c1177b69257118d8ad89a539be (diff)
New upstream version 17.05.1
Change-Id: I8a23679edd6c9c593ceebecf7d2bf1b489e14ccb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/net/af_packet/rte_eth_af_packet.c | 9
-rw-r--r-- drivers/net/ark/ark_ethdev.c | 18
-rw-r--r-- drivers/net/ark/ark_pktchkr.c | 2
-rw-r--r-- drivers/net/ark/ark_pktgen.c | 2
-rw-r--r-- drivers/net/bnx2x/bnx2x_ethdev.c | 4
-rw-r--r-- drivers/net/bnxt/bnxt_ethdev.c | 2
-rw-r--r-- drivers/net/bnxt/bnxt_hwrm.c | 4
-rw-r--r-- drivers/net/cxgbe/base/t4_hw.c | 20
-rw-r--r-- drivers/net/cxgbe/base/t4_regs.h | 18
-rw-r--r-- drivers/net/cxgbe/cxgbe.h | 3
-rw-r--r-- drivers/net/cxgbe/cxgbe_ethdev.c | 8
-rw-r--r-- drivers/net/cxgbe/cxgbe_main.c | 33
-rw-r--r-- drivers/net/e1000/em_ethdev.c | 2
-rw-r--r-- drivers/net/e1000/igb_ethdev.c | 18
-rw-r--r-- drivers/net/e1000/igb_rxtx.c | 6
-rw-r--r-- drivers/net/ena/ena_ethdev.c | 2
-rw-r--r-- drivers/net/enic/base/vnic_dev.c | 4
-rw-r--r-- drivers/net/enic/enic_ethdev.c | 2
-rw-r--r-- drivers/net/fm10k/fm10k_ethdev.c | 2
-rw-r--r-- drivers/net/i40e/base/i40e_register.h | 2
-rw-r--r-- drivers/net/i40e/i40e_ethdev.c | 30
-rw-r--r-- drivers/net/i40e/i40e_ethdev.h | 5
-rw-r--r-- drivers/net/i40e/i40e_ethdev_vf.c | 2
-rw-r--r-- drivers/net/ixgbe/ixgbe_ethdev.c | 4
-rw-r--r-- drivers/net/ixgbe/ixgbe_flow.c | 4
-rw-r--r-- drivers/net/liquidio/lio_ethdev.c | 23
-rw-r--r-- drivers/net/mlx5/mlx5.c | 1
-rw-r--r-- drivers/net/mlx5/mlx5.h | 2
-rw-r--r-- drivers/net/mlx5/mlx5_fdir.c | 7
-rw-r--r-- drivers/net/mlx5/mlx5_flow.c | 27
-rw-r--r-- drivers/net/mlx5/mlx5_rxq.c | 14
-rw-r--r-- drivers/net/mlx5/mlx5_txq.c | 16
-rw-r--r-- drivers/net/nfp/nfp_net.c | 2
-rw-r--r-- drivers/net/qede/qede_ethdev.c | 4
-rw-r--r-- drivers/net/qede/qede_rxtx.c | 4
-rw-r--r-- drivers/net/qede/qede_rxtx.h | 3
-rw-r--r-- drivers/net/ring/rte_eth_ring.c | 2
-rw-r--r-- drivers/net/sfc/base/ef10_ev.c | 7
-rw-r--r-- drivers/net/sfc/base/ef10_rx.c | 18
-rw-r--r-- drivers/net/sfc/base/ef10_tx.c | 18
-rw-r--r-- drivers/net/sfc/sfc_ethdev.c | 2
-rw-r--r-- drivers/net/sfc/sfc_tx.c | 2
-rw-r--r-- drivers/net/sfc/sfc_tx.h | 2
-rw-r--r-- drivers/net/tap/tap_flow.c | 7
-rw-r--r-- drivers/net/thunderx/nicvf_ethdev.c | 2
-rw-r--r-- drivers/net/virtio/virtio_ethdev.c | 4
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_ethdev.c | 2
47 files changed, 254 insertions, 121 deletions
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 68de45c3..9ccb7af9 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -252,8 +252,11 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
/* kick-off transmits */
- if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1)
- num_tx = 0; /* error sending -- no packets transmitted */
+ if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1) {
+ /* error sending -- no packets transmitted */
+ num_tx = 0;
+ num_tx_bytes = 0;
+ }
pkt_q->framenum = framenum;
pkt_q->tx_pkts += num_tx;
@@ -625,6 +628,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
goto error_early;
}
(*internals)->if_name = strdup(pair->value);
+ if ((*internals)->if_name == NULL)
+ goto error_early;
(*internals)->if_index = ifr.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
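A minimal sketch of the AF_PACKET TX-ring kick used in the hunk above, assuming an already-mapped PACKET_TX_RING with frames flagged TP_STATUS_SEND_REQUEST; the function and counter names below are illustrative, not the PMD's:

#include <errno.h>
#include <stdint.h>
#include <sys/socket.h>

static int
kick_tx_ring(int sockfd, uint16_t *num_tx, uint64_t *num_tx_bytes)
{
	/* A zero-length, non-blocking sendto() asks the kernel to walk the
	 * TX ring and transmit every frame marked for sending. */
	if (sendto(sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1) {
		/* Nothing reached the wire, so the packet and byte counters
		 * must be rolled back together, as the fix above does. */
		*num_tx = 0;
		*num_tx_bytes = 0;
		return -errno;
	}
	return 0;
}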
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 995c93d3..017817e2 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -516,11 +516,7 @@ eth_ark_dev_uninit(struct rte_eth_dev *dev)
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
- if (dev->data->mac_addrs)
- rte_free(dev->data->mac_addrs);
- if (dev->data)
- rte_free(dev->data);
-
+ rte_free(dev->data->mac_addrs);
return 0;
}
@@ -588,7 +584,11 @@ eth_ark_dev_start(struct rte_eth_dev *dev)
/* Delay packet generatpr start allow the hardware to be ready
* This is only used for sanity checking with internal generator
*/
- pthread_create(&thread, NULL, delay_pg_start, ark);
+ if (pthread_create(&thread, NULL, delay_pg_start, ark)) {
+ PMD_DRV_LOG(ERR, "Could not create pktgen "
+ "starter thread\n");
+ return -1;
+ }
}
if (ark->user_ext.dev_start)
@@ -899,6 +899,12 @@ process_file_args(const char *key, const char *value, void *extra_args)
int size = 0;
int first = 1;
+ if (file == NULL) {
+ PMD_DRV_LOG(ERR, "Unable to open "
+ "config file %s\n", value);
+ return -1;
+ }
+
while (fgets(line, sizeof(line), file)) {
size += strlen(line);
if (size >= ARK_MAX_ARG_LEN) {
diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/net/ark/ark_pktchkr.c
index 62b3673b..c3040af3 100644
--- a/drivers/net/ark/ark_pktchkr.c
+++ b/drivers/net/ark/ark_pktchkr.c
@@ -372,7 +372,7 @@ set_arg(char *arg, char *val)
o->v.INT = atoll(val);
break;
case OTSTRING:
- strncpy(o->v.STR, val, ARK_MAX_STR_LEN);
+ snprintf(o->v.STR, ARK_MAX_STR_LEN, "%s", val);
break;
}
return 1;
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c
index bdac054e..8c7a8a2d 100644
--- a/drivers/net/ark/ark_pktgen.c
+++ b/drivers/net/ark/ark_pktgen.c
@@ -354,7 +354,7 @@ pmd_set_arg(char *arg, char *val)
o->v.INT = atoll(val);
break;
case OTSTRING:
- strncpy(o->v.STR, val, ARK_MAX_STR_LEN);
+ snprintf(o->v.STR, ARK_MAX_STR_LEN, "%s", val);
break;
}
return 1;
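A minimal sketch of why both ark files above switch from strncpy() to snprintf(): strncpy() leaves the destination without a terminating NUL when the source fills the buffer, while snprintf() always terminates and truncates; the buffer size here stands in for ARK_MAX_STR_LEN:

#include <stdio.h>

#define BUF_LEN 10	/* stands in for ARK_MAX_STR_LEN */

int main(void)
{
	char buf[BUF_LEN];

	/* Unsafe: strncpy(buf, "0123456789abcdef", BUF_LEN) would leave
	 * buf without a NUL terminator. */

	/* Safe: always NUL-terminated, truncated if the source is too long. */
	snprintf(buf, sizeof(buf), "%s", "0123456789abcdef");
	printf("%s\n", buf);	/* prints "012345678" */
	return 0;
}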
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index b79cfdb0..6d7c002f 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -681,7 +681,7 @@ static struct rte_pci_driver rte_bnx2xvf_pmd = {
RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index bb873615..81711e48 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1225,4 +1225,4 @@ static struct rte_pci_driver bnxt_rte_pmd = {
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 3849d1a6..d8987234 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -536,7 +536,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
HWRM_CHECK_RESULT;
link_info->phy_link_status = resp->link;
- if (link_info->phy_link_status != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
+ if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
link_info->link_up = 1;
link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
} else {
@@ -1438,7 +1438,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
else
- link->link_speed = ETH_LINK_SPEED_10M;
+ link->link_speed = ETH_SPEED_NUM_NONE;
link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
link->link_status = link_info->link_up;
link->link_autoneg = link_info->auto_mode ==
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 9dca8da1..19afdac9 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -2136,6 +2136,7 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
u32 bgmap = t4_get_mps_bg_map(adap, idx);
+ u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
#define GET_STAT(name) \
t4_read_reg64(adap, \
@@ -2168,6 +2169,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & F_COUNTPAUSESTATTX) {
+ p->tx_frames -= p->tx_pause;
+ p->tx_octets -= p->tx_pause * 64;
+ }
+ if (stat_ctl & F_COUNTPAUSEMCTX)
+ p->tx_mcast_frames -= p->tx_pause;
+ }
+
p->rx_octets = GET_STAT(RX_PORT_BYTES);
p->rx_frames = GET_STAT(RX_PORT_FRAMES);
p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
@@ -2195,6 +2205,16 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & F_COUNTPAUSESTATRX) {
+ p->rx_frames -= p->rx_pause;
+ p->rx_octets -= p->rx_pause * 64;
+ }
+ if (stat_ctl & F_COUNTPAUSEMCRX)
+ p->rx_mcast_frames -= p->rx_pause;
+ }
+
p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index 9057e409..9c132dcd 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -553,6 +553,24 @@
#define V_VF(x) ((x) << S_VF)
#define G_VF(x) (((x) >> S_VF) & M_VF)
+#define A_MPS_STAT_CTL 0x9600
+
+#define S_COUNTPAUSEMCRX 5
+#define V_COUNTPAUSEMCRX(x) ((x) << S_COUNTPAUSEMCRX)
+#define F_COUNTPAUSEMCRX V_COUNTPAUSEMCRX(1U)
+
+#define S_COUNTPAUSESTATRX 4
+#define V_COUNTPAUSESTATRX(x) ((x) << S_COUNTPAUSESTATRX)
+#define F_COUNTPAUSESTATRX V_COUNTPAUSESTATRX(1U)
+
+#define S_COUNTPAUSEMCTX 3
+#define V_COUNTPAUSEMCTX(x) ((x) << S_COUNTPAUSEMCTX)
+#define F_COUNTPAUSEMCTX V_COUNTPAUSEMCTX(1U)
+
+#define S_COUNTPAUSESTATTX 2
+#define V_COUNTPAUSESTATTX(x) ((x) << S_COUNTPAUSESTATTX)
+#define F_COUNTPAUSESTATTX V_COUNTPAUSESTATTX(1U)
+
#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
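A minimal sketch of the S_/V_/F_ register-macro idiom the new t4_regs.h definitions follow and that t4_get_port_stats() tests above: S_x is a bit position, V_x(v) shifts a value into place, and F_x is the single-bit flag V_x(1U); the register value below is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define S_COUNTPAUSESTATTX 2
#define V_COUNTPAUSESTATTX(x) ((x) << S_COUNTPAUSESTATTX)
#define F_COUNTPAUSESTATTX V_COUNTPAUSESTATTX(1U)	/* == 0x4 */

int main(void)
{
	uint32_t stat_ctl = 0x4;	/* pretend A_MPS_STAT_CTL readback */

	if (stat_ctl & F_COUNTPAUSESTATTX)
		printf("pause frames are included in the TX counters\n");
	return 0;
}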
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index 0201c990..9120c439 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -59,5 +59,6 @@ int setup_sge_fwevtq(struct adapter *adapter);
void cfg_queues(struct rte_eth_dev *eth_dev);
int cfg_queue_count(struct rte_eth_dev *eth_dev);
int setup_rss(struct port_info *pi);
+void cxgbe_enable_rx_queues(struct port_info *pi);
#endif /* _CXGBE_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 34fed84a..598a7440 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -338,6 +338,8 @@ static int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
goto out;
}
+ cxgbe_enable_rx_queues(pi);
+
err = setup_rss(pi);
if (err)
goto out;
@@ -657,8 +659,6 @@ static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
cxgbe_stats_get(pi, &ps);
/* RX Stats */
- eth_stats->ipackets = ps.rx_frames;
- eth_stats->ibytes = ps.rx_octets;
eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
ps.rx_ovflow2 + ps.rx_ovflow3 +
ps.rx_trunc0 + ps.rx_trunc1 +
@@ -678,6 +678,8 @@ static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
eth_stats->q_ipackets[i] = rxq->stats.pkts;
eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+ eth_stats->ipackets += eth_stats->q_ipackets[i];
+ eth_stats->ibytes += eth_stats->q_ibytes[i];
}
for (i = 0; i < pi->n_tx_qsets; i++) {
@@ -1061,4 +1063,4 @@ static struct rte_pci_driver rte_cxgbe_pmd = {
RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
-RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 1f230cd5..71c3671d 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -978,33 +978,22 @@ int setup_rss(struct port_info *pi)
/*
* Enable NAPI scheduling and interrupt generation for all Rx queues.
*/
-static void enable_rx(struct adapter *adap)
+static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
- struct sge *s = &adap->sge;
- struct sge_rspq *q = &s->fw_evtq;
- int i, j;
-
/* 0-increment GTS to start the timer and enable interrupts */
t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
V_SEINTARM(q->intr_params) |
V_INGRESSQID(q->cntxt_id));
+}
- for_each_port(adap, i) {
- const struct port_info *pi = &adap->port[i];
- struct rte_eth_dev *eth_dev = pi->eth_dev;
-
- for (j = 0; j < eth_dev->data->nb_rx_queues; j++) {
- q = eth_dev->data->rx_queues[j];
-
- /*
- * 0-increment GTS to start the timer and enable
- * interrupts
- */
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
- V_SEINTARM(q->intr_params) |
- V_INGRESSQID(q->cntxt_id));
- }
- }
+void cxgbe_enable_rx_queues(struct port_info *pi)
+{
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ unsigned int i;
+
+ for (i = 0; i < pi->n_rx_qsets; i++)
+ enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
}
/**
@@ -1017,7 +1006,7 @@ static void enable_rx(struct adapter *adap)
*/
int cxgbe_up(struct adapter *adap)
{
- enable_rx(adap);
+ enable_rx(adap, &adap->sge.fw_evtq);
t4_sge_tx_monitor_start(adap);
t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE;
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 57eb017c..a9bd92bc 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -1867,4 +1867,4 @@ eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index e1702d8b..d18dd48e 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -3912,10 +3912,6 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
}
wufc = E1000_READ_REG(hw, E1000_WUFC);
- if (flex_filter->index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(flex_filter->index);
- else
- reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
if (add) {
if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
@@ -3945,6 +3941,11 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
+ if (flex_filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(flex_filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
+
E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
(E1000_WUFC_FLX0 << flex_filter->index));
queueing = filter->len |
@@ -3973,6 +3974,11 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
return -ENOENT;
}
+ if (it->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(it->index);
+ else
+ reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+
for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
E1000_WRITE_REG(hw, E1000_WUFC, wufc &
@@ -5418,7 +5424,7 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index b3b601b7..1c80a2a1 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -2402,9 +2402,11 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
/* Enable both L3/L4 rx checksum offload */
if (dev->data->dev_conf.rxmode.hw_ip_checksum)
- rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+ rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
+ E1000_RXCSUM_CRCOFL);
else
- rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+ rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
+ E1000_RXCSUM_CRCOFL);
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 64fee05d..806073ca 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -1812,4 +1812,4 @@ static struct rte_pci_driver rte_ena_pmd = {
RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 84e4840a..1cd031ac 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -645,7 +645,7 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
- u64 a0, a1 = 0;
+ u64 a0 = 0, a1 = 0;
int wait = 1000;
int err, i;
@@ -1021,7 +1021,7 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter_v2 *data)
{
- u64 a0, a1;
+ u64 a0 = 0, a1 = 0;
int wait = 1000;
dma_addr_t tlv_pa;
int ret = -EINVAL;
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 372bae73..331cd5ef 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -651,4 +651,4 @@ static struct rte_pci_driver rte_enic_pmd = {
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index a742eec1..7363defa 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -3146,4 +3146,4 @@ static struct rte_pci_driver rte_pmd_fm10k = {
RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index 3a305b67..b150fbdf 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -2805,7 +2805,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4c49673f..fd7d3475 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -679,7 +679,7 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
@@ -2329,6 +2329,10 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
vsi->offset_loaded, &oes->rx_broadcast,
&nes->rx_broadcast);
+ /* exclude CRC bytes */
+ nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
+ nes->rx_broadcast) * ETHER_CRC_LEN;
+
i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
&oes->rx_discards, &nes->rx_discards);
/* GLV_REPC not supported */
@@ -2348,6 +2352,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
vsi->offset_loaded, &oes->tx_broadcast,
&nes->tx_broadcast);
+ /* exclude CRC bytes */
+ nes->tx_bytes -= (nes->tx_unicast + nes->tx_multicast +
+ nes->tx_broadcast) * ETHER_CRC_LEN;
/* GLV_TDPC not supported */
i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
&oes->tx_errors, &nes->tx_errors);
@@ -2379,6 +2386,19 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
+ /* Get rx/tx bytes of internal transfer packets */
+ i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
+ I40E_GLV_GORCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_rx_bytes_offset,
+ &pf->internal_rx_bytes);
+
+ i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
+ I40E_GLV_GOTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_tx_bytes_offset,
+ &pf->internal_tx_bytes);
+
/* Get statistics of struct i40e_eth_stats */
i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
I40E_GLPRT_GORCL(hw->port),
@@ -2400,7 +2420,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
* so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.rx_broadcast) * ETHER_CRC_LEN + pf->internal_rx_bytes;
i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
pf->offset_loaded, &os->eth.rx_discards,
@@ -2428,7 +2448,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.tx_broadcast) * ETHER_CRC_LEN + pf->internal_tx_bytes;
/* GLPRT_TEPC not supported */
/* additional port specific stats */
@@ -5196,6 +5216,10 @@ i40e_pf_setup(struct i40e_pf *pf)
pf->offset_loaded = FALSE;
memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
+ pf->internal_rx_bytes = 0;
+ pf->internal_tx_bytes = 0;
+ pf->internal_rx_bytes_offset = 0;
+ pf->internal_tx_bytes_offset = 0;
ret = i40e_pf_get_switch_config(pf);
if (ret != I40E_SUCCESS) {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 2ff8282f..b0d963c1 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -639,6 +639,11 @@ struct i40e_pf {
struct i40e_hw_port_stats stats_offset;
struct i40e_hw_port_stats stats;
+ /* internal packet byte count, it should be excluded from the total */
+ uint64_t internal_rx_bytes;
+ uint64_t internal_tx_bytes;
+ uint64_t internal_rx_bytes_offset;
+ uint64_t internal_tx_bytes_offset;
bool offset_loaded;
struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 859b5e8f..6e5839dd 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1569,7 +1569,7 @@ static struct rte_pci_driver rte_i40evf_pmd = {
RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio-pci");
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 2083cded..aeaa432c 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -8148,7 +8148,7 @@ ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index da7b1cc8..9aeb71e4 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -2647,6 +2647,8 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
@@ -2699,6 +2701,8 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
TAILQ_REMOVE(&filter_fdir_list,
fdir_rule_ptr, entries);
rte_free(fdir_rule_ptr);
+ if (TAILQ_EMPTY(&filter_fdir_list))
+ fdir_info->mask_added = false;
}
break;
case RTE_ETH_FILTER_L2_TUNNEL:
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 436d25b0..c7f1fb64 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -36,6 +36,7 @@
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>
+#include <rte_ether.h>
#include "lio_logs.h"
#include "lio_23xx_vf.h"
@@ -1348,7 +1349,8 @@ lio_sync_link_state_check(void *eth_dev)
static int
lio_dev_start(struct rte_eth_dev *eth_dev)
{
- uint16_t mtu = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint16_t mtu;
+ uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
int ret = 0;
@@ -1386,12 +1388,29 @@ lio_dev_start(struct rte_eth_dev *eth_dev)
goto dev_mtu_check_error;
}
+ if (eth_dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (frame_len <= ETHER_MAX_LEN ||
+ frame_len > LIO_MAX_RX_PKTLEN) {
+ lio_dev_err(lio_dev, "max packet length should be >= %d and < %d when jumbo frame is enabled\n",
+ ETHER_MAX_LEN, LIO_MAX_RX_PKTLEN);
+ ret = -EINVAL;
+ goto dev_mtu_check_error;
+ }
+ mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
+ } else {
+ /* default MTU */
+ mtu = ETHER_MTU;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
+ }
+
if (lio_dev->linfo.link.s.mtu != mtu) {
ret = lio_dev_validate_vf_mtu(eth_dev, mtu);
if (ret)
goto dev_mtu_check_error;
}
+ eth_dev->data->mtu = mtu;
+
return 0;
dev_mtu_check_error:
@@ -2055,4 +2074,4 @@ static struct rte_pci_driver rte_liovf_pmd = {
RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
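A minimal sketch of the frame-length/MTU arithmetic lio_dev_start() adopts above, using the standard DPDK Ethernet constants (ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4, ETHER_MTU = 1500, ETHER_MAX_LEN = 1518); the jumbo frame length chosen below is only an example:

#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN 14
#define ETHER_CRC_LEN 4
#define ETHER_MTU     1500
#define ETHER_MAX_LEN (ETHER_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN)	/* 1518 */

int main(void)
{
	uint32_t frame_len = 9018;	/* example jumbo max_rx_pkt_len */
	uint16_t mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);

	printf("jumbo: frame_len %u -> mtu %u\n", frame_len, mtu);	/* 9000 */
	printf("default: mtu %u -> frame_len %u\n", ETHER_MTU, ETHER_MAX_LEN);
	return 0;
}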
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index fc99c0d5..bcb2c1b2 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -789,6 +789,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->device->driver = &mlx5_driver.driver;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx5_dev_ops;
+ TAILQ_INIT(&priv->flows);
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 67fd7428..1148dee3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -155,7 +155,7 @@ struct priv {
struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
- LIST_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
+ TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
rte_spinlock_t lock; /* Lock for control functions. */
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index f80c58b4..c8d47489 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -144,6 +144,7 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
+ /* fallthrough */
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
@@ -733,9 +734,11 @@ priv_fdir_disable(struct priv *priv)
/* Destroy flow director context in each RX queue. */
for (i = 0; (i != priv->rxqs_n); i++) {
- struct rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
+ struct rxq_ctrl *rxq_ctrl;
+ if (!(*priv->rxqs)[i])
+ continue;
+ rxq_ctrl = container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
if (!rxq_ctrl->fdir_queue)
continue;
priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index adcbe3f5..8b3957ba 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -91,7 +91,7 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
void *data);
struct rte_flow {
- LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
struct ibv_qp *qp; /**< Verbs queue pair. */
@@ -1230,7 +1230,7 @@ mlx5_flow_create(struct rte_eth_dev *dev,
priv_lock(priv);
flow = priv_flow_create(priv, attr, items, actions, error);
if (flow) {
- LIST_INSERT_HEAD(&priv->flows, flow, next);
+ TAILQ_INSERT_TAIL(&priv->flows, flow, next);
DEBUG("Flow created %p", (void *)flow);
}
priv_unlock(priv);
@@ -1249,8 +1249,7 @@ static void
priv_flow_destroy(struct priv *priv,
struct rte_flow *flow)
{
- (void)priv;
- LIST_REMOVE(flow, next);
+ TAILQ_REMOVE(&priv->flows, flow, next);
if (flow->ibv_flow)
claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
if (flow->drop)
@@ -1275,9 +1274,9 @@ priv_flow_destroy(struct priv *priv,
*/
for (queue_n = 0; queue_n < flow->rxqs_n; ++queue_n) {
rxq = flow->rxqs[queue_n];
- for (tmp = LIST_FIRST(&priv->flows);
+ for (tmp = TAILQ_FIRST(&priv->flows);
tmp;
- tmp = LIST_NEXT(tmp, next)) {
+ tmp = TAILQ_NEXT(tmp, next)) {
uint32_t tqueue_n;
if (tmp->drop)
@@ -1330,10 +1329,10 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
static void
priv_flow_flush(struct priv *priv)
{
- while (!LIST_EMPTY(&priv->flows)) {
+ while (!TAILQ_EMPTY(&priv->flows)) {
struct rte_flow *flow;
- flow = LIST_FIRST(&priv->flows);
+ flow = TAILQ_FIRST(&priv->flows);
priv_flow_destroy(priv, flow);
}
}
@@ -1494,9 +1493,7 @@ priv_flow_stop(struct priv *priv)
{
struct rte_flow *flow;
- for (flow = LIST_FIRST(&priv->flows);
- flow;
- flow = LIST_NEXT(flow, next)) {
+ TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
flow->ibv_flow = NULL;
if (flow->mark) {
@@ -1528,9 +1525,7 @@ priv_flow_start(struct priv *priv)
ret = priv_flow_create_drop_queue(priv);
if (ret)
return -1;
- for (flow = LIST_FIRST(&priv->flows);
- flow;
- flow = LIST_NEXT(flow, next)) {
+ TAILQ_FOREACH(flow, &priv->flows, next) {
struct ibv_qp *qp;
if (flow->drop)
@@ -1570,9 +1565,9 @@ priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
{
struct rte_flow *flow;
- for (flow = LIST_FIRST(&priv->flows);
+ for (flow = TAILQ_FIRST(&priv->flows);
flow;
- flow = LIST_NEXT(flow, next)) {
+ flow = TAILQ_NEXT(flow, next)) {
unsigned int n;
if (flow->drop)
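A minimal sketch of the <sys/queue.h> TAILQ pattern the mlx5 flow list is converted to above: tail insertion keeps flows in creation order, and the list can be walked forward (flow start), in reverse (flow stop), or drained from the head (flow flush); "struct flow" here is illustrative, not the driver's struct rte_flow:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct flow {
	int id;
	TAILQ_ENTRY(flow) next;		/* linkage, as in struct rte_flow */
};

TAILQ_HEAD(flow_list, flow);		/* cf. TAILQ_HEAD(mlx5_flows, rte_flow) */

int main(void)
{
	struct flow_list flows = TAILQ_HEAD_INITIALIZER(flows);
	struct flow *f;
	int i;

	for (i = 0; i < 3; i++) {
		f = calloc(1, sizeof(*f));
		f->id = i;
		TAILQ_INSERT_TAIL(&flows, f, next);	/* creation order kept */
	}

	TAILQ_FOREACH(f, &flows, next)			/* cf. priv_flow_start() */
		printf("start flow %d\n", f->id);

	TAILQ_FOREACH_REVERSE(f, &flows, flow_list, next)	/* cf. priv_flow_stop() */
		printf("stop flow %d\n", f->id);

	while (!TAILQ_EMPTY(&flows)) {			/* cf. priv_flow_flush() */
		f = TAILQ_FIRST(&flows);
		TAILQ_REMOVE(&flows, f, next);
		free(f);
	}
	return 0;
}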
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 8b782336..2a268398 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -838,12 +838,16 @@ static inline int
rxq_setup(struct rxq_ctrl *tmpl)
{
struct ibv_cq *ibcq = tmpl->cq;
- struct mlx5_cq *cq = to_mxxx(cq, cq);
+ struct ibv_mlx5_cq_info cq_info;
struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] =
rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
- if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+ if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
+ ERROR("Unable to query CQ info. check your OFED.");
+ return ENOTSUP;
+ }
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
"it should be set to %u", RTE_CACHE_LINE_SIZE);
return EINVAL;
@@ -851,16 +855,16 @@ rxq_setup(struct rxq_ctrl *tmpl)
if (elts == NULL)
return ENOMEM;
tmpl->rxq.rq_db = rwq->rq.db;
- tmpl->rxq.cqe_n = log2above(ibcq->cqe);
+ tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
tmpl->rxq.cq_ci = 0;
tmpl->rxq.rq_ci = 0;
- tmpl->rxq.cq_db = cq->dbrec;
+ tmpl->rxq.cq_db = cq_info.dbrec;
tmpl->rxq.wqes =
(volatile struct mlx5_wqe_data_seg (*)[])
(uintptr_t)rwq->rq.buff;
tmpl->rxq.cqes =
(volatile struct mlx5_cqe (*)[])
- (uintptr_t)cq->active_buf->buf;
+ (uintptr_t)cq_info.buf;
tmpl->rxq.elts = elts;
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index de7e28be..bf72468d 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -117,7 +117,7 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
struct rte_mbuf *elt = (*elts)[elts_tail];
assert(elt != NULL);
- rte_pktmbuf_free(elt);
+ rte_pktmbuf_free_seg(elt);
#ifndef NDEBUG
/* Poisoning. */
memset(&(*elts)[elts_tail],
@@ -173,23 +173,27 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
{
struct mlx5_qp *qp = to_mqp(tmpl->qp);
struct ibv_cq *ibcq = tmpl->cq;
- struct mlx5_cq *cq = to_mxxx(cq, cq);
+ struct ibv_mlx5_cq_info cq_info;
- if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+ if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
+ ERROR("Unable to query CQ info. check your OFED.");
+ return ENOTSUP;
+ }
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
"it should be set to %u", RTE_CACHE_LINE_SIZE);
return EINVAL;
}
- tmpl->txq.cqe_n = log2above(ibcq->cqe);
+ tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
tmpl->txq.wqes = qp->gen_data.sqstart;
tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
tmpl->txq.bf_reg = qp->gen_data.bf->reg;
- tmpl->txq.cq_db = cq->dbrec;
+ tmpl->txq.cq_db = cq_info.dbrec;
tmpl->txq.cqes =
(volatile struct mlx5_cqe (*)[])
- (uintptr_t)cq->active_buf->buf;
+ (uintptr_t)cq_info.buf;
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 5c5cba19..5479fb36 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -2605,7 +2605,7 @@ static struct rte_pci_driver rte_nfp_net_pmd = {
RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio-pci");
/*
* Local variables:
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 7501eb20..9fae40b6 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -2464,7 +2464,7 @@ static struct rte_pci_driver rte_qede_pmd = {
RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index baea1bb0..f5aa43dd 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1343,7 +1343,7 @@ print_tx_bd_info(struct qede_tx_queue *txq,
if (bd1)
PMD_TX_LOG(INFO, txq,
- "BD1: nbytes=%u nbds=%u bd_flags=04%x bf=%04x",
+ "BD1: nbytes=%u nbds=%u bd_flags=%04x bf=%04x",
rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
bd1->data.bd_flags.bitfields,
rte_cpu_to_le_16(bd1->data.bitfields));
@@ -1542,7 +1542,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (tunn_flg) {
/* First indicate its a tunnel pkt */
- bd1->data.bd_flags.bitfields |=
+ bd1->data.bitfields |=
ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index a1bbd256..21f2dacd 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -134,7 +134,8 @@
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
PKT_TX_QINQ_PKT | \
- PKT_TX_VLAN_PKT)
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_VXLAN)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 87d22581..d4dce957 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -230,7 +230,7 @@ eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
uint32_t index __rte_unused,
uint32_t vmdq __rte_unused)
{
- return -ENOTSUP;
+ return 0;
}
static void
diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c
index 35226749..d9389dab 100644
--- a/drivers/net/sfc/base/ef10_ev.c
+++ b/drivers/net/sfc/base/ef10_ev.c
@@ -431,7 +431,12 @@ efx_mcdi_fini_evq(
return (0);
fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the EVQ has already been destroyed.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
index b65faedd..661caa88 100644
--- a/drivers/net/sfc/base/ef10_rx.c
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -137,7 +137,7 @@ efx_mcdi_fini_rxq(
efx_mcdi_execute_quiet(enp, &req);
- if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+ if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail1;
}
@@ -145,7 +145,12 @@ efx_mcdi_fini_rxq(
return (0);
fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the RXQ has already been destroyed.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
@@ -802,7 +807,14 @@ ef10_rx_qflush(
return (0);
fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the RXQ has already been destroyed. Callers need to know that
+ * the RXQ flush has completed to avoid waiting until timeout for a
+ * flush done event that will not be delivered.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c
index 0f8e9b1b..211d2655 100644
--- a/drivers/net/sfc/base/ef10_tx.c
+++ b/drivers/net/sfc/base/ef10_tx.c
@@ -148,7 +148,7 @@ efx_mcdi_fini_txq(
efx_mcdi_execute_quiet(enp, &req);
- if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+ if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail1;
}
@@ -156,7 +156,12 @@ efx_mcdi_fini_txq(
return (0);
fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the TXQ has already been destroyed.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
@@ -675,7 +680,14 @@ ef10_tx_qflush(
return (0);
fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the TXQ has already been destroyed. Callers need to know that
+ * the TXQ flush has completed to avoid waiting until timeout for a
+ * flush done event that will not be delivered.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 4c9335f3..bdb4c466 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -1632,7 +1632,7 @@ static struct rte_pci_driver sfc_efx_pmd = {
RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index b8581d14..23230144 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -503,7 +503,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
(retry_count < SFC_TX_QFLUSH_ATTEMPTS);
++retry_count) {
if (efx_tx_qflush(txq->common) != 0) {
- txq->state |= SFC_TXQ_FLUSHING;
+ txq->state |= SFC_TXQ_FLUSH_FAILED;
break;
}
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index 6c3ac3b6..0c1c7083 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -64,6 +64,8 @@ enum sfc_txq_state_bit {
#define SFC_TXQ_FLUSHING (1 << SFC_TXQ_FLUSHING_BIT)
SFC_TXQ_FLUSHED_BIT,
#define SFC_TXQ_FLUSHED (1 << SFC_TXQ_FLUSHED_BIT)
+ SFC_TXQ_FLUSH_FAILED_BIT,
+#define SFC_TXQ_FLUSH_FAILED (1 << SFC_TXQ_FLUSH_FAILED_BIT)
};
/**
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index cf1c8a26..a0dd5048 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -401,9 +401,6 @@ tap_flow_create_eth(const struct rte_flow_item *item, void *data)
if (!flow)
return 0;
msg = &flow->msg;
- if (spec->type & mask->type)
- msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info,
- (spec->type & mask->type));
if (!is_zero_ether_addr(&spec->dst)) {
nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
&spec->dst.addr_bytes);
@@ -508,8 +505,6 @@ tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
msg = &flow->msg;
if (!info->eth_type)
info->eth_type = htons(ETH_P_IP);
- if (!info->vlan)
- msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_IP));
if (!spec)
return 0;
if (spec->hdr.dst_addr) {
@@ -566,8 +561,6 @@ tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
msg = &flow->msg;
if (!info->eth_type)
info->eth_type = htons(ETH_P_IPV6);
- if (!info->vlan)
- msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_IPV6));
if (!spec)
return 0;
if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index e4910c9b..2152029b 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -2171,4 +2171,4 @@ static struct rte_pci_driver rte_nicvf_pmd = {
RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 983b95f1..88118f1b 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -424,7 +424,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
}
}
- memset(mz->addr, 0, sizeof(mz->len));
+ memset(mz->addr, 0, mz->len);
vq->vq_ring_mem = mz->phys_addr;
vq->vq_ring_virt_mem = mz->addr;
@@ -1943,4 +1943,4 @@ __rte_unused uint8_t is_rx)
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
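A minimal sketch of the memset() bug fixed in virtio_init_queue() above: sizeof(len) is the size of the length variable (typically 8 bytes), not the buffer length, so only the first few bytes of the ring memory were being cleared; the heap buffer below stands in for the DMA memzone:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t len = 4096;
	unsigned char *addr = malloc(len);

	memset(addr, 0xff, len);

	memset(addr, 0, sizeof(len));	/* bug: clears only sizeof(size_t) bytes */
	printf("last byte after buggy memset: 0x%02x\n", addr[len - 1]);	/* 0xff */

	memset(addr, 0, len);		/* fix: clears the whole region */
	printf("last byte after fixed memset: 0x%02x\n", addr[len - 1]);	/* 0x00 */

	free(addr);
	return 0;
}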
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 98252bb6..2b8092d9 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -1126,4 +1126,4 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");