author     Christian Ehrhardt <christian.ehrhardt@canonical.com>    2017-06-08 09:46:51 -0600
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>    2017-06-08 09:51:02 -0600
commit     aab0c291a90f701b60f8c9a88cbcc265cba0ec8b (patch)
tree       bfa7bf9663cb9a518d81f18e3a2c792eee8fa1c8 /drivers
parent     ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)

Imported Upstream version 16.11.2

Change-Id: I947038e46a2c747296dc7aa7522239733ca2f659
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_ops.c       | 10
-rw-r--r--  drivers/crypto/qat/qat_crypto.c                    | 39
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.c                     |  2
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.h                     |  2
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c             |  9
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.c                     |  3
-rw-r--r--  drivers/net/e1000/base/e1000_vf.c                  |  3
-rw-r--r--  drivers/net/e1000/igb_ethdev.c                     | 12
-rw-r--r--  drivers/net/e1000/igb_pf.c                         |  4
-rw-r--r--  drivers/net/ena/base/ena_com.c                     |  2
-rw-r--r--  drivers/net/ena/ena_ethdev.c                       | 18
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c                   |  9
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx_vec.c                 | 16
-rw-r--r--  drivers/net/i40e/base/i40e_dcb.c                   |  2
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c                     | 47
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c                  |  7
-rw-r--r--  drivers/net/i40e/i40e_pf.c                         | 29
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c                       | 29
-rw-r--r--  drivers/net/i40e/i40e_rxtx.h                       |  2
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_common.h            |  4
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_neon.c              |  2
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_sse.c               | 16
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_phy.c                 | 29
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c                   |  9
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c                       | 14
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c                     | 57
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c             | 16
-rw-r--r--  drivers/net/mlx4/mlx4.c                            | 19
-rw-r--r--  drivers/net/mlx5/mlx5.c                            | 14
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c                     |  8
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c                        | 13
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c                       |  7
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c                        | 13
-rw-r--r--  drivers/net/nfp/nfp_net.c                          |  7
-rw-r--r--  drivers/net/pcap/rte_eth_pcap.c                    |  2
-rw-r--r--  drivers/net/qede/base/bcm_osal.c                   |  4
-rw-r--r--  drivers/net/qede/qede_ethdev.c                     | 32
-rw-r--r--  drivers/net/qede/qede_main.c                       |  9
-rw-r--r--  drivers/net/qede/qede_rxtx.c                       |  5
-rw-r--r--  drivers/net/thunderx/base/nicvf_bsvf.c             | 12
-rw-r--r--  drivers/net/thunderx/base/nicvf_bsvf.h             |  2
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.c                |  9
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.c                  | 18
-rw-r--r--  drivers/net/thunderx/nicvf_struct.h                | 12
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c                 | 27
-rw-r--r--  drivers/net/virtio/virtio_pci.c                    |  6
-rw-r--r--  drivers/net/virtio/virtio_pci.h                    |  4
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c   |  2
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.h   |  6
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c            |  7
-rw-r--r--  drivers/net/virtio/virtqueue.h                     |  8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c               |  2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ring.h                 |  2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c                 | 37
54 files changed, 426 insertions(+), 253 deletions(-)
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 875550c7..139fed14 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -350,9 +350,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 0
},
.aad_size = {
- .min = 8,
- .max = 12,
- .increment = 4
+ .min = 0,
+ .max = 65535,
+ .increment = 1
}
}, }
}, }
@@ -366,8 +366,8 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.block_size = 16,
.key_size = {
.min = 16,
- .max = 16,
- .increment = 0
+ .max = 32,
+ .increment = 8
},
.iv_size = {
.min = 12,
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index a4119fcd..0fe0b232 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1021,17 +1021,24 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
}
/* copy IV into request if it fits */
- if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- op->sym->cipher.iv.phys_addr;
+ /*
+ * If IV length is zero do not copy anything but still
+ * use request descriptor embedded IV
+ */
+ if (op->sym->cipher.iv.length) {
+ if (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ op->sym->cipher.iv.phys_addr;
+ }
}
min_ofs = cipher_ofs;
}
@@ -1062,6 +1069,12 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
}
}
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ auth_ofs = op->sym->cipher.data.offset;
+ auth_len = op->sym->cipher.data.length;
} else {
auth_ofs = op->sym->auth.data.offset;
auth_len = op->sym->auth.data.length;
@@ -1283,9 +1296,9 @@ void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
}
stats->enqueued_count += qp[i]->stats.enqueued_count;
- stats->dequeued_count += qp[i]->stats.enqueued_count;
+ stats->dequeued_count += qp[i]->stats.dequeued_count;
stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
- stats->dequeue_err_count += qp[i]->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
}
}
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 170e48fb..adf03092 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -273,6 +273,8 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->tx_free_thresh = tx_conf->tx_free_thresh ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+ txq->tx_free_thresh = min(txq->tx_free_thresh,
+ txq->nb_tx_desc - BDS_PER_TX_PKT);
PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
"total_bd=%lu, tx_pages=%u",
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h
index dd251aaf..2e38ec26 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.h
+++ b/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -11,7 +11,7 @@
#ifndef _BNX2X_RXTX_H_
#define _BNX2X_RXTX_H_
-#define DEFAULT_TX_FREE_THRESH 512
+#define DEFAULT_TX_FREE_THRESH 64
#define RTE_PMD_BNX2X_TX_MAX_BURST 1
/**
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index a80b6fa9..aa71e3f8 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1430,9 +1430,11 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
/* If lsc interrupt is set, check initial slave's link status */
- if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
+ }
return 0;
}
@@ -1668,8 +1670,9 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
- internals->candidate_max_rx_pktlen : 2048;
+ dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen
+ ? internals->candidate_max_rx_pktlen
+ : ETHER_MAX_JUMBO_FRAME_LEN;
dev_info->max_rx_queues = (uint16_t)128;
dev_info->max_tx_queues = (uint16_t)512;
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index c089b068..9dca8da1 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -359,6 +359,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
struct mbox_entry entry;
u32 pcie_fw = 0;
+ if (!temp)
+ return -ENOMEM;
+
if ((size & 15) || size > MBOX_LEN) {
free(temp);
return -EINVAL;
diff --git a/drivers/net/e1000/base/e1000_vf.c b/drivers/net/e1000/base/e1000_vf.c
index 7845b48e..44ab0188 100644
--- a/drivers/net/e1000/base/e1000_vf.c
+++ b/drivers/net/e1000/base/e1000_vf.c
@@ -421,12 +421,13 @@ void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+ msgbuf[0] = E1000_VF_SET_MULTICAST;
+
if (mc_addr_count > 30) {
msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW;
mc_addr_count = 30;
}
- msgbuf[0] = E1000_VF_SET_MULTICAST;
msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT;
for (i = 0; i < mc_addr_count; i++) {
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 2fddf0cb..be2600d4 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -1012,12 +1012,6 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
/* Generate a random MAC address, if none was assigned by PF. */
if (is_zero_ether_addr(perm_addr)) {
eth_random_addr(perm_addr->addr_bytes);
- diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
- if (diag) {
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
- return diag;
- }
PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
"%02x:%02x:%02x:%02x:%02x:%02x",
@@ -1029,6 +1023,12 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
perm_addr->addr_bytes[5]);
}
+ diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
+ if (diag) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ return diag;
+ }
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
&eth_dev->data->mac_addrs[0]);
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 5845bc22..3f8f1525 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -330,12 +330,16 @@ igb_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
*(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
int rar_entry = hw->mac.rar_entry_count - (vf + 1);
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+ int rah;
if (is_unicast_ether_addr((struct ether_addr *)new_mac)) {
if (!is_zero_ether_addr((struct ether_addr *)new_mac))
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
sizeof(vfinfo[vf].vf_mac_addresses));
hw->mac.ops.rar_set(hw, new_mac, rar_entry);
+ rah = E1000_READ_REG(hw, E1000_RAH(rar_entry));
+ rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + vf));
+ E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah);
return 0;
}
return -1;
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index bd6f3c6b..778bc2eb 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -2278,7 +2278,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret)) {
ena_trc_err("Failed to set hash input. error: %d\n", ret);
- ret = ENA_COM_INVAL;
+ return ENA_COM_INVAL;
}
return 0;
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index c1fd7bb3..1fc3654e 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -908,7 +908,7 @@ static int ena_start(struct rte_eth_dev *dev)
static int ena_queue_restart(struct ena_ring *ring)
{
- int rc;
+ int rc, bufs_num;
ena_assert_msg(ring->configured == 1,
"Trying to restart unconfigured queue\n");
@@ -919,9 +919,10 @@ static int ena_queue_restart(struct ena_ring *ring)
if (ring->type == ENA_RING_TYPE_TX)
return 0;
- rc = ena_populate_rx_queue(ring, ring->ring_size);
- if ((unsigned int)rc != ring->ring_size) {
- PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n");
+ bufs_num = ring->ring_size - 1;
+ rc = ena_populate_rx_queue(ring, bufs_num);
+ if (rc != bufs_num) {
+ PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
return (-1);
}
@@ -1132,7 +1133,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
return 0;
in_use = rxq->next_to_use - rxq->next_to_clean;
- ena_assert_msg(((in_use + count) <= ring_size), "bad ring state");
+ ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
count = RTE_MIN(count,
(uint16_t)(ring_size - (next_to_use & ring_mask)));
@@ -1160,6 +1161,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
&ebuf, next_to_use_masked);
if (unlikely(rc)) {
+ rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
+ count - i);
RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
break;
}
@@ -1556,12 +1559,13 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
recv_idx++;
}
+ rx_ring->next_to_clean = next_to_clean;
+
+ desc_in_use = desc_in_use - completed + 1;
/* Burst refill to save doorbells, memory barriers, const interval */
if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
- rx_ring->next_to_clean = next_to_clean;
-
return recv_idx;
}
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7c51d3b5..32b0ea93 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -1797,7 +1797,8 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
struct fm10k_rx_queue *q;
const struct rte_memzone *mz;
@@ -2767,7 +2768,8 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
- struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
uint16_t i, rx_using_sse;
uint16_t rx_ftag_en = 0;
@@ -2809,7 +2811,8 @@ static void
fm10k_params_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ struct fm10k_dev_info *info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
/* Inialize bus info. Normally we would call fm10k_get_bus_info(), but
* there is no way to get link status without reading BAR4. Until this
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 27f3e43f..3d216526 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -470,9 +470,13 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
__m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ __m128i mbp1;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
/* Read desc statuses backwards to avoid race condition */
@@ -480,11 +484,13 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+#endif
descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -493,8 +499,10 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
/* avoid compiler reorder optimization */
rte_compiler_barrier();
diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c
index 26c344fd..9b5405db 100644
--- a/drivers/net/i40e/base/i40e_dcb.c
+++ b/drivers/net/i40e/base/i40e_dcb.c
@@ -396,6 +396,8 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
dcbcfg->numapps = length / sizeof(*app);
if (!dcbcfg->numapps)
return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
for (i = 0; i < dcbcfg->numapps; i++) {
u8 up, selector;
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index bf7e5a05..4e4cd16a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -460,6 +460,7 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
@@ -2069,6 +2070,8 @@ out:
if (link.link_status == old.link_status)
return -1;
+ i40e_notify_all_vfs_link_status(dev);
+
return 0;
}
@@ -4105,6 +4108,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
hw->aq.asq_last_status);
goto fail;
}
+ veb->enabled_tc = I40E_DEFAULT_TCMAP;
/* get statistics index */
ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
@@ -5504,11 +5508,9 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
break;
case i40e_aqc_opc_get_link_status:
ret = i40e_dev_link_update(dev, 0);
- if (!ret) {
- i40e_notify_all_vfs_link_status(dev);
+ if (!ret)
_rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC, NULL);
- }
break;
default:
PMD_DRV_LOG(ERR, "Request %u is not supported yet",
@@ -7051,7 +7053,44 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
pctype = i40e_flowtype_to_pctype(i);
reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
+ reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
+ reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
+ reg);
+ } else {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
+ reg);
+ }
+ } else {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ }
}
reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 640d316a..efd4fac1 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1997,6 +1997,10 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
}
list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "fail to allocate memory");
+ return;
+ }
for (i = begin; i < next_begin; i++) {
addr = &dev->data->mac_addrs[i];
@@ -2142,6 +2146,9 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
case I40E_LINK_SPEED_20GB:
new_link.link_speed = ETH_SPEED_NUM_20G;
break;
+ case I40E_LINK_SPEED_25GB:
+ new_link.link_speed = ETH_SPEED_NUM_25G;
+ break;
case I40E_LINK_SPEED_40GB:
new_link.link_speed = ETH_SPEED_NUM_40G;
break;
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 97b8eccc..b36d9019 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -907,8 +907,33 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
dev->data->dev_link.link_status;
- event.event_data.link_event.link_speed =
- (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+
+ /* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */
+ switch (dev->data->dev_link.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB;
+ break;
+ case ETH_SPEED_NUM_1G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB;
+ break;
+ case ETH_SPEED_NUM_20G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB;
+ break;
+ case ETH_SPEED_NUM_25G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB;
+ break;
+ case ETH_SPEED_NUM_40G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ break;
+ default:
+ event.event_data.link_event.link_speed =
+ I40E_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1b25b2f2..602e40c9 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -598,6 +598,7 @@ static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev;
uint16_t nb_rx = 0;
if (!nb_pkts)
@@ -615,9 +616,10 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (i40e_rx_alloc_bufs(rxq) != 0) {
uint16_t i, j;
- PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
- "port_id=%u, queue_id=%u",
- rxq->port_id, rxq->queue_id);
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
rxq->rx_nb_avail = 0;
rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
@@ -679,6 +681,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
union i40e_rx_desc rxd;
struct i40e_rx_entry *sw_ring;
struct i40e_rx_entry *rxe;
+ struct rte_eth_dev *dev;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
uint16_t nb_rx;
@@ -707,10 +710,13 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
break;
nmb = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(!nmb))
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
- rxd = *rxdp;
+ }
+ rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
rx_id++;
@@ -802,6 +808,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
struct rte_mbuf *nmb, *rxm;
uint16_t rx_id = rxq->rx_tail;
uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
uint32_t rx_status;
uint64_t qword1;
uint64_t dma_addr;
@@ -818,8 +825,12 @@ i40e_recv_scattered_pkts(void *rx_queue,
break;
nmb = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(!nmb))
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
+ }
+
rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
@@ -1717,11 +1728,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
-#else
- len = nb_desc;
-#endif
/* Allocate the software ring. */
rxq->sw_ring =
@@ -2129,11 +2136,11 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
((volatile char *)rxq->rx_ring)[i] = 0;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index ecdb13cb..635ed48b 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -113,11 +113,11 @@ struct i40e_rx_queue {
uint16_t nb_rx_hold; /**< number of held free RX desc */
struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */
struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */
+ struct rte_mbuf fake_mbuf; /**< dummy mbuf */
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
uint16_t rx_nb_avail; /**< number of staged packets ready */
uint16_t rx_next_avail; /**< index of next staged packets */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
- struct rte_mbuf fake_mbuf; /**< dummy mbuf */
struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
#endif
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 990520f3..23b4e352 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -243,6 +243,10 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
if (rxmode->header_split == 1)
return -1;
+ /* no QinQ support */
+ if (rxmode->hw_vlan_extend == 1)
+ return -1;
+
return 0;
#else
RTE_SET_USED(dev);
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 011c54e0..d235daa7 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -205,7 +205,7 @@ desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
for (i = 0; i < 4; i++) {
tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
ptype = vgetq_lane_u8(tmp, 8);
- rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
+ rx_pkts[i]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
}
}
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index b95cc8e1..9644dd61 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -320,20 +320,26 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i descs[RTE_I40E_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -342,8 +348,10 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index 43c55d74..cf9d51dc 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -1542,16 +1542,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("SFP+ module not supported\n");
@@ -1804,16 +1798,10 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("QSFP module not supported\n");
@@ -1838,7 +1826,6 @@ err_read_i2c_eeprom:
return IXGBE_ERR_SFP_NOT_PRESENT;
}
-
/**
* ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index bac36e0d..d6686f6c 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2991,6 +2991,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
/* Extended stats */
for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
+ xstats[i].id = i;
xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbevf_stats_strings[i].offset);
}
@@ -4089,6 +4090,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
struct ixgbe_hw *hw;
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
ixgbe_dev_info_get(dev, &dev_info);
@@ -4099,7 +4101,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (!rx_conf->enable_scatter &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -5722,6 +5724,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct ixgbe_hw *hw;
uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5731,7 +5734,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (!rx_conf->enable_scatter &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -7529,7 +7532,7 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
}
static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 26395e41..09440ccb 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -387,15 +387,27 @@ ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
uint32_t reg_offset, vf_shift;
const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ uint8_t nb_q_per_pool;
+ int i;
vf_shift = vf & VFRE_MASK;
reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
- /* enable transmit and receive for vf */
+ /* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* enable all queue drop for IOV */
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
+ IXGBE_WRITE_FLUSH(hw);
+ reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+
+ /* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index c61ce470..d1e300a5 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -3323,7 +3323,6 @@ ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
- uint32_t q;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -3343,18 +3342,6 @@ ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
- if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
- /* Disable drop for all queues in VMDQ mode*/
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
- } else {
- /* Enable drop for all queues in SRIOV mode */
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT) | IXGBE_QDE_ENABLE));
- }
-
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
reg &= ~IXGBE_RTTDCS_ARBDIS;
@@ -3488,16 +3475,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
/**
* ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
- * @hw: pointer to hardware structure
+ * @dev: pointer to eth_dev structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*/
static void
-ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t vlanctrl;
uint8_t i;
+ uint32_t q;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
/*
@@ -3535,6 +3524,21 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /* Disable drop for all queues in VMDQ mode*/
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT)));
+ } else {
+ /* Enable drop for all queues in SRIOV mode */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT) |
+ IXGBE_QDE_ENABLE));
+ }
}
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
@@ -3647,7 +3651,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Get dcb TX configuration parameters from rte_eth_conf */
ixgbe_dcb_rx_config(dev, dcb_config);
/*Configure general DCB RX parameters*/
- ixgbe_dcb_rx_hw_config(hw, dcb_config);
+ ixgbe_dcb_rx_hw_config(dev, dcb_config);
break;
default:
PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
@@ -3706,6 +3710,15 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
}
+ } else {
+ /* Re-configure 8 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ }
}
switch (hw->mac.type) {
@@ -4083,9 +4096,8 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
break;
}
} else {
- /*
- * SRIOV active scheme
- * Support RSS together with VMDq & SRIOV
+ /* SRIOV active scheme
+ * Support RSS together with SRIOV.
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_RSS:
@@ -4093,10 +4105,13 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
ixgbe_config_vf_rss(dev);
break;
case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_DCB:
+ /* In SRIOV, the configuration is the same as VMDq case */
ixgbe_vmdq_dcb_configure(dev);
break;
- /* FIXME if support DCB/RSS together with VMDq & SRIOV */
+ /* DCB/RSS together with SRIOV is not supported */
case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case ETH_MQ_RX_DCB_RSS:
PMD_INIT_LOG(ERR,
"Could not support DCB/RSS with VMDq & SRIOV");
return -1;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index abbf2841..dd7d1778 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -335,9 +335,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
@@ -345,11 +349,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -358,8 +364,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 6d43a977..83f9143b 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -3340,6 +3340,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Increase out of memory counters. */
++rxq->stats.rx_nombuf;
++rxq->priv->dev->data->rx_mbuf_alloc_failed;
+ /* Add SGE to array for repost. */
+ sges[i] = elt->sge;
goto repost;
}
@@ -5571,10 +5573,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
list = ibv_get_device_list(&i);
if (list == NULL) {
assert(errno);
- if (errno == ENOSYS) {
- WARN("cannot list devices, is ib_uverbs loaded?");
- return 0;
- }
+ if (errno == ENOSYS)
+ ERROR("cannot list devices, is ib_uverbs loaded?");
return -errno;
}
assert(i >= 0);
@@ -5606,11 +5606,11 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_free_device_list(list);
switch (err) {
case 0:
- WARN("cannot access device, is mlx4_ib loaded?");
- return 0;
+ ERROR("cannot access device, is mlx4_ib loaded?");
+ return -ENODEV;
case EINVAL:
- WARN("cannot use device, are drivers up to date?");
- return 0;
+ ERROR("cannot use device, are drivers up to date?");
+ return -EINVAL;
}
assert(err > 0);
return -err;
@@ -5857,6 +5857,9 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
priv_set_flags(priv, ~IFF_UP, IFF_UP);
+ /* Update link status once if waiting for LSC. */
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ mlx4_link_update(eth_dev, 0);
continue;
port_error:
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cb45fd0f..aa9d2dcb 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -384,10 +384,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
list = ibv_get_device_list(&i);
if (list == NULL) {
assert(errno);
- if (errno == ENOSYS) {
- WARN("cannot list devices, is ib_uverbs loaded?");
- return 0;
- }
+ if (errno == ENOSYS)
+ ERROR("cannot list devices, is ib_uverbs loaded?");
return -errno;
}
assert(i >= 0);
@@ -427,11 +425,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_free_device_list(list);
switch (err) {
case 0:
- WARN("cannot access device, is mlx5_ib loaded?");
- return 0;
+ ERROR("cannot access device, is mlx5_ib loaded?");
+ return -ENODEV;
case EINVAL:
- WARN("cannot use device, are drivers up to date?");
- return 0;
+ ERROR("cannot use device, are drivers up to date?");
+ return -EINVAL;
}
assert(err > 0);
return -err;
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 06cfd016..0aa274e6 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -665,10 +665,10 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
/* refers to rxq_cq_to_pkt_type() */
- RTE_PTYPE_L3_IPV4,
- RTE_PTYPE_L3_IPV6,
- RTE_PTYPE_INNER_L3_IPV4,
- RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
RTE_PTYPE_UNKNOWN
};
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 28e93d3e..118f6d67 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1247,6 +1247,19 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
(*priv->rxqs)[idx] = NULL;
rxq_cleanup(rxq_ctrl);
+ /* Resize if rxq size is changed. */
+ if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
+ rxq_ctrl = rte_realloc(rxq_ctrl,
+ sizeof(*rxq_ctrl) +
+ desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq_ctrl) {
+ ERROR("%p: unable to reallocate queue index %u",
+ (void *)dev, idx);
+ priv_unlock(priv);
+ return -ENOMEM;
+ }
+ }
} else {
rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
desc * sizeof(struct rte_mbuf *),
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 3997b27a..58926e39 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -431,7 +431,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
#ifdef MLX5_PMD_SOFT_COUNTERS
total_length = length;
#endif
- assert(length >= MLX5_WQE_DWORD_SIZE);
+ if (length < (MLX5_WQE_DWORD_SIZE + 2))
+ break;
/* Update element. */
(*txq->elts)[elts_head] = buf;
elts_head = (elts_head + 1) & (elts_n - 1);
@@ -1290,7 +1291,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
unsigned int i = 0;
unsigned int rq_ci = rxq->rq_ci << sges_n;
- int len; /* keep its value across iterations. */
+ int len = 0; /* keep its value across iterations. */
while (pkts_n) {
unsigned int idx = rq_ci & wqe_cnt;
@@ -1356,7 +1357,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
pkt->ol_flags |=
rxq_cq_to_ol_flags(rxq, cqe);
}
- if (cqe->hdr_type_etc &
+ if (ntohs(cqe->hdr_type_etc) &
MLX5_CQE_VLAN_STRIPPED) {
pkt->ol_flags |= PKT_RX_VLAN_PKT |
PKT_RX_VLAN_STRIPPED;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 439908fc..c2863671 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -496,6 +496,19 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
(*priv->txqs)[idx] = NULL;
txq_cleanup(txq_ctrl);
+ /* Resize if txq size is changed. */
+ if (txq_ctrl->txq.elts_n != log2above(desc)) {
+ txq_ctrl = rte_realloc(txq_ctrl,
+ sizeof(*txq_ctrl) +
+ desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE);
+ if (!txq_ctrl) {
+ ERROR("%p: unable to reallocate queue index %u",
+ (void *)dev, idx);
+ priv_unlock(priv);
+ return -ENOMEM;
+ }
+ }
} else {
txq_ctrl =
rte_calloc_socket("TXQ", 1,
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 099d82b3..49c52930 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -1916,7 +1916,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct nfp_net_tx_desc *txds;
struct rte_mbuf *pkt;
uint64_t dma_addr;
- int pkt_size, dma_size;
+ int pkt_size, pkt_len, dma_size;
uint16_t free_descs, issued_descs;
struct rte_mbuf **lmbuf;
int i;
@@ -1964,6 +1964,8 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* Checksum and VLAN flags just in the first descriptor for a
* multisegment packet
*/
+
+ txds->data_len = pkt->pkt_len;
nfp_net_tx_cksum(txq, txds, pkt);
if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
@@ -1981,6 +1983,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* then data_len = pkt_len
*/
pkt_size = pkt->pkt_len;
+ pkt_len = pkt->pkt_len;
/* Releasing mbuf which was prefetched above */
if (*lmbuf)
@@ -1999,7 +2002,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Filling descriptors fields */
txds->dma_len = dma_size;
- txds->data_len = pkt->pkt_len;
+ txds->data_len = pkt_len;
txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
txds->dma_addr_lo = (dma_addr & 0xffffffff);
ASSERT(free_descs > 0);
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 57b0b315..1a208ffc 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -294,9 +294,9 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
}
- rte_pktmbuf_free(mbuf);
num_tx++;
tx_bytes += mbuf->pkt_len;
+ rte_pktmbuf_free(mbuf);
}
/*
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 28be9587..3f895cd4 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -98,9 +98,7 @@ inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
u32 nwords = 0;
OSAL_BUILD_BUG_ON(!limit);
nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
- for (i = 0; i < nwords; i++)
- if (~(addr[i] != 0))
- break;
+ for (i = 0; i < nwords && ~(addr[i]) == 0; i++);
return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 6d6fb9de..23221478 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -433,8 +433,6 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_filter_ucast ucast;
- int rc;
if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
mac_addr->addr_bytes)) {
@@ -444,29 +442,7 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
return;
}
- /* First remove the primary mac */
- qede_set_ucast_cmn_params(&ucast);
- ucast.opcode = ECORE_FILTER_REMOVE;
- ucast.type = ECORE_FILTER_MAC;
- ether_addr_copy(&qdev->primary_mac,
- (struct ether_addr *)&ucast.mac);
- rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
- if (rc != 0) {
- DP_ERR(edev, "Unable to remove current macaddr"
- " Reverting to previous default mac\n");
- ether_addr_copy(&qdev->primary_mac,
- &eth_dev->data->mac_addrs[0]);
- return;
- }
-
- /* Add new MAC */
- ucast.opcode = ECORE_FILTER_ADD;
- ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
- rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
- if (rc != 0)
- DP_ERR(edev, "Unable to add new default mac\n");
- else
- ether_addr_copy(mac_addr, &qdev->primary_mac);
+ qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
@@ -741,10 +717,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
SLIST_INIT(&qdev->vlan_list_head);
- /* Add primary mac for PF */
- if (IS_PF(edev))
- qede_mac_addr_set(eth_dev, &qdev->primary_mac);
-
/* Enable VLAN offloads by default */
qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
ETH_VLAN_FILTER_MASK |
@@ -1286,6 +1258,8 @@ void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}
static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index b666e1c7..a6d8ef4e 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -345,11 +345,12 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
ETHER_ADDR_LEN);
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+
if (IS_PF(edev)) {
- dev_info->fw_major = FW_MAJOR_VERSION;
- dev_info->fw_minor = FW_MINOR_VERSION;
- dev_info->fw_rev = FW_REVISION_VERSION;
- dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = edev->mf_mode;
dev_info->tx_switching = false;
} else {
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index a34b6659..9cce13df 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1318,10 +1318,6 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)
/* Bring-up the link */
qede_dev_set_link_state(eth_dev, true);
- /* Reset ring */
- if (qede_reset_fp_rings(qdev))
- return -ENOMEM;
-
/* Start/resume traffic */
qdev->ops->fastpath_start(edev);
@@ -1490,6 +1486,7 @@ int qede_reset_fp_rings(struct qede_dev *qdev)
}
}
}
+ qede_reset_fp_rings(qdev);
return 0;
}
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.c b/drivers/net/thunderx/base/nicvf_bsvf.c
index 9e028a3a..49a2646d 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.c
+++ b/drivers/net/thunderx/base/nicvf_bsvf.c
@@ -37,7 +37,7 @@
#include "nicvf_bsvf.h"
#include "nicvf_plat.h"
-static SIMPLEQ_HEAD(, svf_entry) head = SIMPLEQ_HEAD_INITIALIZER(head);
+static STAILQ_HEAD(, svf_entry) head = STAILQ_HEAD_INITIALIZER(head);
void
nicvf_bsvf_push(struct svf_entry *entry)
@@ -45,7 +45,7 @@ nicvf_bsvf_push(struct svf_entry *entry)
assert(entry != NULL);
assert(entry->vf != NULL);
- SIMPLEQ_INSERT_TAIL(&head, entry, next);
+ STAILQ_INSERT_TAIL(&head, entry, next);
}
struct svf_entry *
@@ -53,14 +53,14 @@ nicvf_bsvf_pop(void)
{
struct svf_entry *entry;
- assert(!SIMPLEQ_EMPTY(&head));
+ assert(!STAILQ_EMPTY(&head));
- entry = SIMPLEQ_FIRST(&head);
+ entry = STAILQ_FIRST(&head);
assert(entry != NULL);
assert(entry->vf != NULL);
- SIMPLEQ_REMOVE_HEAD(&head, next);
+ STAILQ_REMOVE_HEAD(&head, next);
return entry;
}
@@ -68,5 +68,5 @@ nicvf_bsvf_pop(void)
int
nicvf_bsvf_empty(void)
{
- return SIMPLEQ_EMPTY(&head);
+ return STAILQ_EMPTY(&head);
}
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.h b/drivers/net/thunderx/base/nicvf_bsvf.h
index 5d5a25e2..fb9b2484 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.h
+++ b/drivers/net/thunderx/base/nicvf_bsvf.h
@@ -41,7 +41,7 @@ struct nicvf;
* The base queue structure to hold secondary qsets.
*/
struct svf_entry {
- SIMPLEQ_ENTRY(svf_entry) next; /**< Next element's pointer */
+ STAILQ_ENTRY(svf_entry) next; /**< Next element's pointer */
struct nicvf *vf; /**< Holder of a secondary qset */
};
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 466e49ce..2da5af04 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -41,7 +41,6 @@
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>
-#include <sys/timerfd.h>
#include <rte_alarm.h>
#include <rte_atomic.h>
@@ -245,7 +244,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per RX ring stats */
for (qidx = rx_start; qidx <= rx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
@@ -258,7 +257,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per TX ring stats */
for (qidx = tx_start; qidx <= tx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
@@ -277,7 +276,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per RX ring stats */
for (qidx = rx_start; qidx <= rx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_rx_qstats(snic, &rx_qstats,
@@ -290,7 +289,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
nicvf_tx_range(dev, snic, &tx_start, &tx_end);
/* Reading per TX ring stats */
for (qidx = tx_start; qidx <= tx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_tx_qstats(snic, &tx_qstats,
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index fc43b747..87e9de1a 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -469,11 +469,10 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += to_process;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -= nicvf_fill_rbdr(rxq,
- rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
@@ -563,11 +562,10 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += buffers_consumed;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -=
- nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index c900e121..5bc6d577 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -43,8 +43,8 @@
#include <rte_memory.h>
struct nicvf_rbdr {
- uint64_t rbdr_status;
- uint64_t rbdr_door;
+ uintptr_t rbdr_status;
+ uintptr_t rbdr_door;
struct rbdr_entry_t *desc;
nicvf_phys_addr_t phys;
uint32_t buffsz;
@@ -58,8 +58,8 @@ struct nicvf_txq {
union sq_entry_t *desc;
nicvf_phys_addr_t phys;
struct rte_mbuf **txbuffs;
- uint64_t sq_head;
- uint64_t sq_door;
+ uintptr_t sq_head;
+ uintptr_t sq_door;
struct rte_mempool *pool;
struct nicvf *nic;
void (*pool_free)(struct nicvf_txq *sq);
@@ -74,8 +74,8 @@ struct nicvf_txq {
struct nicvf_rxq {
uint64_t mbuf_phys_off;
- uint64_t cq_status;
- uint64_t cq_door;
+ uintptr_t cq_status;
+ uintptr_t cq_door;
nicvf_phys_addr_t phys;
union cq_entry_t *desc;
struct nicvf_rbdr *shared_rbdr;
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index f5961ab7..452ad2a5 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -545,6 +545,9 @@ virtio_free_queues(struct virtio_hw *hw)
int queue_type;
uint16_t i;
+ if (hw->vqs == NULL)
+ return;
+
for (i = 0; i < nr_vq; i++) {
vq = hw->vqs[i];
if (!vq)
@@ -563,9 +566,11 @@ virtio_free_queues(struct virtio_hw *hw)
}
rte_free(vq);
+ hw->vqs[i] = NULL;
}
rte_free(hw->vqs);
+ hw->vqs = NULL;
}
static int
@@ -1210,11 +1215,11 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
rte_eth_copy_pci_info(eth_dev, pci_dev);
- /* If host does not support status then disable LSC */
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
- eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
- else
+	/* LSC needs both the STATUS feature and MSI-X; otherwise disable it */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ else
+ eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
rx_func_get(eth_dev);
@@ -1550,9 +1555,6 @@ virtio_dev_start(struct rte_eth_dev *dev)
}
}
- /* Initialize Link state */
- virtio_dev_link_update(dev, 0);
-
	/* Notify the backend.
	 * Otherwise the tap backend might already stop its queue due to fullness.
	 * The vhost backend will have no chance to be woken up
@@ -1582,6 +1584,11 @@ virtio_dev_start(struct rte_eth_dev *dev)
VIRTQUEUE_DUMP(txvq->vq);
}
+ hw->started = 1;
+
+ /* Initialize Link state */
+ virtio_dev_link_update(dev, 0);
+
return 0;
}
@@ -1636,6 +1643,7 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
+ struct virtio_hw *hw = dev->data->dev_private;
struct rte_eth_link link;
PMD_INIT_LOG(DEBUG, "stop");
@@ -1643,6 +1651,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)
if (dev->data->dev_conf.intr_conf.lsc)
rte_intr_disable(&dev->pci_dev->intr_handle);
+ hw->started = 0;
memset(&link, 0, sizeof(link));
virtio_dev_atomic_write_link_status(dev, &link);
}
@@ -1659,7 +1668,9 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = SPEED_10G;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ if (hw->started == 0) {
+ link.link_status = ETH_LINK_DOWN;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
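
Taken together, the virtio_ethdev.c hunks add a started flag so link reporting stops racing device state: the flag is set only after the queues are brought up, cleared in stop, and checked before any config-space read. A condensed sketch of the resulting decision, with simplified types and a stand-in for the vtpci_read_dev_config() call:

    #include <stdint.h>

    struct hw_state {
        uint16_t started;    /* set at the end of dev_start, cleared in stop */
        int has_status_feat; /* was VIRTIO_NET_F_STATUS negotiated? */
    };

    /* Stand-in for reading the status field from device config space. */
    static uint16_t
    read_status_from_config(struct hw_state *hw)
    {
        (void)hw;
        return 0x1; /* pretend the device reports VIRTIO_NET_S_LINK_UP */
    }

    static int
    link_is_up(struct hw_state *hw)
    {
        if (!hw->started)
            return 0; /* stopped port: always report DOWN */
        if (hw->has_status_feat)
            return read_status_from_config(hw) & 0x1;
        return 1;     /* no STATUS feature: assume UP */
    }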
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 8d5355c7..f6d697f3 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -48,6 +48,7 @@
*/
#define PCI_CAPABILITY_LIST 0x34
#define PCI_CAP_ID_VNDR 0x09
+#define PCI_CAP_ID_MSIX 0x11
/*
* The remaining space is defined by each driver as the per-driver
@@ -517,7 +518,7 @@ modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
- io_write16(1, vq->notify_addr);
+ io_write16(vq->vq_queue_index, vq->notify_addr);
}
const struct virtio_pci_ops modern_ops = {
@@ -670,6 +671,9 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
break;
}
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX)
+ hw->use_msix = 1;
+
if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
PMD_INIT_LOG(DEBUG,
"[%2x] skipping non VNDR cap id: %02x",
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 511a1c87..40f7e424 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -160,7 +160,8 @@ struct virtnet_ctl;
/*
* Maximum number of virtqueues per device.
*/
-#define VIRTIO_MAX_VIRTQUEUES 8
+#define VIRTIO_MAX_VIRTQUEUE_PAIRS 8
+#define VIRTIO_MAX_VIRTQUEUES (VIRTIO_MAX_VIRTQUEUE_PAIRS * 2 + 1)
/* Common configuration */
#define VIRTIO_PCI_CAP_COMMON_CFG 1
@@ -248,6 +249,7 @@ struct virtio_hw {
uint64_t req_guest_features;
uint64_t guest_features;
uint32_t max_queue_pairs;
+ uint16_t started;
uint16_t vtnet_hdr_size;
uint8_t vlan_strip;
uint8_t use_msix;
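
The renamed macro makes the queue accounting explicit: every queue pair contributes one RX and one TX virtqueue, plus at most one control virtqueue per device, which is what the virtio_user hunks below rely on to size their fd and vring arrays. The arithmetic, as a compile-time check:

    #define VIRTIO_MAX_VIRTQUEUE_PAIRS 8
    #define VIRTIO_MAX_VIRTQUEUES (VIRTIO_MAX_VIRTQUEUE_PAIRS * 2 + 1)

    /* 8 pairs -> 8 RX + 8 TX + 1 control queue = 17 virtqueues. */
    _Static_assert(VIRTIO_MAX_VIRTQUEUES == 17, "virtqueue accounting");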
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index a38398b8..91f6a59a 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -230,7 +230,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
parse_mac(dev, mac);
dev->vhostfd = -1;
- for (i = 0; i < VIRTIO_MAX_VIRTQUEUES * 2 + 1; ++i) {
+ for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
dev->kickfds[i] = -1;
dev->callfds[i] = -1;
}
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 28fc788e..1326b912 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -40,8 +40,8 @@
struct virtio_user_dev {
int vhostfd;
- int callfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
- int kickfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
+ int callfds[VIRTIO_MAX_VIRTQUEUES];
+ int kickfds[VIRTIO_MAX_VIRTQUEUES];
int mac_specified;
uint32_t max_queue_pairs;
uint32_t queue_pairs;
@@ -53,7 +53,7 @@ struct virtio_user_dev {
uint8_t status;
uint8_t mac_addr[ETHER_ADDR_LEN];
char path[PATH_MAX];
- struct vring vrings[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
+ struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
};
int virtio_user_start_device(struct virtio_user_dev *dev);
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 013600e4..f018724f 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -416,6 +416,13 @@ virtio_user_pmd_probe(const char *name, const char *params)
goto end;
}
+ if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
+ PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
+ VIRTIO_USER_ARG_QUEUES_NUM, queues,
+ VIRTIO_MAX_VIRTQUEUE_PAIRS);
+ goto end;
+ }
+
eth_dev = virtio_user_eth_dev_alloc(name);
if (!eth_dev) {
PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index b1070e05..569c251c 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -71,8 +71,14 @@ struct rte_mbuf;
/**
* Return the physical address (or virtual address in case of
* virtio-user) of mbuf data buffer.
+ *
+ * The address is first cast to the word size (sizeof(uintptr_t))
+ * before being cast to uint64_t. This makes it work across
+ * combinations of word size (64-bit and 32-bit) and virtio device
+ * type (virtio-pci and virtio-user).
*/
-#define VIRTIO_MBUF_ADDR(mb, vq) (*(uint64_t *)((uintptr_t)(mb) + (vq)->offset))
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+ ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
#else
#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_physaddr)
#endif
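
The double cast fixes virtio-user on 32-bit builds: there, (vq)->offset points at buf_addr, a native pointer, so dereferencing through uint64_t * reads four bytes of the neighbouring field as well. Going through uintptr_t * reads exactly pointer-width bytes, and the outer cast widens the value losslessly. An illustration of the failure mode, with a hypothetical 32-bit layout:

    #include <stdint.h>

    /* On a 32-bit ABI, buf_addr occupies 4 bytes and next_field sits
     * immediately after it. */
    struct mbuf32 {
        void *buf_addr;      /* the field the vq offset points at */
        uint32_t next_field; /* smashed into an 8-byte uint64_t read */
    };

    /* Old shape: always reads 8 bytes, regardless of pointer width. */
    #define ADDR_BROKEN(mb, off) (*(uint64_t *)((uintptr_t)(mb) + (off)))

    /* Fixed shape: read pointer-width bytes first, then widen. */
    #define ADDR_FIXED(mb, off) \
        ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (off))))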
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 8bb13e52..f123df90 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -771,7 +771,7 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- struct rte_eth_link old, link;
+ struct rte_eth_link old = { 0 }, link;
uint32_t ret;
/* Link status doesn't change for stopped dev */
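
Zero-initializing old closes an uninitialized read: the update path snapshots the previous link record into old and compares it against the freshly built link to decide whether anything changed, and stack garbage in old makes that verdict nondeterministic. The comparison it feeds, roughly, with simplified stand-in types:

    #include <stdint.h>
    #include <string.h>

    struct eth_link {
        uint32_t link_speed;
        uint8_t link_duplex;
        uint8_t link_status;
    };

    /* Nonzero when the link state changed; comparing an uninitialized
     * "old" here would be an undefined-behaviour read. */
    static int
    link_changed(const struct eth_link *old, const struct eth_link *new_link)
    {
        return memcmp(old, new_link, sizeof(*old)) != 0;
    }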
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
index b50d2b00..a3d330cc 100644
--- a/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -138,6 +138,7 @@ typedef struct vmxnet3_tx_queue {
uint32_t qid;
struct Vmxnet3_TxQueueDesc *shared;
struct vmxnet3_txq_stats stats;
+ const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device TX queue index. */
uint8_t port_id; /**< Device port identifier. */
@@ -161,6 +162,7 @@ typedef struct vmxnet3_rx_queue {
struct rte_mbuf *start_seg;
struct rte_mbuf *last_seg;
struct vmxnet3_rxq_stats stats;
+ const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device RX queue index. */
uint8_t port_id; /**< Device port identifier. */
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 93db10fb..3ded18ee 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -192,6 +192,8 @@ vmxnet3_dev_tx_queue_release(void *txq)
vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
/* Release the cmd_ring */
vmxnet3_cmd_ring_release(&tq->cmd_ring);
+ /* Release the memzone */
+ rte_memzone_free(tq->mz);
}
}
@@ -209,6 +211,9 @@ vmxnet3_dev_rx_queue_release(void *rxq)
/* Release both the cmd_rings */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
+
+ /* Release the memzone */
+ rte_memzone_free(rq->mz);
}
}
@@ -816,30 +821,6 @@ rcd_done:
return nb_rx;
}
-/*
- * Create memzone for device rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
- dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, VMXNET3_RING_BA_ALIGN);
-}
-
int
vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -907,11 +888,13 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
- mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+ mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
}
+ txq->mz = mz;
memset(mz->addr, 0, mz->len);
/* cmd_ring initialization */
@@ -1009,11 +992,13 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
- mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
+ mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
}
+ rxq->mz = mz;
memset(mz->addr, 0, mz->len);
/* cmd_ring0 initialization */
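
Net effect of the vmxnet3 hunks: ring memory now comes from the common rte_eth_dma_zone_reserve() helper, and the returned handle is stored on the queue so the release path can hand the zone back instead of leaking it across queue teardown. The pairing in outline, as a compilable sketch; struct ring_ctx and the 512-byte alignment are assumptions standing in for the driver's queue structures and VMXNET3_RING_BA_ALIGN:

    #include <errno.h>
    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_memzone.h>

    struct ring_ctx {
        const struct rte_memzone *mz; /* owned descriptor-ring memory */
    };

    static int
    ring_alloc(struct rte_eth_dev *dev, struct ring_ctx *ctx,
               uint16_t queue_idx, uint32_t size, int socket_id)
    {
        const struct rte_memzone *mz;

        mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
                                      512 /* assumed ring alignment */,
                                      socket_id);
        if (mz == NULL)
            return -ENOMEM;
        ctx->mz = mz; /* remember the handle for release */
        memset(mz->addr, 0, mz->len);
        return 0;
    }

    static void
    ring_free(struct ring_ctx *ctx)
    {
        rte_memzone_free(ctx->mz);
        ctx->mz = NULL;
    }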