From 47d9763a1dd3103d732da9eec350cfc1cd784717 Mon Sep 17 00:00:00 2001 From: Luca Boccassi Date: Fri, 8 Dec 2017 17:16:13 +0000 Subject: New upstream version 16.11.4 Change-Id: I733e0292d2e060161d148b3e114065d00b36d2ba Signed-off-by: Luca Boccassi --- drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c | 2 +- drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 2 +- drivers/crypto/kasumi/rte_kasumi_pmd_ops.c | 2 +- drivers/crypto/null/null_crypto_pmd_ops.c | 2 +- drivers/crypto/openssl/rte_openssl_pmd_ops.c | 2 +- drivers/crypto/qat/qat_crypto.c | 2 +- drivers/crypto/snow3g/rte_snow3g_pmd_ops.c | 2 +- drivers/crypto/zuc/rte_zuc_pmd_ops.c | 2 +- drivers/net/bnxt/bnxt.h | 2 + drivers/net/bnxt/bnxt_cpr.c | 4 +- drivers/net/bnxt/bnxt_ethdev.c | 47 ++++++----- drivers/net/bnxt/bnxt_hwrm.c | 64 ++++++++------ drivers/net/bnxt/bnxt_irq.c | 11 ++- drivers/net/bnxt/bnxt_rxr.c | 11 +++ drivers/net/bnxt/bnxt_rxr.h | 16 ++++ drivers/net/bnxt/bnxt_txr.c | 32 +++++-- drivers/net/bnxt/bnxt_txr.h | 21 +++++ drivers/net/bonding/rte_eth_bond_8023ad.c | 37 ++++----- drivers/net/cxgbe/base/t4_hw.c | 3 + drivers/net/e1000/igb_ethdev.c | 18 +++- drivers/net/enic/enic_main.c | 6 +- drivers/net/i40e/base/i40e_osdep.h | 2 +- drivers/net/i40e/i40e_ethdev.c | 93 +++++++++++++-------- drivers/net/i40e/i40e_ethdev_vf.c | 101 ++++++++++++++++++++--- drivers/net/i40e/i40e_pf.c | 14 +++- drivers/net/i40e/i40e_rxtx.c | 110 +++++++++++++++---------- drivers/net/i40e/i40e_rxtx_vec_neon.c | 19 +++-- drivers/net/ixgbe/ixgbe_ethdev.c | 44 +++++++--- drivers/net/ixgbe/ixgbe_pf.c | 2 +- drivers/net/ixgbe/ixgbe_rxtx.c | 41 +++++++-- drivers/net/mlx5/mlx5.c | 7 +- drivers/net/mlx5/mlx5_ethdev.c | 81 +++++++++--------- drivers/net/mlx5/mlx5_rxq.c | 6 +- drivers/net/mlx5/mlx5_rxtx.c | 12 ++- drivers/net/mlx5/mlx5_rxtx.h | 2 +- drivers/net/mlx5/mlx5_stats.c | 4 +- drivers/net/mlx5/mlx5_utils.h | 4 + drivers/net/nfp/nfp_net.c | 11 ++- drivers/net/pcap/rte_eth_pcap.c | 2 + drivers/net/qede/Makefile | 5 +- drivers/net/qede/base/ecore.h | 79 +++++++++--------- drivers/net/qede/base/ecore_cxt.c | 1 + drivers/net/qede/base/ecore_dcbx.c | 12 ++- drivers/net/qede/base/ecore_dev.c | 29 +++---- drivers/net/qede/base/ecore_hw.c | 2 +- drivers/net/qede/base/ecore_hw.h | 11 --- drivers/net/qede/base/ecore_mcp.c | 2 +- drivers/net/qede/base/ecore_sriov.c | 21 +++-- drivers/net/qede/base/ecore_vf.c | 2 +- drivers/net/virtio/virtio_ethdev.c | 15 ++++ drivers/net/virtio/virtio_ethdev.h | 6 ++ drivers/net/virtio/virtio_rxtx.c | 48 ++++++++--- drivers/net/virtio/virtio_rxtx_simple.c | 2 + drivers/net/virtio/virtio_user/vhost_user.c | 4 + drivers/net/virtio/virtqueue.c | 25 ++++++ drivers/net/virtio/virtqueue.h | 5 ++ drivers/net/vmxnet3/vmxnet3_ethdev.c | 6 +- drivers/net/vmxnet3/vmxnet3_rxtx.c | 13 +-- 58 files changed, 769 insertions(+), 362 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c index c51f82a8..d4ff651f 100644 --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c @@ -184,7 +184,7 @@ aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev, "aesni_gcm_pmd_%u_qp_%u", dev->data->dev_id, qp->id); - if (n > sizeof(qp->name)) + if (n >= sizeof(qp->name)) return -1; return 0; diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c index 287c8a50..e531c880 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c 
@@ -333,7 +333,7 @@ aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev, "aesni_mb_pmd_%u_qp_%u", dev->data->dev_id, qp->id); - if (n > sizeof(qp->name)) + if (n >= sizeof(qp->name)) return -1; return 0; diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c index b9285a43..8f8695d8 100644 --- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c @@ -186,7 +186,7 @@ kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev, "kasumi_pmd_%u_qp_%u", dev->data->dev_id, qp->id); - if (n > sizeof(qp->name)) + if (n >= sizeof(qp->name)) return -1; return 0; diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c index 26ff6319..421f21e0 100644 --- a/drivers/crypto/null/null_crypto_pmd_ops.c +++ b/drivers/crypto/null/null_crypto_pmd_ops.c @@ -178,7 +178,7 @@ null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev, "null_crypto_pmd_%u_qp_%u", dev->data->dev_id, qp->id); - if (n > sizeof(qp->name)) + if (n >= sizeof(qp->name)) return -1; return 0; diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c index a072e6e3..7bf82e1a 100644 --- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c +++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c @@ -543,7 +543,7 @@ openssl_pmd_qp_set_unique_name(struct rte_cryptodev *dev, "openssl_pmd_%u_qp_%u", dev->data->dev_id, qp->id); - if (n > sizeof(qp->name)) + if (n >= sizeof(qp->name)) return -1; return 0; diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c index f7fcecea..8b830b80 100644 --- a/drivers/crypto/qat/qat_crypto.c +++ b/drivers/crypto/qat/qat_crypto.c @@ -162,7 +162,7 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = { .key_size = { .min = 1, .max = 128, - .increment = 128 + .increment = 1 }, .digest_size = { .min = 64, diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c index 4602dfd4..7cb47c03 100644 --- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c +++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c @@ -183,7 +183,7 @@ snow3g_pmd_qp_set_unique_name(struct rte_cryptodev *dev, "snow3g_pmd_%u_qp_%u", dev->data->dev_id, qp->id); - if (n > sizeof(qp->name)) + if (n >= sizeof(qp->name)) return -1; return 0; diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c index 2c886d51..620a9dae 100644 --- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c +++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c @@ -183,7 +183,7 @@ zuc_pmd_qp_set_unique_name(struct rte_cryptodev *dev, "zuc_pmd_%u_qp_%u", dev->data->dev_id, qp->id); - if (n > sizeof(qp->name)) + if (n >= sizeof(qp->name)) return -1; return 0; diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h index 4418c7fd..ff3c2406 100644 --- a/drivers/net/bnxt/bnxt.h +++ b/drivers/net/bnxt/bnxt.h @@ -112,6 +112,8 @@ struct bnxt_link_info { uint16_t auto_link_speed; uint16_t auto_link_speed_mask; uint32_t preemphasis; + uint8_t phy_type; + uint8_t media_type; }; #define BNXT_COS_QUEUE_COUNT 8 diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c index 3aedcb8d..6f88ac91 100644 --- a/drivers/net/bnxt/bnxt_cpr.c +++ b/drivers/net/bnxt/bnxt_cpr.c @@ -55,7 +55,7 @@ void bnxt_handle_async_event(struct bnxt *bp, case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: - 
bnxt_link_update_op(bp->eth_dev, 0); + bnxt_link_update_op(bp->eth_dev, 1); break; default: RTE_LOG(ERR, PMD, "handle_async_event id = 0x%x\n", event_id); @@ -123,8 +123,10 @@ void bnxt_free_def_cp_ring(struct bnxt *bp) return; bnxt_free_ring(cpr->cp_ring_struct); + cpr->cp_ring_struct = NULL; rte_free(cpr->cp_ring_struct); rte_free(cpr); + bp->def_cp_ring = NULL; } /* For the default completion ring only */ diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 035fe07a..b6feb5ad 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -106,6 +106,8 @@ static struct rte_pci_id bnxt_pci_id_map[] = { ETH_RSS_NONFRAG_IPV6_TCP | \ ETH_RSS_NONFRAG_IPV6_UDP) +static void bnxt_print_link_info(struct rte_eth_dev *eth_dev); + /***********************/ /* @@ -261,6 +263,7 @@ static int bnxt_init_chip(struct bnxt *bp) goto err_out; } } + bnxt_print_link_info(bp->eth_dev); return 0; @@ -325,8 +328,13 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, dev_info->min_rx_bufsize = 1; dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE; - dev_info->rx_offload_capa = 0; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM | + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO; @@ -410,20 +418,6 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) return 0; } -static inline int -rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = ð_dev->data->dev_link; - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return 1; - - return 0; -} - static void bnxt_print_link_info(struct rte_eth_dev *eth_dev) { struct rte_eth_link *link = ð_dev->data->dev_link; @@ -476,7 +470,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) bnxt_enable_int(bp); - bnxt_link_update_op(eth_dev, 0); + bnxt_link_update_op(eth_dev, 1); return 0; error: @@ -492,9 +486,14 @@ error: static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + int rc = 0; + + if (!bp->link_info.link_up) + rc = bnxt_set_hwrm_link_config(bp, true); + if (!rc) + eth_dev->data->dev_link.link_status = 1; - eth_dev->data->dev_link.link_status = 1; - bnxt_set_hwrm_link_config(bp, true); + bnxt_print_link_info(eth_dev); return 0; } @@ -504,6 +503,8 @@ static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) eth_dev->data->dev_link.link_status = 0; bnxt_set_hwrm_link_config(bp, false); + bp->link_info.link_up = 0; + return 0; } @@ -550,13 +551,14 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; struct bnxt_vnic_info *vnic; struct bnxt_filter_info *filter, *temp_filter; - int i; + uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS); + uint32_t i; /* * Loop through all VNICs from the specified filter flow pools to * remove the corresponding MAC addr filter */ - for (i = 0; i < MAX_FF_POOLS; i++) { + for (i = 0; i < pool; i++) { if (!(pool_mask & (1ULL << i))) continue; @@ -645,7 +647,8 @@ out: /* Timed out or success */ if (new.link_status != 
eth_dev->data->dev_link.link_status || new.link_speed != eth_dev->data->dev_link.link_speed) { - rte_bnxt_atomic_write_link_status(eth_dev, &new); + memcpy(ð_dev->data->dev_link, &new, + sizeof(struct rte_eth_link)); bnxt_print_link_info(eth_dev); } diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index 93910d81..619bc979 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -177,8 +177,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS; if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; - req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST | - mask); + req.mask = rte_cpu_to_le_32(mask); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -482,34 +481,38 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) struct hwrm_port_phy_cfg_input req = {0}; struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr; uint32_t enables = 0; - uint32_t link_speed_mask = - HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK; HWRM_PREP(req, PORT_PHY_CFG, -1, resp); if (conf->link_up) { + /* Setting Fixed Speed. But AutoNeg is ON, So disable it */ + if (bp->link_info.auto_mode && conf->link_speed) { + req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE; + RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n"); + } + req.flags = rte_cpu_to_le_32(conf->phy_flags); req.force_link_speed = rte_cpu_to_le_16(conf->link_speed); + enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE; /* * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set * any auto mode, even "none". */ if (!conf->link_speed) { - req.auto_mode = conf->auto_mode; - enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE; - if (conf->auto_mode == - HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) { - req.auto_link_speed_mask = - conf->auto_link_speed_mask; - enables |= link_speed_mask; - } - if (bp->link_info.auto_link_speed) { - req.auto_link_speed = - bp->link_info.auto_link_speed; - enables |= - HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED; - } + /* No speeds specified. Enable AutoNeg - all speeds */ + req.auto_mode = + HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS; + } + /* AutoNeg - Advertise speeds specified. */ + if (conf->auto_link_speed_mask) { + req.auto_mode = + HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK; + req.auto_link_speed_mask = + conf->auto_link_speed_mask; + enables |= + HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK; } + req.auto_duplex = conf->duplex; enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX; req.auto_pause = conf->auto_pause; @@ -557,6 +560,8 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, link_info->auto_pause = resp->auto_pause; link_info->force_pause = resp->force_pause; link_info->auto_mode = resp->auto_mode; + link_info->phy_type = resp->phy_type; + link_info->media_type = resp->media_type; link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds); link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed); @@ -1275,6 +1280,11 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed) return hw_link_duplex; } +static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link) +{ + return (conf_link & ETH_LINK_SPEED_FIXED) ? 
0 : 1; +} + static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) { uint16_t eth_link_speed = 0; @@ -1483,7 +1493,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) int rc = 0; struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_link_info link_req; - uint16_t speed; + uint16_t speed, autoneg; if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) return 0; @@ -1498,20 +1508,28 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) if (!link_up) goto port_phy_cfg; + autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds); speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds); link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY; - if (speed == 0) { + if (autoneg == 1) { link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG; - link_req.auto_mode = - HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK; link_req.auto_link_speed_mask = bnxt_parse_eth_link_speed_mask(bp, dev_conf->link_speeds); } else { + if (bp->link_info.phy_type == + HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET || + bp->link_info.phy_type == + HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE || + bp->link_info.media_type == + HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) { + RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n"); + return -EINVAL; + } + link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE; link_req.link_speed = speed; - RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed); } link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds); link_req.auto_pause = bp->link_info.auto_pause; diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c index e93585a0..851f39ca 100644 --- a/drivers/net/bnxt/bnxt_irq.c +++ b/drivers/net/bnxt/bnxt_irq.c @@ -51,11 +51,18 @@ static void bnxt_int_handler(struct rte_intr_handle *handle __rte_unused, struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; - uint32_t raw_cons = cpr->cp_raw_cons; - uint32_t cons; struct cmpl_base *cmp; + uint32_t raw_cons; + uint32_t cons; + if (cpr == NULL) + return; + + raw_cons = cpr->cp_raw_cons; while (1) { + if (!cpr || !cpr->cp_ring_struct) + return; + cons = RING_CMP(cpr->cp_ring_struct, raw_cons); cmp = &cpr->cp_desc_ring[cons]; diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c index 5d93de26..980f3ec5 100644 --- a/drivers/net/bnxt/bnxt_rxr.c +++ b/drivers/net/bnxt/bnxt_rxr.c @@ -148,6 +148,17 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt, } rx_buf->mbuf = NULL; + + if (likely(RX_CMP_IP_CS_OK(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + else + mbuf->ol_flags |= PKT_RX_IP_CKSUM_NONE; + + if (likely(RX_CMP_L4_CS_OK(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + else + mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE; + if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) { /* Re-install the mbuf back to the rx ring */ bnxt_reuse_rx_mbuf(rxr, cons, mbuf); diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h index f766b26c..111a2136 100644 --- a/drivers/net/bnxt/bnxt_rxr.h +++ b/drivers/net/bnxt/bnxt_rxr.h @@ -37,6 +37,22 @@ #define B_RX_DB(db, prod) \ (*(uint32_t *)db = (DB_KEY_RX | prod)) +#define RX_CMP_L4_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC) + +#define RX_CMP_L4_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR) + +#define RX_CMP_L4_CS_OK(rxcmp1) \ + (((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \ + !((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS)) + +#define RX_CMP_IP_CS_ERR_BITS 
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR) + +#define RX_CMP_IP_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC) + +#define RX_CMP_IP_CS_OK(rxcmp1) \ + (((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \ + !((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS)) + struct bnxt_sw_rx_bd { struct rte_mbuf *mbuf; /* data associated with RX descriptor */ }; diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c index 8bf8fee3..c2f9ae78 100644 --- a/drivers/net/bnxt/bnxt_txr.c +++ b/drivers/net/bnxt/bnxt_txr.c @@ -161,7 +161,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM | - PKT_TX_VLAN_PKT)) + PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM)) long_bd = true; tx_buf = &txr->tx_buf_ring[txr->tx_prod]; @@ -211,20 +211,38 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) { /* TSO */ - txbd1->lflags = TX_BD_LONG_LFLAGS_LSO; + txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO; txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len; txbd1->mss = tx_pkt->tso_segsz; - } else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM | - PKT_TX_UDP_CKSUM)) { + } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) { + /* Outer IP, Inner IP, Inner TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if (tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) { + /* (Inner) IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if (tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) { + /* Outer IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) { + /* Outer IP, Inner IP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM; + txbd1->mss = 0; + } else if (tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) { /* TCP/UDP CSO */ - txbd1->lflags = TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; + txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; txbd1->mss = 0; - } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) { /* IP CSO */ - txbd1->lflags = TX_BD_LONG_LFLAGS_IP_CHKSUM; + txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM; + txbd1->mss = 0; + } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) { + /* IP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM; txbd1->mss = 0; } } else { diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h index 4c16101b..cb961f1b 100644 --- a/drivers/net/bnxt/bnxt_txr.h +++ b/drivers/net/bnxt/bnxt_txr.h @@ -69,4 +69,25 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id); uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ + PKT_TX_IP_CKSUM) +#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM) + + +#define TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \ + TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \ + TX_BD_LONG_LFLAGS_IP_CHKSUM) +#define TX_BD_FLG_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \ + TX_BD_LONG_LFLAGS_IP_CHKSUM) +#define TX_BD_FLG_TIP_IP_CHKSUM (TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \ + 
TX_BD_LONG_LFLAGS_IP_CHKSUM) +#define TX_BD_FLG_TIP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \ + TX_BD_LONG_LFLAGS_T_IP_CHKSUM) + #endif diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c index b4a1e727..8081981c 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.c +++ b/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -934,37 +934,30 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id) } int -bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev, +bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused, uint8_t slave_id) { - struct bond_dev_private *internals = bond_dev->data->dev_private; void *pkt = NULL; - struct port *port; - uint8_t i; + struct port *port = NULL; + uint8_t old_partner_state; - /* Given slave must be in active list */ - RTE_ASSERT(find_slave_by_id(internals->active_slaves, - internals->active_slave_count, slave_id) < internals->active_slave_count); + port = &mode_8023ad_ports[slave_id]; - /* Exclude slave from transmit policy. If this slave is an aggregator - * make all aggregated slaves unselected to force selection logic - * to select suitable aggregator for this port. */ - for (i = 0; i < internals->active_slave_count; i++) { - port = &mode_8023ad_ports[internals->active_slaves[i]]; - if (port->aggregator_port_id != slave_id) - continue; + ACTOR_STATE_CLR(port, AGGREGATION); + port->selected = UNSELECTED; - port->selected = UNSELECTED; + old_partner_state = port->partner_state; + record_default(port); - /* Use default aggregator */ - port->aggregator_port_id = internals->active_slaves[i]; - } + /* If partner timeout state changes then disable timer */ + if (!((old_partner_state ^ port->partner_state) & + STATE_LACP_SHORT_TIMEOUT)) + timer_cancel(&port->current_while_timer); - port = &mode_8023ad_ports[slave_id]; - port->selected = UNSELECTED; - port->actor_state &= ~(STATE_SYNCHRONIZATION | STATE_DISTRIBUTING | - STATE_COLLECTING); + PARTNER_STATE_CLR(port, AGGREGATION); + ACTOR_STATE_CLR(port, EXPIRED); + /* flush rx/tx rings */ while (rte_ring_dequeue(port->rx_ring, &pkt) == 0) rte_pktmbuf_free((struct rte_mbuf *)pkt); diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c index 19afdac9..565a6959 100644 --- a/drivers/net/cxgbe/base/t4_hw.c +++ b/drivers/net/cxgbe/base/t4_hw.c @@ -403,6 +403,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, t4_os_atomic_list_del(&entry, &adap->mbox_list, &adap->mbox_lock); t4_report_fw_error(adap); + free(temp); return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY; } @@ -446,6 +447,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, &adap->mbox_list, &adap->mbox_lock)); t4_report_fw_error(adap); + free(temp); return (v == X_MBOWNER_FW ? 
-EBUSY : -ETIMEDOUT); } @@ -546,6 +548,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, T4_OS_MBOX_LOCKING( t4_os_atomic_list_del(&entry, &adap->mbox_list, &adap->mbox_lock)); + free(temp); return -G_FW_CMD_RETVAL((int)res); } } diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index 9cf619fa..407021df 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@ -3839,7 +3839,7 @@ eth_igb_get_flex_filter(struct rte_eth_dev *dev, flex_filter.filter_info.priority = filter->priority; memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len); memcpy(flex_filter.filter_info.mask, filter->mask, - RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char)); + RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT); it = eth_igb_flex_filter_lookup(&filter_info->flex_list, &flex_filter.filter_info); @@ -5095,7 +5095,13 @@ eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t mask = 1 << queue_id; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + uint32_t vec = E1000_MISC_VEC_ID; + + if (rte_intr_allow_others(intr_handle)) + vec = E1000_RX_VEC_START; + + uint32_t mask = 1 << (queue_id + vec); E1000_WRITE_REG(hw, E1000_EIMC, mask); E1000_WRITE_FLUSH(hw); @@ -5108,7 +5114,13 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t mask = 1 << queue_id; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + uint32_t vec = E1000_MISC_VEC_ID; + + if (rte_intr_allow_others(intr_handle)) + vec = E1000_RX_VEC_START; + + uint32_t mask = 1 << (queue_id + vec); uint32_t regval; regval = E1000_READ_REG(hw, E1000_EIMS); diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index 1861a32c..b25eff4f 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -231,7 +231,7 @@ enic_free_rq_buf(struct rte_mbuf **mbuf) return; rte_pktmbuf_free(*mbuf); - mbuf = NULL; + *mbuf = NULL; } void enic_init_vnic_resources(struct enic *enic) @@ -375,6 +375,7 @@ enic_alloc_consistent(void *priv, size_t size, pr_err("%s : Failed to allocate memory for memzone list\n", __func__); rte_memzone_free(rz); + return NULL; } mze->rz = rz; @@ -1124,11 +1125,12 @@ static int enic_reinit_rq(struct enic *enic, unsigned int rq_idx) { struct vnic_rq *sop_rq, *data_rq; - unsigned int cq_idx = enic_cq_rq(enic, rq_idx); + unsigned int cq_idx; int rc = 0; sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)]; data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)]; + cq_idx = rq_idx; vnic_cq_clean(&enic->cq[cq_idx]); vnic_cq_init(&enic->cq[cq_idx], diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h index 38e7ba5b..67f2946e 100644 --- a/drivers/net/i40e/base/i40e_osdep.h +++ b/drivers/net/i40e/base/i40e_osdep.h @@ -35,6 +35,7 @@ #include #include +#include #include #include @@ -56,7 +57,6 @@ typedef uint16_t u16; typedef uint32_t u32; typedef int32_t s32; typedef uint64_t u64; -typedef int bool; typedef enum i40e_status_code i40e_status; #define __iomem diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 65e10f3b..0835c2d4 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -83,12 +83,6 @@ /* Flow control default timer */ #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU -/* Flow control default high water */ -#define 
I40E_DEFAULT_HIGH_WATER (0x1C40/1024) - -/* Flow control default low water */ -#define I40E_DEFAULT_LOW_WATER (0x1A40/1024) - /* Flow control enable fwd bit */ #define I40E_PRTMAC_FWD_CTRL 0x00000001 @@ -98,6 +92,12 @@ /* Kilobytes shift */ #define I40E_KILOSHIFT 10 +/* Flow control default high water */ +#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT) + +/* Flow control default low water */ +#define I40E_DEFAULT_LOW_WATER (0xF2000 >> I40E_KILOSHIFT) + /* Receive Average Packet Size in Byte*/ #define I40E_PACKET_AVERAGE_SIZE 128 @@ -422,6 +422,12 @@ static int i40e_dev_sync_phy_type(struct i40e_hw *hw); static void i40e_configure_registers(struct i40e_hw *hw); static void i40e_hw_init(struct rte_eth_dev *dev); static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi); +static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw, + uint16_t seid, + uint16_t rule_type, + uint16_t *entries, + uint16_t count, + uint16_t rule_id); static int i40e_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, uint8_t sw_id, uint8_t on); @@ -723,23 +729,22 @@ RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map); static inline void i40e_GLQF_reg_init(struct i40e_hw *hw) { /* - * Initialize registers for flexible payload, which should be set by NVM. - * This should be removed from code once it is fixed in NVM. + * Force global configuration for flexible payload + * to the first 16 bytes of the corresponding L2/L3/L4 paylod. + * This should be removed from code once proper + * configuration API is added to avoid configuration conflicts + * between ports of the same device. */ - I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B); I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0); I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3); I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D); - I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480); - I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440); - /* Initialize registers for parsing packet type of QinQ */ + /* + * Initialize registers for parsing packet type of QinQ + * This should be removed from code once proper + * configuration API is added to avoid configuration conflicts + * between ports of the same device. 
+ */ I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029); I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420); } @@ -1819,7 +1824,6 @@ i40e_dev_stop(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_vsi *main_vsi = pf->main_vsi; - struct i40e_mirror_rule *p_mirror; struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; int i; @@ -1845,13 +1849,6 @@ i40e_dev_stop(struct rte_eth_dev *dev) /* Set link down */ i40e_dev_set_link_down(dev); - /* Remove all mirror rules */ - while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { - TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules); - rte_free(p_mirror); - } - pf->nb_mirror_rule = 0; - if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ rte_intr_callback_register(intr_handle, @@ -1871,13 +1868,35 @@ i40e_dev_close(struct rte_eth_dev *dev) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_mirror_rule *p_mirror; uint32_t reg; int i; + int ret; PMD_INIT_FUNC_TRACE(); i40e_dev_stop(dev); hw->adapter_stopped = 1; + + /* Remove all mirror rules */ + while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) { + ret = i40e_aq_del_mirror_rule(hw, + pf->main_vsi->veb->seid, + p_mirror->rule_type, + p_mirror->entries, + p_mirror->num_entries, + p_mirror->id); + if (ret < 0) + PMD_DRV_LOG(ERR, "failed to remove mirror rule: " + "status = %d, aq_err = %d.", ret, + hw->aq.asq_last_status); + + /* remove mirror software resource anyway */ + TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules); + rte_free(p_mirror); + pf->nb_mirror_rule--; + } + i40e_dev_free_queues(dev); /* Disable interrupt */ @@ -2376,13 +2395,14 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* call read registers - updates values, now write them to struct */ i40e_read_stats_registers(pf, hw); - stats->ipackets = pf->main_vsi->eth_stats.rx_unicast + - pf->main_vsi->eth_stats.rx_multicast + - pf->main_vsi->eth_stats.rx_broadcast - + stats->ipackets = ns->eth.rx_unicast + + ns->eth.rx_multicast + + ns->eth.rx_broadcast - + ns->eth.rx_discards - pf->main_vsi->eth_stats.rx_discards; - stats->opackets = pf->main_vsi->eth_stats.tx_unicast + - pf->main_vsi->eth_stats.tx_multicast + - pf->main_vsi->eth_stats.tx_broadcast; + stats->opackets = ns->eth.tx_unicast + + ns->eth.tx_multicast + + ns->eth.tx_broadcast; stats->ibytes = ns->eth.rx_bytes; stats->obytes = ns->eth.tx_bytes; stats->oerrors = ns->eth.tx_errors + @@ -2879,6 +2899,13 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); fc_conf->pause_time = pf->fc_conf.pause_time; + + /* read out from register, in case they are modified by other port */ + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = + I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT; + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = + I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT; + fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]; fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]; @@ -8354,7 +8381,7 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) /* For both X710 and XL710 */ #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200 -#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200 +#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200 #define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 
0x011f0200 diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index efd4fac1..1686914a 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -68,7 +68,6 @@ #include "i40e_ethdev.h" #include "i40e_pf.h" #define I40EVF_VSI_DEFAULT_MSIX_INTR 1 -#define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0 /* busy wait delay in msec */ #define I40EVF_BUSY_WAIT_DELAY 10 @@ -723,11 +722,12 @@ i40evf_config_irq_map(struct rte_eth_dev *dev) uint32_t vector_id; int i, err; - if (rte_intr_allow_others(intr_handle)) { + if (dev->data->dev_conf.intr_conf.rxq != 0 && + rte_intr_allow_others(intr_handle)) { if (vf->version_major == I40E_DPDK_VERSION_MAJOR) vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR; else - vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX; + vector_id = I40E_RX_VEC_START; } else { vector_id = I40E_MISC_VEC_ID; } @@ -859,7 +859,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, int err; struct vf_cmd_info args; - if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) { + if (is_zero_ether_addr(addr)) { PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x", addr->addr_bytes[0], addr->addr_bytes[1], addr->addr_bytes[2], addr->addr_bytes[3], @@ -952,16 +952,74 @@ i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats) return 0; } +static void +i40evf_stat_update_48(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = *stat - *offset; + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset); + + *stat &= I40E_48_BIT_MASK; +} + +static void +i40evf_stat_update_32(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = (uint64_t)(*stat - *offset); + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset); +} + +static void +i40evf_update_vsi_stats(struct i40e_vsi *vsi, + struct i40e_eth_stats *nes) +{ + struct i40e_eth_stats *oes = &vsi->eth_stats_offset; + + i40evf_stat_update_48(&oes->rx_bytes, + &nes->rx_bytes); + i40evf_stat_update_48(&oes->rx_unicast, + &nes->rx_unicast); + i40evf_stat_update_48(&oes->rx_multicast, + &nes->rx_multicast); + i40evf_stat_update_48(&oes->rx_broadcast, + &nes->rx_broadcast); + i40evf_stat_update_32(&oes->rx_discards, + &nes->rx_discards); + i40evf_stat_update_32(&oes->rx_unknown_protocol, + &nes->rx_unknown_protocol); + i40evf_stat_update_48(&oes->tx_bytes, + &nes->tx_bytes); + i40evf_stat_update_48(&oes->tx_unicast, + &nes->tx_unicast); + i40evf_stat_update_48(&oes->tx_multicast, + &nes->tx_multicast); + i40evf_stat_update_48(&oes->tx_broadcast, + &nes->tx_broadcast); + i40evf_stat_update_32(&oes->tx_errors, &nes->tx_errors); + i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards); +} + static int i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { int ret; struct i40e_eth_stats *pstats = NULL; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_vsi *vsi = &vf->vsi; ret = i40evf_update_stats(dev, &pstats); if (ret != 0) return 0; + i40evf_update_vsi_stats(vsi, pstats); + stats->ipackets = pstats->rx_unicast + pstats->rx_multicast + pstats->rx_broadcast; stats->opackets = pstats->tx_broadcast + pstats->tx_multicast + @@ -984,7 +1042,7 @@ i40evf_dev_xstats_reset(struct rte_eth_dev *dev) i40evf_update_stats(dev, &pstats); /* set stats offset base on current values */ - vf->vsi.eth_stats_offset = vf->vsi.eth_stats; + vf->vsi.eth_stats_offset = *pstats; } static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, @@ -1008,6 
+1066,8 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, int ret; unsigned i; struct i40e_eth_stats *pstats = NULL; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_vsi *vsi = &vf->vsi; if (n < I40EVF_NB_XSTATS) return I40EVF_NB_XSTATS; @@ -1019,6 +1079,8 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, if (!xstats) return 0; + i40evf_update_vsi_stats(vsi, pstats); + /* loop over xstats array and values from pstats */ for (i = 0; i < I40EVF_NB_XSTATS; i++) { xstats[i].id = i; @@ -1210,29 +1272,29 @@ i40evf_init_vf(struct rte_eth_dev *dev) /* VF reset, shutdown admin queue and initialize again */ if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) { PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed"); - return -1; + goto err; } i40e_init_adminq_parameter(hw); if (i40e_init_adminq(hw) != I40E_SUCCESS) { PMD_INIT_LOG(ERR, "init_adminq failed"); - return -1; + goto err; } vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0); if (!vf->aq_resp) { PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory"); - goto err_aq; + goto err_aq; } if (i40evf_check_api_version(dev) != 0) { PMD_INIT_LOG(ERR, "check_api version failed"); - goto err_aq; + goto err_api; } bufsz = sizeof(struct i40e_virtchnl_vf_resource) + (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource)); vf->vf_res = rte_zmalloc("vf_res", bufsz, 0); if (!vf->vf_res) { PMD_INIT_LOG(ERR, "unable to allocate vf_res memory"); - goto err_aq; + goto err_api; } if (i40evf_get_vf_resource(dev) != 0) { @@ -1279,6 +1341,9 @@ i40evf_init_vf(struct rte_eth_dev *dev) err_alloc: rte_free(vf->vf_res); + vf->vsi_res = NULL; +err_api: + rte_free(vf->aq_resp); err_aq: i40e_shutdown_adminq(hw); /* ignore error */ err: @@ -1439,7 +1504,6 @@ i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, done: i40evf_enable_irq0(hw); - rte_intr_enable(&dev->pci_dev->intr_handle); } static int @@ -2090,7 +2154,20 @@ i40evf_dev_start(struct rte_eth_dev *dev) goto err_mac; } + /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt + * is mapped to VFIO vector 0 in i40evf_dev_init( ). + * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is + * not cleared, it will fail when rte_intr_enable( ) tries to map Rx + * queue interrupt to other VFIO vectors. + * So clear uio/vfio intr/evevnfd first to avoid failure. + */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { + rte_intr_disable(intr_handle); + rte_intr_enable(intr_handle); + } + i40evf_enable_queues_intr(dev); + return 0; err_mac: @@ -2160,6 +2237,8 @@ i40evf_dev_link_update(struct rte_eth_dev *dev, new_link.link_duplex = ETH_LINK_FULL_DUPLEX; new_link.link_status = vf->link_up ? 
ETH_LINK_UP : ETH_LINK_DOWN; + new_link.link_autoneg = + dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED; i40evf_dev_atomic_write_link_status(dev, &new_link); diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c index b36d9019..c5e06ca7 100644 --- a/drivers/net/i40e/i40e_pf.c +++ b/drivers/net/i40e/i40e_pf.c @@ -902,7 +902,10 @@ send_msg: static void i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf) { + struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); struct i40e_virtchnl_pf_event event; + uint16_t vf_id = vf->vf_idx; + uint32_t tval, rval; event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; event.event_data.link_event.link_status = @@ -934,8 +937,15 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf) break; } - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT, - I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); + tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id)); + rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id)); + + if (tval & I40E_VF_ATQLEN_ATQLEN_MASK || + tval & I40E_VF_ATQLEN_ATQENABLE_MASK || + rval & I40E_VF_ARQLEN_ARQLEN_MASK || + rval & I40E_VF_ARQLEN_ARQENABLE_MASK) + i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT, + I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); } void diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index ba33b2a5..86546ca8 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -1646,36 +1646,42 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { - struct i40e_vsi *vsi; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_adapter *ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct i40e_vsi *vsi; + struct i40e_pf *pf = NULL; + struct i40e_vf *vf = NULL; struct i40e_rx_queue *rxq; const struct rte_memzone *rz; uint32_t ring_size; uint16_t len, i; - uint16_t base, bsf, tc_mapping; - int use_def_burst_func = 1; + uint16_t reg_idx, base, bsf, tc_mapping; + int q_offset, use_def_burst_func = 1; if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { - struct i40e_vf *vf = - I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); vsi = &vf->vsi; - } else + if (!vsi) + return -EINVAL; + reg_idx = queue_idx; + } else { + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx); - - if (vsi == NULL) { - PMD_DRV_LOG(ERR, "VSI not available or queue " - "index exceeds the maximum"); - return I40E_ERR_PARAM; + if (!vsi) + return -EINVAL; + q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx); + if (q_offset < 0) + return -EINVAL; + reg_idx = vsi->base_queue + q_offset; } + if (nb_desc % I40E_ALIGN_RING_DESC != 0 || - (nb_desc > I40E_MAX_RING_DESC) || - (nb_desc < I40E_MIN_RING_DESC)) { + (nb_desc > I40E_MAX_RING_DESC) || + (nb_desc < I40E_MIN_RING_DESC)) { PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is " "invalid", nb_desc); - return I40E_ERR_PARAM; + return -EINVAL; } /* Free memory if needed */ @@ -1698,12 +1704,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->nb_rx_desc = nb_desc; rxq->rx_free_thresh = rx_conf->rx_free_thresh; rxq->queue_id = queue_idx; - if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) - rxq->reg_idx = queue_idx; - else /* PF device */ - rxq->reg_idx = vsi->base_queue + - 
i40e_get_queue_offset_by_qindex(pf, queue_idx); - + rxq->reg_idx = reg_idx; rxq->port_id = dev->data->port_id; rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN); @@ -1862,34 +1863,40 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { - struct i40e_vsi *vsi; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi; + struct i40e_pf *pf = NULL; + struct i40e_vf *vf = NULL; struct i40e_tx_queue *txq; const struct rte_memzone *tz; uint32_t ring_size; uint16_t tx_rs_thresh, tx_free_thresh; - uint16_t i, base, bsf, tc_mapping; + uint16_t reg_idx, i, base, bsf, tc_mapping; + int q_offset; if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { - struct i40e_vf *vf = - I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); vsi = &vf->vsi; - } else + if (!vsi) + return -EINVAL; + reg_idx = queue_idx; + } else { + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx); - - if (vsi == NULL) { - PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) " - "exceeds the maximum", queue_idx); - return I40E_ERR_PARAM; + if (!vsi) + return -EINVAL; + q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx); + if (q_offset < 0) + return -EINVAL; + reg_idx = vsi->base_queue + q_offset; } if (nb_desc % I40E_ALIGN_RING_DESC != 0 || - (nb_desc > I40E_MAX_RING_DESC) || - (nb_desc < I40E_MIN_RING_DESC)) { + (nb_desc > I40E_MAX_RING_DESC) || + (nb_desc < I40E_MIN_RING_DESC)) { PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is " "invalid", nb_desc); - return I40E_ERR_PARAM; + return -EINVAL; } /** @@ -1998,12 +2005,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->hthresh = tx_conf->tx_thresh.hthresh; txq->wthresh = tx_conf->tx_thresh.wthresh; txq->queue_id = queue_idx; - if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) - txq->reg_idx = queue_idx; - else /* PF device */ - txq->reg_idx = vsi->base_queue + - i40e_get_queue_offset_by_qindex(pf, queue_idx); - + txq->reg_idx = reg_idx; txq->port_id = dev->data->port_id; txq->txq_flags = tx_conf->txq_flags; txq->vsi = vsi; @@ -2157,18 +2159,40 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq) void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq) { + struct rte_eth_dev *dev; uint16_t i; + dev = &rte_eth_devices[txq->port_id]; + if (!txq || !txq->sw_ring) { PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL"); return; } - for (i = 0; i < txq->nb_tx_desc; i++) { - if (txq->sw_ring[i].mbuf) { + /** + * vPMD tx will not set sw_ring's mbuf to NULL after free, + * so need to free remains more carefully. 
+ */ + if (dev->tx_pkt_burst == i40e_xmit_pkts_vec) { + i = txq->tx_next_dd - txq->tx_rs_thresh + 1; + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + i = 0; + } + for (; i < txq->tx_tail; i++) { rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); txq->sw_ring[i].mbuf = NULL; } + } else { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } } } diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c index d235daa7..557593a4 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_neon.c +++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c @@ -192,8 +192,7 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts) #endif #define PKTLEN_SHIFT 10 - -#define I40E_VPMD_DESC_DD_MASK 0x0001000100010001ULL +#define I40E_UINT16_BIT (CHAR_BIT * sizeof(uint16_t)) static inline void desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts) @@ -224,7 +223,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, struct i40e_rx_entry *sw_ring; uint16_t nb_pkts_recd; int pos; - uint64_t var; /* mask to shuffle from desc. to mbuf */ uint8x16_t shuf_msk = { @@ -357,7 +355,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, /* C.2 get 4 pkts staterr value */ staterr = vzipq_u16(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0]; - stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0); desc_to_olflags_v(descs, &rx_pkts[pos]); @@ -422,6 +419,12 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, rx_pkts[pos + 3]->next = NULL; } + staterr = vshlq_n_u16(staterr, I40E_UINT16_BIT - 1); + staterr = vreinterpretq_u16_s16( + vshrq_n_s16(vreinterpretq_s16_u16(staterr), + I40E_UINT16_BIT - 1)); + stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0); + rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP); /* D.3 copy final 1,2 data to rx_pkts */ @@ -431,10 +434,12 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, pkt_mb1); desc_to_ptype_v(descs, &rx_pkts[pos]); /* C.4 calc avaialbe number of desc */ - var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK); - nb_pkts_recd += var; - if (likely(var != RTE_I40E_DESCS_PER_LOOP)) + if (unlikely(stat == 0)) { + nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP; + } else { + nb_pkts_recd += __builtin_ctzl(stat) / I40E_UINT16_BIT; break; + } } /* Update our internal tail pointer */ diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index f994fedc..73996bbe 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -3230,6 +3230,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) link.link_status = ETH_LINK_DOWN; link.link_speed = 0; link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_autoneg = ETH_LINK_AUTONEG; memset(&old, 0, sizeof(old)); rte_ixgbe_dev_atomic_read_link_status(dev, &old); @@ -4250,6 +4251,15 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) } ixgbevf_configure_msix(dev); + /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt + * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ). + * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( ) + * is not cleared, it will fail when following rte_intr_enable( ) tries + * to map Rx queue interrupt to other VFIO vectors. + * So clear uio/vfio intr/evevnfd first to avoid failure. 
+ */ + rte_intr_disable(intr_handle); + rte_intr_enable(intr_handle); /* Re-enable interrupt for VF */ @@ -5013,13 +5023,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); /* write pool mirrror control register */ - if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) { + if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), mp_msb); } /* write VLAN mirrror control register */ - if (mirror_conf->rule_type == ETH_MIRROR_VLAN) { + if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), mv_msb); @@ -6778,6 +6788,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, struct ixgbe_dcb_config *dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); struct ixgbe_dcb_tc_config *tc; + struct rte_eth_dcb_tc_queue_mapping *tc_queue; + uint8_t nb_tcs; uint8_t i, j; if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) @@ -6785,19 +6797,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, else dcb_info->nb_tcs = 1; + tc_queue = &dcb_info->tc_queue; + nb_tcs = dcb_info->nb_tcs; + if (dcb_config->vt_mode) { /* vt is enabled*/ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; - for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { - for (j = 0; j < dcb_info->nb_tcs; j++) { - dcb_info->tc_queue.tc_rxq[i][j].base = - i * dcb_info->nb_tcs + j; - dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1; - dcb_info->tc_queue.tc_txq[i][j].base = - i * dcb_info->nb_tcs + j; - dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1; + if (RTE_ETH_DEV_SRIOV(dev).active > 0) { + for (j = 0; j < nb_tcs; j++) { + tc_queue->tc_rxq[0][j].base = j; + tc_queue->tc_rxq[0][j].nb_queue = 1; + tc_queue->tc_txq[0][j].base = j; + tc_queue->tc_txq[0][j].nb_queue = 1; + } + } else { + for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { + for (j = 0; j < nb_tcs; j++) { + tc_queue->tc_rxq[i][j].base = + i * nb_tcs + j; + tc_queue->tc_rxq[i][j].nb_queue = 1; + tc_queue->tc_txq[i][j].base = + i * nb_tcs + j; + tc_queue->tc_txq[i][j].nb_queue = 1; + } } } } else { /* vt is disabled*/ diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c index 09440ccb..a760b1bb 100644 --- a/drivers/net/ixgbe/ixgbe_pf.c +++ b/drivers/net/ixgbe/ixgbe_pf.c @@ -263,7 +263,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev) gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); gpie &= ~IXGBE_GPIE_VTMODE_MASK; - gpie |= IXGBE_GPIE_MSIX_MODE; + gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT; switch (RTE_ETH_DEV_SRIOV(eth_dev).active) { case ETH_64_POOLS: diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index d1e300a5..8b18b530 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -3395,12 +3395,19 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, dcb_config->num_tcs.pg_tcs = ETH_4_TCS; dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; } + + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0; + } + /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = vmdq_rx_conf->dcb_tc[i]; tc = 
&dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } @@ -3422,12 +3429,18 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; } + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0; + } + /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = vmdq_tx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } @@ -3443,12 +3456,18 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev, dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs; dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs; + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0; + } + /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = rx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } @@ -3464,12 +3483,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev, dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs; dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs; + /* Initialize User Priority to Traffic Class mapping */ + for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { + tc = &dcb_config->tc_config[j]; + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0; + } + /* User Priority to Traffic Class mapping */ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { j = tx_conf->dcb_tc[i]; tc = &dcb_config->tc_config[j]; - tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = - (uint8_t)(1 << j); + tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |= + (uint8_t)(1 << i); } } diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index aa9d2dcb..86d1e44c 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -463,8 +463,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) DEBUG("using port %u (%08" PRIx32 ")", port, test); ctx = ibv_open_device(ibv_dev); - if (ctx == NULL) + if (ctx == NULL) { + err = ENODEV; goto port_error; + } /* Check port status. */ err = ibv_query_port(ctx, port, &port_attr); @@ -476,6 +478,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { ERROR("port %d is not configured in Ethernet mode", port); + err = EINVAL; goto port_error; } @@ -519,6 +522,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) } if (ibv_exp_query_device(ctx, &exp_device_attr)) { ERROR("ibv_exp_query_device() failed"); + err = ENODEV; goto port_error; } @@ -581,6 +585,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) if (priv_get_mac(priv, &mac.addr_bytes)) { ERROR("cannot get MAC address, is mlx5_en loaded?" 
" (errno: %s)", strerror(errno)); + err = ENODEV; goto port_error; } INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index ca981a53..2ea995f8 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -49,6 +49,7 @@ #include #include #include +#include /* DPDK headers don't like -pedantic. */ #ifdef PEDANTIC @@ -119,7 +120,6 @@ struct ethtool_link_settings { #define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38 #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39 #endif -#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 (SCHAR_MAX) /** * Return private structure associated with an Ethernet device. @@ -755,12 +755,7 @@ static int mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) { struct priv *priv = mlx5_get_priv(dev); - __extension__ struct { - struct ethtool_link_settings edata; - uint32_t link_mode_data[3 * - ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32]; - } ecmd; - + struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS }; struct ifreq ifr; struct rte_eth_link dev_link; uint64_t sc; @@ -773,59 +768,65 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); - memset(&ecmd, 0, sizeof(ecmd)); - ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS; - ifr.ifr_data = (void *)&ecmd; + ifr.ifr_data = (void *)&gcmd; if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", strerror(errno)); return -1; } - ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords; + gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords; + + alignas(struct ethtool_link_settings) + uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) + + sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3]; + struct ethtool_link_settings *ecmd = (void *)data; + + *ecmd = gcmd; + ifr.ifr_data = (void *)ecmd; if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", strerror(errno)); return -1; } - dev_link.link_speed = ecmd.edata.speed; - sc = ecmd.edata.link_mode_masks[0] | - ((uint64_t)ecmd.edata.link_mode_masks[1] << 32); + dev_link.link_speed = ecmd->speed; + sc = ecmd->link_mode_masks[0] | + ((uint64_t)ecmd->link_mode_masks[1] << 32); priv->link_speed_capa = 0; - if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT) + if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT)) priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG; - if (sc & (ETHTOOL_LINK_MODE_1000baseT_Full_BIT | - ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)) + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT))) priv->link_speed_capa |= ETH_LINK_SPEED_1G; - if (sc & (ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT | - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT | - ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)) + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT))) priv->link_speed_capa |= ETH_LINK_SPEED_10G; - if (sc & (ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT | - ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)) + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT))) priv->link_speed_capa |= ETH_LINK_SPEED_20G; - if (sc & 
(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT | - ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT | - ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT | - ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)) + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT))) priv->link_speed_capa |= ETH_LINK_SPEED_40G; - if (sc & (ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT | - ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT | - ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT | - ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)) + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT))) priv->link_speed_capa |= ETH_LINK_SPEED_56G; - if (sc & (ETHTOOL_LINK_MODE_25000baseCR_Full_BIT | - ETHTOOL_LINK_MODE_25000baseKR_Full_BIT | - ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)) + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT))) priv->link_speed_capa |= ETH_LINK_SPEED_25G; - if (sc & (ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT | - ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)) + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT))) priv->link_speed_capa |= ETH_LINK_SPEED_50G; - if (sc & (ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT | - ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT | - ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT | - ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)) + if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) | + MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))) priv->link_speed_capa |= ETH_LINK_SPEED_100G; - dev_link.link_duplex = ((ecmd.edata.duplex == DUPLEX_HALF) ? + dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ? ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index aea203b3..5095a2b4 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -609,11 +609,9 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type) int priv_rehash_flows(struct priv *priv) { - enum hash_rxq_flow_type i; + size_t i; - for (i = HASH_RXQ_FLOW_TYPE_PROMISC; - i != RTE_DIM((*priv->hash_rxqs)[0].special_flow); - ++i) + for (i = 0; i != RTE_DIM((*priv->hash_rxqs)[0].special_flow); ++i) if (!priv_allow_flow_type(priv, i)) { priv_special_flow_disable(priv, i); } else { diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 58926e39..92e4fd53 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -431,8 +431,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) #ifdef MLX5_PMD_SOFT_COUNTERS total_length = length; #endif - if (length < (MLX5_WQE_DWORD_SIZE + 2)) + if (length < (MLX5_WQE_DWORD_SIZE + 2)) { + txq->stats.oerrors++; break; + } /* Update element. 
*/ (*txq->elts)[elts_head] = buf; elts_head = (elts_head + 1) & (elts_n - 1); @@ -735,8 +737,10 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (max < segs_n + 1) break; /* Do not bother with large packets MPW cannot handle. */ - if (segs_n > MLX5_MPW_DSEG_MAX) + if (segs_n > MLX5_MPW_DSEG_MAX) { + txq->stats.oerrors++; break; + } max -= segs_n; --pkts_n; /* Should we enable HW CKSUM offload */ @@ -941,8 +945,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, if (max < segs_n + 1) break; /* Do not bother with large packets MPW cannot handle. */ - if (segs_n > MLX5_MPW_DSEG_MAX) + if (segs_n > MLX5_MPW_DSEG_MAX) { + txq->stats.oerrors++; break; + } max -= segs_n; --pkts_n; /* Should we enable HW CKSUM offload */ diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 909d80e6..d3d6d6ac 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -81,7 +81,7 @@ struct mlx5_txq_stats { uint64_t opackets; /**< Total of successfully sent packets. */ uint64_t obytes; /**< Total of successfully sent bytes. */ #endif - uint64_t odropped; /**< Total of packets not sent when TX ring full. */ + uint64_t oerrors; /**< Total number of failed transmitted packets. */ }; /* Flow director queue structure. */ diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index f2b5781a..79b18410 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -94,13 +94,13 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) tmp.q_opackets[idx] += txq->stats.opackets; tmp.q_obytes[idx] += txq->stats.obytes; #endif - tmp.q_errors[idx] += txq->stats.odropped; + tmp.q_errors[idx] += txq->stats.oerrors; } #ifdef MLX5_PMD_SOFT_COUNTERS tmp.opackets += txq->stats.opackets; tmp.obytes += txq->stats.obytes; #endif - tmp.oerrors += txq->stats.odropped; + tmp.oerrors += txq->stats.oerrors; } #ifndef MLX5_PMD_SOFT_COUNTERS /* FIXME: retrieve and add hardware counters. */ diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h index a824787f..f126979b 100644 --- a/drivers/net/mlx5/mlx5_utils.h +++ b/drivers/net/mlx5/mlx5_utils.h @@ -35,6 +35,7 @@ #define RTE_PMD_MLX5_UTILS_H_ #include +#include #include #include #include @@ -61,6 +62,9 @@ !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \ ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))) +/* Convert a bit number to the corresponding 64-bit mask */ +#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v)) + /* Save and restore errno around argument evaluation. 
*/ #define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0])) diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c index 49c52930..74453204 100644 --- a/drivers/net/nfp/nfp_net.c +++ b/drivers/net/nfp/nfp_net.c @@ -857,6 +857,8 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */ + memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats)); + /* reading per RX ring stats */ for (i = 0; i < dev->data->nb_rx_queues; i++) { if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS) @@ -2120,7 +2122,8 @@ nfp_net_reta_update(struct rte_eth_dev *dev, reta &= ~(0xFF << (8 * j)); reta |= reta_conf[idx].reta[shift + j] << (8 * j); } - nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + shift, reta); + nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, + reta); } update = NFP_NET_CFG_UPDATE_RSS; @@ -2167,7 +2170,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev, if (!mask) continue; - reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + shift); + reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + + shift); for (j = 0; j < 4; j++) { if (!(mask & (0x1 << j))) continue; @@ -2217,6 +2221,9 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, NFP_NET_CFG_RSS_IPV6_TCP | NFP_NET_CFG_RSS_IPV6_UDP; + cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK; + cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ; + /* configuring where to apply the RSS hash */ nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl); diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index 1a208ffc..f6b3c10c 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -411,11 +411,13 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper) /* The dumper is created using the previous pcap_t reference */ *dumper = pcap_dump_open(tx_pcap, pcap_filename); if (*dumper == NULL) { + pcap_close(tx_pcap); RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename); return -1; } + pcap_close(tx_pcap); return 0; } diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile index 29b443df..18150b58 100644 --- a/drivers/net/qede/Makefile +++ b/drivers/net/qede/Makefile @@ -66,8 +66,9 @@ CFLAGS_BASE_DRIVER += -Wno-sometimes-uninitialized ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0) CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion endif -else -CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type +else #ICC +CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type +CFLAGS_qede_ethdev.o += -wd279 #279: controlling expression is constant endif # diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h index 907b35b9..e2da8aac 100644 --- a/drivers/net/qede/base/ecore.h +++ b/drivers/net/qede/base/ecore.h @@ -624,45 +624,45 @@ struct ecore_dev { u16 device_id; u16 chip_num; - #define CHIP_NUM_MASK 0xffff - #define CHIP_NUM_SHIFT 16 +#define CHIP_NUM_MASK 0xffff +#define CHIP_NUM_SHIFT 0 - u16 chip_rev; - #define CHIP_REV_MASK 0xf - #define CHIP_REV_SHIFT 12 + u8 chip_rev; +#define CHIP_REV_MASK 0xf +#define CHIP_REV_SHIFT 0 #ifndef ASIC_ONLY - #define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5) - #define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe) - #define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc) - #define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \ - CHIP_REV_IS_EMUL_B0(_p_dev)) - #define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf) - #define 
CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd) - #define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \ - CHIP_REV_IS_FPGA_B0(_p_dev)) - #define CHIP_REV_IS_SLOW(_p_dev) \ - (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev)) - #define CHIP_REV_IS_A0(_p_dev) \ - (CHIP_REV_IS_EMUL_A0(_p_dev) || \ - CHIP_REV_IS_FPGA_A0(_p_dev) || \ - !(_p_dev)->chip_rev) - #define CHIP_REV_IS_B0(_p_dev) \ - (CHIP_REV_IS_EMUL_B0(_p_dev) || \ - CHIP_REV_IS_FPGA_B0(_p_dev) || \ - (_p_dev)->chip_rev == 1) - #define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev) +#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5) +#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe) +#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc) +#define CHIP_REV_IS_EMUL(_p_dev) \ + (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_EMUL_B0(_p_dev)) +#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf) +#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd) +#define CHIP_REV_IS_FPGA(_p_dev) \ + (CHIP_REV_IS_FPGA_A0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev)) +#define CHIP_REV_IS_SLOW(_p_dev) \ + (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev)) +#define CHIP_REV_IS_A0(_p_dev) \ + (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_FPGA_A0(_p_dev) || \ + (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal)) +#define CHIP_REV_IS_B0(_p_dev) \ + (CHIP_REV_IS_EMUL_B0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev) || \ + ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal)) +#define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev) #else - #define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev) - #define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1) +#define CHIP_REV_IS_A0(_p_dev) \ + (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal) +#define CHIP_REV_IS_B0(_p_dev) \ + ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal) #endif - u16 chip_metal; - #define CHIP_METAL_MASK 0xff - #define CHIP_METAL_SHIFT 4 + u8 chip_metal; +#define CHIP_METAL_MASK 0xff +#define CHIP_METAL_SHIFT 0 - u16 chip_bond_id; - #define CHIP_BOND_ID_MASK 0xf - #define CHIP_BOND_ID_SHIFT 0 + u8 chip_bond_id; +#define CHIP_BOND_ID_MASK 0xff +#define CHIP_BOND_ID_SHIFT 0 u8 num_engines; u8 num_ports_in_engines; @@ -670,12 +670,12 @@ struct ecore_dev { u8 path_id; enum ecore_mf_mode mf_mode; - #define IS_MF_DEFAULT(_p_hwfn) \ - (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT) - #define IS_MF_SI(_p_hwfn) \ - (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR) - #define IS_MF_SD(_p_hwfn) \ - (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN) +#define IS_MF_DEFAULT(_p_hwfn) \ + (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT) +#define IS_MF_SI(_p_hwfn) \ + (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR) +#define IS_MF_SD(_p_hwfn) \ + (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN) int pcie_width; int pcie_speed; @@ -804,6 +804,7 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev, int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate); void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, + struct ecore_ptt *p_ptt, u32 min_pf_rate); int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw); diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c index 3dd953d9..5e2f0293 100644 --- a/drivers/net/qede/base/ecore_cxt.c +++ b/drivers/net/qede/base/ecore_cxt.c @@ -67,6 +67,7 @@ union type0_task_context { /* TYPE-1 task context - ROCE */ union type1_task_context { + struct regpair reserved; /* @DPDK */ }; struct src_ent { diff --git 
a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c index 8175619a..8aa3c0b7 100644 --- a/drivers/net/qede/base/ecore_dcbx.c +++ b/drivers/net/qede/base/ecore_dcbx.c @@ -437,7 +437,7 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn, p_params->app_error = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR); p_params->num_app_entries = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); - for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + for (i = 0; i < p_params->num_app_entries; i++) { entry = &p_params->app_entry[i]; if (ieee) { u8 sf_ieee; @@ -619,7 +619,7 @@ ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; } -static enum _ecore_status_t +static void ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_dcbx_get *params) @@ -644,7 +644,7 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn, if (!enabled) { p_operational->enabled = enabled; p_operational->valid = false; - return ECORE_INVAL; + return; } p_data = &p_operational->params; @@ -671,8 +671,6 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn, p_operational->err = err; p_operational->enabled = enabled; p_operational->valid = true; - - return rc; } static enum _ecore_status_t @@ -1145,7 +1143,7 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn, p_app->flags |= (u32)p_params->num_app_entries << DCBX_APP_NUM_ENTRIES_SHIFT; - for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + for (i = 0; i < p_params->num_app_entries; i++) { entry = &p_app->app_pri_tbl[i].entry; if (ieee) { *entry &= ~DCBX_APP_SF_IEEE_MASK; @@ -1340,7 +1338,7 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn, p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled; OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params, &dcbx_info->operational.params, - sizeof(struct ecore_dcbx_admin_params)); + sizeof(p_hwfn->p_dcbx_info->set.config.params)); p_hwfn->p_dcbx_info->set.config.valid = true; OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set, diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c index 6060f9ee..d2dc044d 100644 --- a/drivers/net/qede/base/ecore_dev.c +++ b/drivers/net/qede/base/ecore_dev.c @@ -2856,12 +2856,10 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev) else p_dev->type = ECORE_DEV_TYPE_BB; - p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, - MISCS_REG_CHIP_NUM); - p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, - MISCS_REG_CHIP_REV); - - MASK_FIELD(CHIP_REV, p_dev->chip_rev); + tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_NUM); + p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM); + tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_REV); + p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV); /* Learn number of HW-functions */ tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, @@ -2885,20 +2883,19 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev) } #endif - p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, - MISCS_REG_CHIP_TEST_REG) >> 4; - MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id); - p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, - MISCS_REG_CHIP_METAL); - MASK_FIELD(CHIP_METAL, p_dev->chip_metal); + tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_TEST_REG); + p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID); + tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_METAL); + p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL); + 
DP_INFO(p_dev->hwfns, - "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", + "Chip details - %s%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n", ECORE_IS_BB(p_dev) ? "BB" : "AH", CHIP_REV_IS_A0(p_dev) ? 0 : 1, p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, p_dev->chip_metal); - if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) { + if (ECORE_IS_BB_A0(p_dev)) { DP_NOTICE(p_dev->hwfns, false, "The chip type/rev (BB A0) is not supported!\n"); return ECORE_ABORTED; @@ -4111,6 +4108,7 @@ int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate) /* API to configure WFQ from mcp link change */ void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, + struct ecore_ptt *p_ptt, u32 min_pf_rate) { int i; @@ -4125,8 +4123,7 @@ void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, for_each_hwfn(p_dev, i) { struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; - __ecore_configure_vp_wfq_on_link_change(p_hwfn, - p_hwfn->p_dpc_ptt, + __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, min_pf_rate); } } diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c index 7f4db0a0..3db5cc3b 100644 --- a/drivers/net/qede/base/ecore_hw.c +++ b/drivers/net/qede/base/ecore_hw.c @@ -133,7 +133,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock); } -u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +static u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { /* The HW is using DWORDS and we need to translate it to Bytes */ return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2; diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h index 0750b2ed..c246f188 100644 --- a/drivers/net/qede/base/ecore_hw.h +++ b/drivers/net/qede/base/ecore_hw.h @@ -97,17 +97,6 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn); */ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn); -/** - * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address - * - * @param p_hwfn - * @param p_ptt - * - * @return u32 - */ -u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt); - /** * @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address * diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c index 2ff97155..fad07466 100644 --- a/drivers/net/qede/base/ecore_mcp.c +++ b/drivers/net/qede/base/ecore_mcp.c @@ -746,7 +746,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn, /* Mintz bandwidth configuration */ __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); - ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, + ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt, p_link->min_pf_rate); p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c index b28d7281..064d7e72 100644 --- a/drivers/net/qede/base/ecore_sriov.c +++ b/drivers/net/qede/base/ecore_sriov.c @@ -1214,13 +1214,17 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn, (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, ¶ms); - ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, - mbx->req_virt->first_tlv.reply_address, - sizeof(u64) / 4, ¶ms); - + /* Once PF copies the rc to the VF, the latter can continue and + * and send an additional message. So we have to make sure the + * channel would be re-set to ready prior to that. 
+ */ REG_WR(p_hwfn, GTT_BAR0_MAP_REG_USDM_RAM + USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); + + ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, + mbx->req_virt->first_tlv.reply_address, + sizeof(u64) / 4, ¶ms); } static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn, @@ -2806,12 +2810,13 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn, goto out; } - /* Update shadow copy of the VF configuration */ + /* Update shadow copy of the VF configuration. In case shadow indicates + * the action should be blocked return success to VF to imitate the + * firmware behaviour in such case. + */ if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms) != - ECORE_SUCCESS) { - status = PFVF_STATUS_FAILURE; + ECORE_SUCCESS) goto out; - } /* Determine if the unicast filtering is acceptible by PF */ if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) && diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c index be8b1ec4..5ff8f28a 100644 --- a/drivers/net/qede/base/ecore_vf.c +++ b/drivers/net/qede/base/ecore_vf.c @@ -325,7 +325,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn) /* get HW info */ p_hwfn->p_dev->type = resp->pfdev_info.dev_type; - p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev; + p_hwfn->p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev; DP_INFO(p_hwfn, "Chip details - %s%d\n", ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH", diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c index 67ebb1e5..8592485d 100644 --- a/drivers/net/virtio/virtio_ethdev.c +++ b/drivers/net/virtio/virtio_ethdev.c @@ -1523,6 +1523,19 @@ virtio_dev_start(struct rte_eth_dev *dev) struct virtnet_rx *rxvq; struct virtnet_tx *txvq __rte_unused; struct virtio_hw *hw = dev->data->dev_private; + int ret; + + /* Finish the initialization of the queues */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + ret = virtio_dev_rx_queue_setup_finish(dev, i); + if (ret < 0) + return ret; + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + ret = virtio_dev_tx_queue_setup_finish(dev, i); + if (ret < 0) + return ret; + } /* check if lsc interrupt feature is enabled */ if (dev->data->dev_conf.intr_conf.lsc) { @@ -1551,6 +1564,8 @@ virtio_dev_start(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { rxvq = dev->data->rx_queues[i]; + /* Flush the old packets */ + virtqueue_flush(rxvq->vq); virtqueue_notify(rxvq->vq); } diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h index 4feccf93..c491ec1b 100644 --- a/drivers/net/virtio/virtio_ethdev.h +++ b/drivers/net/virtio/virtio_ethdev.h @@ -88,10 +88,16 @@ int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool); +int virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, + uint16_t rx_queue_id); + int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); +int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, + uint16_t tx_queue_id); + uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c index a33ef1a8..43fac63b 100644 --- a/drivers/net/virtio/virtio_rxtx.c +++ b/drivers/net/virtio/virtio_rxtx.c @@ -72,7 +72,7 @@ #define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ 
ETH_TXQ_FLAGS_NOOFFLOADS) -static void +void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) { struct vring_desc *dp, *dp_tail; @@ -291,6 +291,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie, /* prepend cannot fail, checked by caller */ hdr = (struct virtio_net_hdr *) rte_pktmbuf_prepend(cookie, head_size); + /* rte_pktmbuf_prepend() counts the hdr size to the pkt length, + * which is wrong. Below subtract restores correct pkt size. + */ + cookie->pkt_len -= head_size; /* if offload disabled, it is not zeroed below, do it now */ if (offload == 0) { ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0); @@ -412,9 +416,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, struct virtio_hw *hw = dev->data->dev_private; struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; struct virtnet_rx *rxvq; - int error, nbufs; - struct rte_mbuf *m; - uint16_t desc_idx; PMD_INIT_FUNC_TRACE(); @@ -431,10 +432,24 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, } dev->data->rx_queues[queue_idx] = rxvq; + return 0; +} + +int +virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx) +{ + uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; + struct virtio_hw *hw = dev->data->dev_private; + struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; + struct virtnet_rx *rxvq = &vq->rxq; + struct rte_mbuf *m; + uint16_t desc_idx; + int error, nbufs; + + PMD_INIT_FUNC_TRACE(); /* Allocate blank mbufs for the each rx descriptor */ nbufs = 0; - error = ENOSPC; if (hw->use_simple_rxtx) { for (desc_idx = 0; desc_idx < vq->vq_nentries; @@ -525,7 +540,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; struct virtnet_tx *txvq; uint16_t tx_free_thresh; - uint16_t desc_idx; PMD_INIT_FUNC_TRACE(); @@ -554,9 +568,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, vq->vq_free_thresh = tx_free_thresh; - if (hw->use_simple_rxtx) { - uint16_t mid_idx = vq->vq_nentries >> 1; + dev->data->tx_queues[queue_idx] = txvq; + return 0; +} + +int +virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, + uint16_t queue_idx) +{ + uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX; + struct virtio_hw *hw = dev->data->dev_private; + struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; + uint16_t mid_idx = vq->vq_nentries >> 1; + struct virtnet_tx *txvq = &vq->txq; + uint16_t desc_idx; + PMD_INIT_FUNC_TRACE(); + + if (hw->use_simple_rxtx) { for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) { vq->vq_ring.avail->ring[desc_idx] = desc_idx + mid_idx; @@ -578,7 +607,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, VIRTQUEUE_DUMP(vq); - dev->data->tx_queues[queue_idx] = txvq; return 0; } @@ -661,7 +689,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr) * In case of SCTP, this will be wrong since it's a CRC * but there's nothing we can do. 
*/ - uint16_t csum, off; + uint16_t csum = 0, off; rte_raw_cksum_mbuf(m, hdr->csum_start, rte_pktmbuf_pkt_len(m) - hdr->csum_start, diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c index b651e53b..a6c0b34e 100644 --- a/drivers/net/virtio/virtio_rxtx_simple.c +++ b/drivers/net/virtio/virtio_rxtx_simple.c @@ -65,6 +65,8 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq, struct vring_desc *start_dp; uint16_t desc_idx; + cookie->port = vq->rxq.port_id; + desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1); dxp = &vq->vq_descx[desc_idx]; dxp->cookie = (void *)cookie; diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c index 082e8217..6f4845b7 100644 --- a/drivers/net/virtio/virtio_user/vhost_user.c +++ b/drivers/net/virtio/virtio_user/vhost_user.c @@ -97,6 +97,10 @@ vhost_user_read(int fd, struct vhost_user_msg *msg) } sz_payload = msg->size; + + if ((size_t)sz_payload > sizeof(msg->payload)) + goto fail; + if (sz_payload) { ret = recv(fd, (void *)((char *)msg + sz_hdr), sz_payload, 0); if (ret < sz_payload) { diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c index 7f60e3ef..4f8707ae 100644 --- a/drivers/net/virtio/virtqueue.c +++ b/drivers/net/virtio/virtqueue.c @@ -70,3 +70,28 @@ virtqueue_detatch_unused(struct virtqueue *vq) } return NULL; } + +/* Flush the elements in the used ring. */ +void +virtqueue_flush(struct virtqueue *vq) +{ + struct vring_used_elem *uep; + struct vq_desc_extra *dxp; + uint16_t used_idx, desc_idx; + uint16_t nb_used, i; + + nb_used = VIRTQUEUE_NUSED(vq); + + for (i = 0; i < nb_used; i++) { + used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1); + uep = &vq->vq_ring.used->ring[used_idx]; + desc_idx = (uint16_t)uep->id; + dxp = &vq->vq_descx[desc_idx]; + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + vq->vq_used_cons_idx++; + vq_ring_free_chain(vq, desc_idx); + } +} diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h index 569c251c..ec967a5b 100644 --- a/drivers/net/virtio/virtqueue.h +++ b/drivers/net/virtio/virtqueue.h @@ -290,6 +290,9 @@ void virtqueue_dump(struct virtqueue *vq); */ struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq); +/* Flush the elements in the used ring. 
*/ +void virtqueue_flush(struct virtqueue *vq); + static inline int virtqueue_full(const struct virtqueue *vq) { @@ -298,6 +301,8 @@ virtqueue_full(const struct virtqueue *vq) #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx)) +void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx); + static inline void vq_update_avail_idx(struct virtqueue *vq) { diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c index aedac6ca..2bd2f272 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -441,10 +441,10 @@ vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); - val = *(const uint32_t *)addr; + memcpy(&val, addr, 4); VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val); - val = (addr[5] << 8) | addr[4]; + memcpy(&val, addr + 4, 2); VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val); } @@ -762,6 +762,8 @@ vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct vmxnet3_hw *hw = dev->data->dev_private; + ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr)); + ether_addr_copy(mac_addr, &dev->data->mac_addrs[0]); vmxnet3_write_mac(hw, mac_addr->addr_bytes); } diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c index 3056f4ff..5ef7773b 100644 --- a/drivers/net/vmxnet3/vmxnet3_rxtx.c +++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -194,6 +194,8 @@ vmxnet3_dev_tx_queue_release(void *txq) vmxnet3_cmd_ring_release(&tq->cmd_ring); /* Release the memzone */ rte_memzone_free(tq->mz); + /* Release the queue */ + rte_free(tq); } } @@ -214,6 +216,9 @@ vmxnet3_dev_rx_queue_release(void *rxq) /* Release the memzone */ rte_memzone_free(rq->mz); + + /* Release the queue */ + rte_free(rq); } } @@ -254,11 +259,9 @@ vmxnet3_dev_rx_queue_reset(void *rxq) struct vmxnet3_comp_ring *comp_ring; int size; - if (rq != NULL) { - /* Release both the cmd_rings mbufs */ - for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) - vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]); - } + /* Release both the cmd_rings mbufs */ + for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) + vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]); ring0 = &rq->cmd_ring[0]; ring1 = &rq->cmd_ring[1]; -- cgit 1.2.3-korg
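
Note (not part of the patch): several mlx5 hunks above replace direct tests of ETHTOOL_LINK_MODE_*_BIT values with the new MLX5_BITSHIFT() macro. The point is that those ethtool constants are bit *indices* into link_mode_masks[], not masks, so they must be converted to a 64-bit mask before being ANDed against the supported-modes word. A minimal standalone C sketch of that index-to-mask conversion follows; the enum values below are illustrative placeholders, not the real constants from <linux/ethtool.h>.

```c
#include <stdint.h>
#include <stdio.h>

/* Same idea as the MLX5_BITSHIFT() macro added to mlx5_utils.h:
 * turn a bit index into a 64-bit mask before testing it. */
#define BITSHIFT(v) (UINT64_C(1) << (v))

/* Illustrative bit indices only; the real ETHTOOL_LINK_MODE_*_BIT
 * values are defined by the kernel headers. */
enum {
	LINK_MODE_AUTONEG_BIT = 6,
	LINK_MODE_10G_KR_BIT  = 19,
};

int main(void)
{
	/* Pretend this is the first 64 bits of link_mode_masks[]. */
	uint64_t supported = BITSHIFT(LINK_MODE_AUTONEG_BIT) |
			     BITSHIFT(LINK_MODE_10G_KR_BIT);

	/* Buggy pattern the patch removes: the index itself is used as a
	 * mask, so the test checks unrelated low bits and here misses the
	 * advertised capability entirely (prints nothing). */
	if (supported & LINK_MODE_10G_KR_BIT)
		printf("buggy test matched by accident\n");

	/* Fixed pattern: convert the index to a mask first. */
	if (supported & BITSHIFT(LINK_MODE_10G_KR_BIT))
		printf("10G KR advertised\n");

	return 0;
}
```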