Diffstat (limited to 'drivers')
33 files changed, 362 insertions, 160 deletions
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3d49e2ae..287c8a50 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 12,
@@ -69,9 +69,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 12,
@@ -90,9 +90,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 14,
@@ -111,9 +111,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 16,
@@ -132,9 +132,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
 				.block_size = 128,
 				.key_size = {
-					.min = 128,
+					.min = 1,
 					.max = 128,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 24,
@@ -153,9 +153,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
 				.block_size = 128,
 				.key_size = {
-					.min = 128,
+					.min = 1,
 					.max = 128,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 32,
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 139fed14..a072e6e3 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 16,
@@ -90,9 +90,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 20,
@@ -132,9 +132,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 28,
@@ -174,9 +174,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 32,
@@ -216,9 +216,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
 				.block_size = 128,
 				.key_size = {
-					.min = 128,
+					.min = 1,
 					.max = 128,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 48,
@@ -258,9 +258,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
 				.block_size = 128,
 				.key_size = {
-					.min = 128,
+					.min = 1,
 					.max = 128,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 64,
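Note: all of the capability tables above move from a fixed HMAC key length (min == max == block size, increment 0) to the full 1..block-size range. A minimal sketch of how a caller can test a key length against such a {min, max, increment} triplet; the struct here merely mirrors the fields shown in the hunks, it is not the DPDK type itself:

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the {min, max, increment} triplets in the tables above.
 * increment == 0 means only the single size 'min' (== 'max') is valid. */
struct param_range {
	uint16_t min;
	uint16_t max;
	uint16_t increment;
};

static bool
key_size_supported(const struct param_range *r, uint16_t len)
{
	if (len < r->min || len > r->max)
		return false;
	if (r->increment == 0)
		return len == r->min;
	return (len - r->min) % r->increment == 0;
}

With the old entries only an exact 64- or 128-byte key passed this check; with the new {.min = 1, .max = 64, .increment = 1} style entries any key from 1 byte up to the block size does.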
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 8900668d..1c4b184e 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -99,6 +99,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_NULL:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
 	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
 		/* return maximum state1 size in this case */
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -761,6 +764,9 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_NULL:
+		state1_size = qat_hash_get_state1_size(
+				ICP_QAT_HW_AUTH_ALGO_NULL);
+		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
 		state1_size = qat_hash_get_state1_size(
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 0fe0b232..f7fcecea 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -76,9 +76,9 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 20,
@@ -97,9 +97,9 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 28,
@@ -118,9 +118,9 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 32,
@@ -137,11 +137,11 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
 			{.auth = {
 				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
-				.block_size = 64,
+				.block_size = 128,
 				.key_size = {
-					.min = 128,
+					.min = 1,
 					.max = 128,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 48,
@@ -160,9 +160,9 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
 				.block_size = 128,
 				.key_size = {
-					.min = 128,
+					.min = 1,
 					.max = 128,
-					.increment = 0
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 64,
@@ -181,9 +181,9 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
 				.block_size = 64,
 				.key_size = {
-					.min = 8,
+					.min = 1,
 					.max = 64,
-					.increment = 8
+					.increment = 1
 				},
 				.digest_size = {
 					.min = 16,
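Note: the reason HMAC can accept any key length at all is the RFC 2104 key preprocessing step: a key longer than the hash block size is hashed first, and a shorter one is zero-padded to the block size. A hedged sketch of that normalization, with the digest function left as a stand-in for the MD5/SHA implementations these PMDs drive:

#include <stdint.h>
#include <string.h>

/* Stand-in for MD5/SHA-1/... ; writes the digest of msg into out. */
typedef void (*digest_fn)(const uint8_t *msg, size_t len, uint8_t *out);

/* RFC 2104: K' = H(K) if K is longer than the block, else K,
 * zero-padded to the block size in both cases. */
static void
hmac_normalize_key(const uint8_t *key, size_t key_len,
		   uint8_t *block, size_t block_size, digest_fn digest)
{
	memset(block, 0, block_size);
	if (key_len > block_size)
		digest(key, key_len, block);	/* long key: hash it first */
	else
		memcpy(block, key, key_len);	/* short key: zero-pad */
}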
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 07e71241..93910d81 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -194,6 +194,9 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
 	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
 	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
 
+	if (filter->fw_l2_filter_id == UINT64_MAX)
+		return 0;
+
 	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
 
 	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
@@ -216,6 +219,9 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
 	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 	uint32_t enables = 0;
 
+	if (filter->fw_l2_filter_id != UINT64_MAX)
+		bnxt_hwrm_clear_filter(bp, filter);
+
 	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
 
 	req.flags = rte_cpu_to_le_32(filter->flags);
@@ -476,6 +482,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 	struct hwrm_port_phy_cfg_input req = {0};
 	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 	uint32_t enables = 0;
+	uint32_t link_speed_mask =
+		HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
 
 	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
 
@@ -487,14 +495,20 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
 	 * any auto mode, even "none".
 	 */
 	if (!conf->link_speed) {
-		req.auto_mode |= conf->auto_mode;
-		enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
-		req.auto_link_speed_mask = conf->auto_link_speed_mask;
-		enables |=
-			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
-		req.auto_link_speed = bp->link_info.auto_link_speed;
-		enables |=
+		req.auto_mode = conf->auto_mode;
+		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
+		if (conf->auto_mode ==
+		    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
+			req.auto_link_speed_mask =
+				conf->auto_link_speed_mask;
+			enables |= link_speed_mask;
+		}
+		if (bp->link_info.auto_link_speed) {
+			req.auto_link_speed =
+				bp->link_info.auto_link_speed;
+			enables |=
 				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
+		}
 	}
 	req.auto_duplex = conf->duplex;
 	enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
@@ -534,13 +548,10 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
 	HWRM_CHECK_RESULT;
 
 	link_info->phy_link_status = resp->link;
-	if (link_info->phy_link_status != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
-		link_info->link_up = 1;
-		link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
-	} else {
-		link_info->link_up = 0;
-		link_info->link_speed = 0;
-	}
+	link_info->link_up =
+		(link_info->phy_link_status ==
+		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
+	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
 	link_info->duplex = resp->duplex;
 	link_info->pause = resp->pause;
 	link_info->auto_pause = resp->auto_pause;
@@ -829,11 +840,14 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 
 	HWRM_PREP(req, VNIC_ALLOC, -1, resp);
 
+	if (vnic->func_default)
+		req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
 	HWRM_CHECK_RESULT;
 
 	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
+	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
 	return rc;
 }
@@ -843,6 +857,11 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+		RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+		return rc;
+	}
+
 	HWRM_PREP(req, VNIC_CFG, -1, resp);
 
 	/* Only RSS support for now TBD: COS & LB */
@@ -885,6 +904,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	HWRM_CHECK_RESULT;
 
 	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
 
 	return rc;
 }
@@ -896,6 +916,12 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
 						bp->hwrm_cmd_resp_addr;
 
+	if (vnic->fw_rss_cos_lb_ctx == 0xffff) {
+		RTE_LOG(DEBUG, PMD,
+			"VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
+		return rc;
+	}
+
 	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
 
 	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
@@ -915,8 +941,10 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	struct hwrm_vnic_free_input req = {.req_type = 0 };
 	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+		RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
 		return rc;
+	}
 
 	HWRM_PREP(req, VNIC_FREE, -1, resp);
@@ -1335,12 +1363,16 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
 	return 0;
 }
 
-static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
+static uint16_t
+bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
 {
 	uint16_t ret = 0;
 
-	if (link_speed == ETH_LINK_SPEED_AUTONEG)
+	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+		if (bp->link_info.support_speeds)
+			return bp->link_info.support_speeds;
 		link_speed = BNXT_SUPPORTED_SPEEDS;
+	}
 
 	if (link_speed & ETH_LINK_SPEED_100M)
 		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
@@ -1432,16 +1464,16 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
 			"Get link config failed with rc %d\n", rc);
 		goto exit;
 	}
-	if (link_info->link_up)
+	if (link_info->link_speed)
 		link->link_speed =
 			bnxt_parse_hw_link_speed(link_info->link_speed);
 	else
-		link->link_speed = ETH_LINK_SPEED_10M;
+		link->link_speed = ETH_SPEED_NUM_NONE;
 	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
 	link->link_status = link_info->link_up;
 	link->link_autoneg = link_info->auto_mode ==
 		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
-		ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
+		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
 exit:
 	return rc;
 }
@@ -1474,7 +1506,8 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 		link_req.auto_mode =
 			HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
 		link_req.auto_link_speed_mask =
-			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
+			bnxt_parse_eth_link_speed_mask(bp,
+						       dev_conf->link_speeds);
 	} else {
 		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
 		link_req.link_speed = speed;
@@ -1491,7 +1524,6 @@ port_phy_cfg:
 			"Set link config failed with rc %d\n", rc);
 	}
 
-	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
 error:
 	return rc;
 }
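Note: the UINT64_MAX checks added to bnxt_hwrm_clear_filter()/bnxt_hwrm_set_filter() above make the firmware filter handle idempotent: UINT64_MAX marks "no filter allocated", so a free is a no-op and a re-alloc first releases the old handle. The same sentinel pattern in isolation (names hypothetical, fw_filter_free() a stub standing in for the HWRM request):

#include <stdint.h>

#define INVALID_FILTER_ID UINT64_MAX

struct l2_filter {
	uint64_t fw_id;	/* firmware handle, INVALID_FILTER_ID when unset */
};

/* Stub for the firmware call. */
static int fw_filter_free(uint64_t fw_id) { (void)fw_id; return 0; }

static int
filter_free(struct l2_filter *f)
{
	/* Nothing allocated: freeing twice must be a no-op. */
	if (f->fw_id == INVALID_FILTER_ID)
		return 0;

	int rc = fw_filter_free(f->fw_id);
	if (rc == 0)
		f->fw_id = INVALID_FILTER_ID;	/* re-arm the sentinel */
	return rc;
}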
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 2f7ae70c..b4a1e727 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -435,7 +435,7 @@ periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
 		 * In other case (was fast and now it is slow) just switch
 		 * timeout to slow without forcing send of LACP (because standard
 		 * say so)*/
-		if (!is_partner_fast)
+		if (is_partner_fast)
 			SM_FLAG_SET(port, NTT);
 	} else
 		return; /* Nothing changed */
@@ -758,7 +758,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
 		uint16_t key;
 
 		slave_id = internals->active_slaves[i];
-		rte_eth_link_get(slave_id, &link_info);
+		rte_eth_link_get_nowait(slave_id, &link_info);
 		rte_eth_macaddr_get(slave_id, &slave_addr);
 
 		if (link_info.link_status != 0) {
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index aa71e3f8..7811a5ac 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -643,7 +643,7 @@ bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
 {
 	struct rte_eth_link link_status;
 
-	rte_eth_link_get(port_id, &link_status);
+	rte_eth_link_get_nowait(port_id, &link_status);
 	uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
 	if (link_bwg == 0)
 		return;
@@ -1667,6 +1667,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct bond_dev_private *internals = dev->data->dev_private;
+	uint16_t max_nb_rx_queues = UINT16_MAX;
+	uint16_t max_nb_tx_queues = UINT16_MAX;
 
 	dev_info->max_mac_addrs = 1;
 
@@ -1674,8 +1676,29 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		? internals->candidate_max_rx_pktlen
 		: ETHER_MAX_JUMBO_FRAME_LEN;
 
-	dev_info->max_rx_queues = (uint16_t)128;
-	dev_info->max_tx_queues = (uint16_t)512;
+	if (internals->slave_count > 0) {
+		/* Max number of tx/rx queues that the bonded device can
+		 * support is the minimum values of the bonded slaves, as
+		 * all slaves must be capable of supporting the same number
+		 * of tx/rx queues.
+		 */
+		struct rte_eth_dev_info slave_info;
+		uint8_t idx;
+
+		for (idx = 0; idx < internals->slave_count; idx++) {
+			rte_eth_dev_info_get(internals->slaves[idx].port_id,
+					&slave_info);
+
+			if (slave_info.max_rx_queues < max_nb_rx_queues)
+				max_nb_rx_queues = slave_info.max_rx_queues;
+
+			if (slave_info.max_tx_queues < max_nb_tx_queues)
+				max_nb_tx_queues = slave_info.max_tx_queues;
+		}
+	}
+
+	dev_info->max_rx_queues = max_nb_rx_queues;
+	dev_info->max_tx_queues = max_nb_tx_queues;
 	dev_info->min_rx_bufsize = 0;
 	dev_info->pci_dev = NULL;
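Note: the bond_ethdev_info() fix computes each queue limit as the minimum over all slaves, starting from UINT16_MAX so that an empty slave list leaves the limit unconstrained. The same aggregation in isolation:

#include <stdint.h>
#include <stdio.h>

/* Minimum over a set of per-member limits; UINT16_MAX is the identity,
 * so with zero members the bonded limit stays "no restriction". */
static uint16_t
min_limit(const uint16_t *limits, unsigned int n)
{
	uint16_t min = UINT16_MAX;
	unsigned int i;

	for (i = 0; i < n; i++)
		if (limits[i] < min)
			min = limits[i];
	return min;
}

int main(void)
{
	uint16_t slave_max_rxq[] = { 128, 64, 256 };

	/* The bond can only promise what every slave supports: 64. */
	printf("%u\n", min_limit(slave_max_rxq, 3));
	return 0;
}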
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 9dca8da1..19afdac9 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -2136,6 +2136,7 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 {
 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
+	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
 
 #define GET_STAT(name) \
 	t4_read_reg64(adap, \
@@ -2168,6 +2169,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
 	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
 
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+		if (stat_ctl & F_COUNTPAUSESTATTX) {
+			p->tx_frames -= p->tx_pause;
+			p->tx_octets -= p->tx_pause * 64;
+		}
+		if (stat_ctl & F_COUNTPAUSEMCTX)
+			p->tx_mcast_frames -= p->tx_pause;
+	}
+
 	p->rx_octets = GET_STAT(RX_PORT_BYTES);
 	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
 	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
@@ -2195,6 +2205,16 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
 	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
 	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+		if (stat_ctl & F_COUNTPAUSESTATRX) {
+			p->rx_frames -= p->rx_pause;
+			p->rx_octets -= p->rx_pause * 64;
+		}
+		if (stat_ctl & F_COUNTPAUSEMCRX)
+			p->rx_mcast_frames -= p->rx_pause;
+	}
+
 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index 9057e409..9c132dcd 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -553,6 +553,24 @@
 #define V_VF(x) ((x) << S_VF)
 #define G_VF(x) (((x) >> S_VF) & M_VF)
 
+#define A_MPS_STAT_CTL 0x9600
+
+#define S_COUNTPAUSEMCRX 5
+#define V_COUNTPAUSEMCRX(x) ((x) << S_COUNTPAUSEMCRX)
+#define F_COUNTPAUSEMCRX V_COUNTPAUSEMCRX(1U)
+
+#define S_COUNTPAUSESTATRX 4
+#define V_COUNTPAUSESTATRX(x) ((x) << S_COUNTPAUSESTATRX)
+#define F_COUNTPAUSESTATRX V_COUNTPAUSESTATRX(1U)
+
+#define S_COUNTPAUSEMCTX 3
+#define V_COUNTPAUSEMCTX(x) ((x) << S_COUNTPAUSEMCTX)
+#define F_COUNTPAUSEMCTX V_COUNTPAUSEMCTX(1U)
+
+#define S_COUNTPAUSESTATTX 2
+#define V_COUNTPAUSESTATTX(x) ((x) << S_COUNTPAUSESTATTX)
+#define F_COUNTPAUSESTATTX V_COUNTPAUSESTATTX(1U)
+
 #define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
 #define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
 #define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index b7f28ebb..06437c1b 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -654,8 +654,6 @@ static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
 	cxgbe_stats_get(pi, &ps);
 
 	/* RX Stats */
-	eth_stats->ipackets = ps.rx_frames;
-	eth_stats->ibytes   = ps.rx_octets;
 	eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
 			      ps.rx_ovflow2 + ps.rx_ovflow3 +
 			      ps.rx_trunc0 + ps.rx_trunc1 +
@@ -675,6 +673,8 @@ static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
 
 		eth_stats->q_ipackets[i] = rxq->stats.pkts;
 		eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+		eth_stats->ipackets += eth_stats->q_ipackets[i];
+		eth_stats->ibytes += eth_stats->q_ibytes[i];
 	}
 
 	for (i = 0; i < pi->n_tx_qsets; i++) {
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 6c25c8da..d648789c 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -82,7 +82,7 @@
 #define E1000_MAX_FLEX_FILTER_DWDS \
 	(E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
 #define E1000_FLEX_FILTERS_MASK_SIZE \
-	(E1000_MAX_FLEX_FILTER_DWDS / 4)
+	(E1000_MAX_FLEX_FILTER_DWDS / 2)
 #define E1000_FHFT_QUEUEING_LEN 0x0000007F
 #define E1000_FHFT_QUEUEING_QUEUE 0x00000700
 #define E1000_FHFT_QUEUEING_PRIO 0x00070000
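Note: the corrected mask size follows from the flex-filter geometry, assuming E1000_MAX_FLEX_FILTER_LEN is 128 bytes as in this header: 128 filter bytes need one mask bit per byte, i.e. 16 mask bytes, which is DWDS / 2 (32 / 2), not DWDS / 4. The arithmetic as a compile-time check (C11):

#include <stdint.h>

#define E1000_MAX_FLEX_FILTER_LEN 128	/* bytes, per e1000_ethdev.h */
#define E1000_MAX_FLEX_FILTER_DWDS \
	(E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))	/* 32 dwords */
#define E1000_FLEX_FILTERS_MASK_SIZE \
	(E1000_MAX_FLEX_FILTER_DWDS / 2)		/* 16 bytes */

/* One mask bit per filter byte: 128 / 8 == 16 == DWDS / 2. */
_Static_assert(E1000_FLEX_FILTERS_MASK_SIZE ==
	       E1000_MAX_FLEX_FILTER_LEN / 8, "one mask bit per byte");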
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index be2600d4..9cf619fa 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -129,7 +129,7 @@ static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
 			struct rte_eth_fc_conf *fc_conf);
 static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
 			struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
 static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
@@ -1381,7 +1381,9 @@ eth_igb_start(struct rte_eth_dev *dev)
 	if (rte_intr_allow_others(intr_handle)) {
 		/* check if lsc interrupt is enabled */
 		if (dev->data->dev_conf.intr_conf.lsc != 0)
-			eth_igb_lsc_interrupt_setup(dev);
+			eth_igb_lsc_interrupt_setup(dev, TRUE);
+		else
+			eth_igb_lsc_interrupt_setup(dev, FALSE);
 	} else {
 		rte_intr_callback_unregister(intr_handle,
 					     eth_igb_interrupt_handler,
@@ -2543,18 +2545,23 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
  *
  * @param dev
  *  Pointer to struct rte_eth_dev.
+ * @param on
+ *  Enable or Disable
  *
  * @return
  *  - On success, zero.
  *  - On failure, a negative value.
 */
 static int
-eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-	intr->mask |= E1000_ICR_LSC;
+	if (on)
+		intr->mask |= E1000_ICR_LSC;
+	else
+		intr->mask &= ~E1000_ICR_LSC;
 
 	return 0;
 }
@@ -3736,10 +3743,6 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
 	}
 	wufc = E1000_READ_REG(hw, E1000_WUFC);
-	if (flex_filter->index < E1000_MAX_FHFT)
-		reg_off = E1000_FHFT(flex_filter->index);
-	else
-		reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
 
 	if (add) {
 		if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
@@ -3769,6 +3772,11 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
 			return -ENOSYS;
 		}
 
+		if (flex_filter->index < E1000_MAX_FHFT)
+			reg_off = E1000_FHFT(flex_filter->index);
+		else
+			reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
+
 		E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
 				(E1000_WUFC_FLX0 << flex_filter->index));
 		queueing = filter->len |
@@ -3797,6 +3805,11 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
 			return -ENOENT;
 		}
 
+		if (it->index < E1000_MAX_FHFT)
+			reg_off = E1000_FHFT(it->index);
+		else
+			reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+
 		for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
 			E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
 		E1000_WRITE_REG(hw, E1000_WUFC, wufc &
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index dbd37acc..556d4605 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -2179,9 +2179,11 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 
 	/* Enable both L3/L4 rx checksum offload */
 	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
-		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
+				E1000_RXCSUM_CRCOFL);
 	else
-		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
+				E1000_RXCSUM_CRCOFL);
 
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 1fc3654e..6efe0c3c 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -677,11 +677,10 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
 
 static void ena_tx_queue_release_bufs(struct ena_ring *ring)
 {
-	unsigned int ring_mask = ring->ring_size - 1;
+	unsigned int i;
 
-	while (ring->next_to_clean != ring->next_to_use) {
-		struct ena_tx_buffer *tx_buf =
-			&ring->tx_buffer_info[ring->next_to_clean & ring_mask];
+	for (i = 0; i < ring->ring_size; ++i) {
+		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
 
 		if (tx_buf->mbuf)
 			rte_pktmbuf_free(tx_buf->mbuf);
@@ -1683,6 +1682,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Free whole mbuf chain */
 		mbuf = tx_info->mbuf;
 		rte_pktmbuf_free(mbuf);
+		tx_info->mbuf = NULL;
 
 		/* Put back descriptor to the ring for reuse */
 		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 84e4840a..1cd031ac 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -645,7 +645,7 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
 
 int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
 {
-	u64 a0, a1 = 0;
+	u64 a0 = 0, a1 = 0;
 	int wait = 1000;
 	int err, i;
 
@@ -1021,7 +1021,7 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
 	struct filter_v2 *data)
 {
-	u64 a0, a1;
+	u64 a0 = 0, a1 = 0;
 	int wait = 1000;
 	dma_addr_t tlv_pa;
 	int ret = -EINVAL;
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 912ea157..1934f8ba 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -426,7 +426,8 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 		tail_idx = enic_ring_incr(desc_count, tail_idx);
 	}
 
-	rte_mempool_put_bulk(pool, (void **)free, nb_free);
+	if (nb_free > 0)
+		rte_mempool_put_bulk(pool, (void **)free, nb_free);
 	wq->tail_idx = tail_idx;
 	wq->ring.desc_avail += nb_to_free;
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 32b0ea93..d04efdc6 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -83,6 +83,7 @@ static void fm10k_rx_queue_release(void *queue);
 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
 static int fm10k_check_ftag(struct rte_devargs *devargs);
+static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
 
 struct fm10k_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -1164,6 +1165,8 @@ fm10k_dev_start(struct rte_eth_dev *dev)
 	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
 		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
 
+	fm10k_link_update(dev, 0);
+
 	return 0;
 }
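Note: the ENA teardown above now sweeps every ring slot instead of the next_to_clean..next_to_use window, and eth_ena_xmit_pkts() clears the pointer after freeing, so the sweep and the datapath cannot release the same mbuf twice. The pattern in isolation, with plain free() standing in for rte_pktmbuf_free():

#include <stddef.h>
#include <stdlib.h>

struct tx_buffer {
	void *pkt;	/* stands in for the driver's mbuf pointer */
};

/* Release every buffer still attached to the ring. Scanning all
 * ring_size slots also catches buffers whose completions were never
 * processed; clearing each slot makes a second sweep harmless. */
static void
tx_ring_release_bufs(struct tx_buffer *ring, unsigned int ring_size)
{
	unsigned int i;

	for (i = 0; i < ring_size; ++i) {
		if (ring[i].pkt) {
			free(ring[i].pkt);
			ring[i].pkt = NULL;	/* avoid double free */
		}
	}
}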
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index fd0a7230..caa2e1eb 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -2805,7 +2805,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #define I40E_GLV_RUPP_MAX_INDEX 383
 #define I40E_GLV_RUPP_RUPP_SHIFT 0
 #define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_TEPC_MAX_INDEX 383
 #define I40E_GLV_TEPC_TEPC_SHIFT 0
 #define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4e4cd16a..65e10f3b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1591,11 +1591,15 @@ i40e_parse_link_speeds(uint16_t link_speeds)
 static int
 i40e_phy_conf_link(struct i40e_hw *hw,
 		   uint8_t abilities,
-		   uint8_t force_speed)
+		   uint8_t force_speed,
+		   bool is_up)
 {
 	enum i40e_status_code status;
 	struct i40e_aq_get_phy_abilities_resp phy_ab;
 	struct i40e_aq_set_phy_config phy_conf;
+	enum i40e_aq_phy_type cnt;
+	uint32_t phy_type_mask = 0;
+
 	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
 			I40E_AQ_PHY_FLAG_PAUSE_RX |
 			I40E_AQ_PHY_FLAG_PAUSE_RX |
@@ -1613,6 +1617,10 @@ i40e_phy_conf_link(struct i40e_hw *hw,
 	if (status)
 		return ret;
 
+	/* If link already up, no need to set up again */
+	if (is_up && phy_ab.phy_type != 0)
+		return I40E_SUCCESS;
+
 	memset(&phy_conf, 0, sizeof(phy_conf));
 
 	/* bits 0-2 use the values from get_phy_abilities_resp */
@@ -1623,13 +1631,21 @@ i40e_phy_conf_link(struct i40e_hw *hw,
 	if (abilities & I40E_AQ_PHY_AN_ENABLED)
 		phy_conf.link_speed = advt;
 	else
-		phy_conf.link_speed = force_speed;
+		phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
 
 	phy_conf.abilities = abilities;
 
+
+	/* To enable link, phy_type mask needs to include each type */
+	for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+		phy_type_mask |= 1 << cnt;
+
 	/* use get_phy_abilities_resp value for the rest */
-	phy_conf.phy_type = phy_ab.phy_type;
-	phy_conf.phy_type_ext = phy_ab.phy_type_ext;
+	phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
+	phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
+		I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
+		I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
 	phy_conf.fec_config = phy_ab.mod_type_ext;
 	phy_conf.eee_capability = phy_ab.eee_capability;
 	phy_conf.eeer = phy_ab.eeer_val;
@@ -1661,13 +1677,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 	abilities |= I40E_AQ_PHY_AN_ENABLED;
 	abilities |= I40E_AQ_PHY_LINK_ENABLED;
 
-	/* Skip changing speed on 40G interfaces, FW does not support */
-	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
-		speed = I40E_LINK_SPEED_UNKNOWN;
-		abilities |= I40E_AQ_PHY_AN_ENABLED;
-	}
-
-	return i40e_phy_conf_link(hw, abilities, speed);
+	return i40e_phy_conf_link(hw, abilities, speed, true);
 }
 
 static int
@@ -1992,7 +2002,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
-	return i40e_phy_conf_link(hw, abilities, speed);
+	return i40e_phy_conf_link(hw, abilities, speed, false);
 }
 
 int
@@ -2096,6 +2106,10 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
 			    vsi->offset_loaded, &oes->rx_broadcast,
 			    &nes->rx_broadcast);
+	/* exclude CRC bytes */
+	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
+		nes->rx_broadcast) * ETHER_CRC_LEN;
+
 	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
 			    &oes->rx_discards, &nes->rx_discards);
 	/* GLV_REPC not supported */
@@ -2115,6 +2129,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
 			    vsi->offset_loaded, &oes->tx_broadcast,
 			    &nes->tx_broadcast);
+	/* exclude CRC bytes */
+	nes->tx_bytes -= (nes->tx_unicast + nes->tx_multicast +
+		nes->tx_broadcast) * ETHER_CRC_LEN;
 	/* GLV_TDPC not supported */
 	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
 			    &oes->tx_errors, &nes->tx_errors);
@@ -2146,6 +2163,19 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
 	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
 
+	/* Get rx/tx bytes of internal transfer packets */
+	i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
+			I40E_GLV_GORCL(hw->port),
+			pf->offset_loaded,
+			&pf->internal_rx_bytes_offset,
+			&pf->internal_rx_bytes);
+
+	i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
+			I40E_GLV_GOTCL(hw->port),
+			pf->offset_loaded,
+			&pf->internal_tx_bytes_offset,
+			&pf->internal_tx_bytes);
+
 	/* Get statistics of struct i40e_eth_stats */
 	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
 			    I40E_GLPRT_GORCL(hw->port),
@@ -2167,7 +2197,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
 	 * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
 	 */
 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
-		ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+		ns->eth.rx_broadcast) * ETHER_CRC_LEN + pf->internal_rx_bytes;
 
 	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
 			    pf->offset_loaded, &os->eth.rx_discards,
@@ -2195,7 +2225,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
 			    pf->offset_loaded, &os->eth.tx_broadcast,
 			    &ns->eth.tx_broadcast);
 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
-		ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+		ns->eth.tx_broadcast) * ETHER_CRC_LEN + pf->internal_tx_bytes;
 	/* GLPRT_TEPC not supported */
 
 	/* additional port specific stats */
@@ -3992,6 +4022,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
 		if (enabled_tcmap & (1 << i))
 			total_tc++;
+	if (total_tc == 0)
+		total_tc = 1;
 	vsi->enabled_tc = enabled_tcmap;
 
 	/* Number of queues per enabled TC */
@@ -4920,6 +4952,10 @@ i40e_pf_setup(struct i40e_pf *pf)
 	pf->offset_loaded = FALSE;
 	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
 	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
+	pf->internal_rx_bytes = 0;
+	pf->internal_tx_bytes = 0;
+	pf->internal_rx_bytes_offset = 0;
+	pf->internal_tx_bytes_offset = 0;
 
 	ret = i40e_pf_get_switch_config(pf);
 	if (ret != I40E_SUCCESS) {
@@ -8317,8 +8353,9 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
  */
 
 /* For both X710 and XL710 */
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1	0x10000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2	0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0		0x26CE00
 
 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
 #define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
@@ -8370,8 +8407,12 @@ i40e_configure_registers(struct i40e_hw *hw)
 				reg_table[i].val =
 					I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
 			else /* For X710/XL710/XXV710 */
-				reg_table[i].val =
-					I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+				if (hw->aq.fw_maj_ver < 6)
+					reg_table[i].val =
+						I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
+				else
+					reg_table[i].val =
+						I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
 		}
 
 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
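Note: the i40e_stat_update_48() calls above read free-running 48-bit hardware counters relative to a saved offset. A sketch of the wrap-safe delta such a helper has to perform; this mirrors the documented behaviour of those registers, not the driver's exact code:

#include <stdbool.h>
#include <stdint.h>

#define STAT_WIDTH_BITS 48
#define STAT_MASK ((1ULL << STAT_WIDTH_BITS) - 1)

/* Accumulate a free-running 48-bit hardware counter. 'offset' holds
 * the raw reading captured when the statistics were last reset. */
static void
stat_update_48(uint64_t raw, bool offset_loaded,
	       uint64_t *offset, uint64_t *stat)
{
	if (!offset_loaded)
		*offset = raw;		/* first read: establish baseline */
	if (raw >= *offset)
		*stat = raw - *offset;
	else				/* counter wrapped past 2^48 */
		*stat = raw + ((1ULL << STAT_WIDTH_BITS) - *offset);
	*stat &= STAT_MASK;
}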
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 5f3ecd9a..f2833197 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -431,6 +431,11 @@ struct i40e_pf {
 
 	struct i40e_hw_port_stats stats_offset;
 	struct i40e_hw_port_stats stats;
+	/* internal packet byte count, it should be excluded from the total */
+	uint64_t internal_rx_bytes;
+	uint64_t internal_tx_bytes;
+	uint64_t internal_rx_bytes_offset;
+	uint64_t internal_tx_bytes_offset;
 	bool offset_loaded;
 
 	struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15c..e78610b8 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -294,8 +294,12 @@ i40e_fdir_teardown(struct i40e_pf *pf)
 	vsi = pf->fdir.fdir_vsi;
 	if (!vsi)
 		return;
-	i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
-	i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+	int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
+	if (err)
+		PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
+	err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+	if (err)
+		PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
 	i40e_dev_rx_queue_release(pf->fdir.rxq);
 	pf->fdir.rxq = NULL;
 	i40e_dev_tx_queue_release(pf->fdir.txq);
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 602e40c9..ba33b2a5 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2324,7 +2324,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 	case I40E_FLAG_HEADER_SPLIT_DISABLED:
 	default:
 		rxq->rx_hdr_len = 0;
-		rxq->rx_buf_len = RTE_ALIGN(buf_size,
+		rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size,
 			(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
 		rxq->hs_mode = i40e_header_split_none;
 		break;
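Note: RTE_ALIGN() rounds up, so in the i40e_rxtx.c hunk above a mbuf data room of, say, 2000 bytes would be programmed as a 2048-byte receive buffer and the NIC could write past the mbuf; RTE_ALIGN_FLOOR() rounds down to a safe 1920. A worked example with locally defined macros of the same semantics (align must be a power of two):

#include <stdio.h>

#define ALIGN_FLOOR(v, a) ((v) & ~((a) - 1))
#define ALIGN_CEIL(v, a)  ALIGN_FLOOR((v) + (a) - 1, (a))

int main(void)
{
	/* e.g. a 2000-byte data room, in 128-byte units
	 * (1 << I40E_RXQ_CTX_DBUFF_SHIFT in the hunk above). */
	unsigned int buf_size = 2000, unit = 128;

	printf("ceil:  %u\n", ALIGN_CEIL(buf_size, unit));	/* 2048: overruns */
	printf("floor: %u\n", ALIGN_FLOOR(buf_size, unit));	/* 1920: fits */
	return 0;
}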
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index d6686f6c..f994fedc 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5044,6 +5044,9 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 	if (ixgbe_vmdq_mode_check(hw) < 0)
 		return -ENOTSUP;
 
+	if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+		return -EINVAL;
+
 	memset(&mr_info->mr_conf[rule_id], 0,
 	       sizeof(struct rte_eth_mirror_conf));
 
@@ -5197,7 +5200,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 		tmp |= (msix_vector << (8 * (queue & 0x3)));
 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
 	} else if ((hw->mac.type == ixgbe_mac_82599EB) ||
-			(hw->mac.type == ixgbe_mac_X540)) {
+			(hw->mac.type == ixgbe_mac_X540) ||
+			(hw->mac.type == ixgbe_mac_X550)) {
 		if (direction == -1) {
 			/* other causes */
 			idx = ((queue & 1) * 8);
@@ -5303,6 +5307,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
 		break;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
 		ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
 		break;
 	default:
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 83f9143b..4a03d342 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -3157,6 +3157,13 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			NB_SEGS(rep) = 0x2a;
 			PORT(rep) = 0x2a;
 			rep->ol_flags = -1;
+			/*
+			 * Clear special flags in mbuf to avoid
+			 * crashing while freeing.
+			 */
+			rep->ol_flags &=
+				~(uint64_t)(IND_ATTACHED_MBUF |
+					    CTRL_MBUF_FLAG);
 #endif
 			assert(rep->buf_len == seg->buf_len);
 			/* Reconfigure sge to use rep instead of seg. */
@@ -5618,8 +5625,10 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		ibv_dev = list[i];
 
 		DEBUG("device opened");
-		if (ibv_query_device(attr_ctx, &device_attr))
+		if (ibv_query_device(attr_ctx, &device_attr)) {
+			err = EINVAL;
 			goto error;
+		}
 		INFO("%u port(s) detected", device_attr.phys_port_cnt);
 
 		for (i = 0; i < device_attr.phys_port_cnt; i++) {
@@ -5645,19 +5654,23 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		DEBUG("using port %u (%08" PRIx32 ")", port, test);
 
 		ctx = ibv_open_device(ibv_dev);
-		if (ctx == NULL)
+		if (ctx == NULL) {
+			err = ENODEV;
 			goto port_error;
+		}
 
 		/* Check port status. */
 		err = ibv_query_port(ctx, port, &port_attr);
 		if (err) {
 			ERROR("port query failed: %s", strerror(err));
+			err = ENODEV;
 			goto port_error;
 		}
 
 		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
 			ERROR("port %d is not configured in Ethernet mode",
 			      port);
+			err = EINVAL;
 			goto port_error;
 		}
 
@@ -5694,6 +5707,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 #ifdef HAVE_EXP_QUERY_DEVICE
 		if (ibv_exp_query_device(ctx, &exp_device_attr)) {
 			ERROR("ibv_exp_query_device() failed");
+			err = ENODEV;
 			goto port_error;
 		}
 #ifdef RSS_SUPPORT
@@ -5769,6 +5783,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		if (priv_get_mac(priv, &mac.addr_bytes)) {
 			ERROR("cannot get MAC address, is mlx4_en loaded?"
			      " (errno: %s)", strerror(errno));
+			err = ENODEV;
 			goto port_error;
 		}
 		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
" (errno: %s)", strerror(errno)); + err = ENODEV; goto port_error; } INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 0aa274e6..ca981a53 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -119,6 +119,7 @@ struct ethtool_link_settings { #define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38 #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39 #endif +#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 (SCHAR_MAX) /** * Return private structure associated with an Ethernet device. @@ -754,9 +755,12 @@ static int mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) { struct priv *priv = mlx5_get_priv(dev); - struct ethtool_link_settings edata = { - .cmd = ETHTOOL_GLINKSETTINGS, - }; + __extension__ struct { + struct ethtool_link_settings edata; + uint32_t link_mode_data[3 * + ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32]; + } ecmd; + struct ifreq ifr; struct rte_eth_link dev_link; uint64_t sc; @@ -769,15 +773,23 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); - ifr.ifr_data = (void *)&edata; + memset(&ecmd, 0, sizeof(ecmd)); + ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS; + ifr.ifr_data = (void *)&ecmd; if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", strerror(errno)); return -1; } - dev_link.link_speed = edata.speed; - sc = edata.link_mode_masks[0] | - ((uint64_t)edata.link_mode_masks[1] << 32); + ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords; + if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { + DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", + strerror(errno)); + return -1; + } + dev_link.link_speed = ecmd.edata.speed; + sc = ecmd.edata.link_mode_masks[0] | + ((uint64_t)ecmd.edata.link_mode_masks[1] << 32); priv->link_speed_capa = 0; if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT) priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG; @@ -813,7 +825,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT | ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)) priv->link_speed_capa |= ETH_LINK_SPEED_100G; - dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ? + dev_link.link_duplex = ((ecmd.edata.duplex == DUPLEX_HALF) ? ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c index 1acf6826..d4c19e5a 100644 --- a/drivers/net/mlx5/mlx5_fdir.c +++ b/drivers/net/mlx5/mlx5_fdir.c @@ -142,6 +142,7 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter, case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: desc->src_port = fdir_filter->input.flow.udp4_flow.src_port; desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port; + /* fallthrough */ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip; desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip; @@ -731,9 +732,11 @@ priv_fdir_disable(struct priv *priv) /* Destroy flow director context in each RX queue. 
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index 1acf6826..d4c19e5a 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -142,6 +142,7 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
 		desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
 		desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
+		/* fallthrough */
 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
 		desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
 		desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
@@ -731,9 +732,11 @@ priv_fdir_disable(struct priv *priv)
 
 	/* Destroy flow director context in each RX queue. */
 	for (i = 0; (i != priv->rxqs_n); i++) {
-		struct rxq_ctrl *rxq_ctrl =
-			container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
+		struct rxq_ctrl *rxq_ctrl;
 
+		if (!(*priv->rxqs)[i])
+			continue;
+		rxq_ctrl = container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
 		if (!rxq_ctrl->fdir_queue)
 			continue;
 		priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 118f6d67..aea203b3 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -868,12 +868,16 @@ static inline int
 rxq_setup(struct rxq_ctrl *tmpl)
 {
 	struct ibv_cq *ibcq = tmpl->cq;
-	struct mlx5_cq *cq = to_mxxx(cq, cq);
+	struct ibv_mlx5_cq_info cq_info;
 	struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
 	struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] =
 		rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
 
-	if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+	if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
+		ERROR("Unable to query CQ info. check your OFED.");
+		return ENOTSUP;
+	}
+	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
 		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
 		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
 		return EINVAL;
@@ -881,16 +885,16 @@ rxq_setup(struct rxq_ctrl *tmpl)
 	if (elts == NULL)
 		return ENOMEM;
 	tmpl->rxq.rq_db = rwq->rq.db;
-	tmpl->rxq.cqe_n = log2above(ibcq->cqe);
+	tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
 	tmpl->rxq.cq_ci = 0;
 	tmpl->rxq.rq_ci = 0;
-	tmpl->rxq.cq_db = cq->dbrec;
+	tmpl->rxq.cq_db = cq_info.dbrec;
 	tmpl->rxq.wqes =
 		(volatile struct mlx5_wqe_data_seg (*)[])
 		(uintptr_t)rwq->rq.buff;
 	tmpl->rxq.cqes =
 		(volatile struct mlx5_cqe (*)[])
-		(uintptr_t)cq->active_buf->buf;
+		(uintptr_t)cq_info.buf;
 	tmpl->rxq.elts = elts;
 	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index c2863671..d0c4dce0 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -115,7 +115,7 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
 		struct rte_mbuf *elt = (*elts)[elts_tail];
 
 		assert(elt != NULL);
-		rte_pktmbuf_free(elt);
+		rte_pktmbuf_free_seg(elt);
 #ifndef NDEBUG
 		/* Poisoning. */
 		memset(&(*elts)[elts_tail],
@@ -205,14 +205,18 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
 {
 	struct mlx5_qp *qp = to_mqp(tmpl->qp);
 	struct ibv_cq *ibcq = tmpl->cq;
-	struct mlx5_cq *cq = to_mxxx(cq, cq);
+	struct ibv_mlx5_cq_info cq_info;
 
-	if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+	if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
+		ERROR("Unable to query CQ info. check your OFED.");
+		return ENOTSUP;
+	}
+	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
 		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
 		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
 		return EINVAL;
 	}
-	tmpl->txq.cqe_n = log2above(ibcq->cqe);
+	tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
 	tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
 	tmpl->txq.wqes =
 		(volatile struct mlx5_wqe64 (*)[])
@@ -220,10 +224,10 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
 	tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
 	tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
 	tmpl->txq.bf_reg = qp->gen_data.bf->reg;
-	tmpl->txq.cq_db = cq->dbrec;
+	tmpl->txq.cq_db = cq_info.dbrec;
 	tmpl->txq.cqes =
 		(volatile struct mlx5_cqe (*)[])
-		(uintptr_t)cq->active_buf->buf;
+		(uintptr_t)cq_info.buf;
 	tmpl->txq.elts =
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
 		((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
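Note: the txq_free_elts() change above matters because that loop visits every element slot, and each slot holds exactly one segment; freeing the whole chain from each slot would double-free the later segments, while rte_pktmbuf_free_seg() releases only the one segment. The distinction on a toy segment list:

#include <stdlib.h>

struct seg {
	struct seg *next;
};

/* Loosely analogous to rte_pktmbuf_free(): releases a whole chain. */
static void
chain_free(struct seg *s)
{
	while (s) {
		struct seg *n = s->next;
		free(s);
		s = n;
	}
}

/* Loosely analogous to rte_pktmbuf_free_seg(): releases exactly one
 * segment. A ring that stores every segment in its own slot must use
 * this one, or slot i would free the segments still referenced by the
 * slots after it. */
static void
seg_free(struct seg *s)
{
	free(s);
}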
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 23221478..9d782ac7 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -203,9 +203,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 	DP_INFO(edev, "*********************************\n");
 	DP_INFO(edev, " DPDK version:%s\n", rte_version());
-	DP_INFO(edev, " Chip details : %s%d\n",
+	DP_INFO(edev, " Chip details : %s %c%d\n",
 		ECORE_IS_BB(edev) ? "BB" : "AH",
-		CHIP_REV_IS_A0(edev) ? 0 : 1);
+		'A' + edev->chip_rev,
+		(int)edev->chip_metal);
 	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
 		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
 	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 452ad2a5..67ebb1e5 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -423,7 +423,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 		}
 	}
 
-	memset(mz->addr, 0, sizeof(mz->len));
+	memset(mz->addr, 0, mz->len);
 
 	vq->vq_ring_mem = mz->phys_addr;
 	vq->vq_ring_virt_mem = mz->addr;
@@ -1477,37 +1477,19 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 {
 	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 	struct virtio_hw *hw = dev->data->dev_private;
-	uint64_t req_features;
-	int ret;
 
 	PMD_INIT_LOG(DEBUG, "configure");
 
-	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
-	if (rxmode->hw_ip_checksum)
-		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
-	if (rxmode->enable_lro)
-		req_features |=
-			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
-			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
-
-	/* if request features changed, reinit the device */
-	if (req_features != hw->req_guest_features) {
-		ret = virtio_init_device(dev, req_features);
-		if (ret < 0)
-			return ret;
-	}
-
-	if (rxmode->hw_ip_checksum &&
-	    !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
+	/* Virtio does L4 checksum but not L3! */
+	if (rxmode->hw_ip_checksum) {
 		PMD_DRV_LOG(NOTICE,
-			    "rx ip checksum not available on this host");
+			    "virtio does not support IP checksum");
 		return -ENOTSUP;
 	}
 
-	if (rxmode->enable_lro &&
-	    (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
-	     !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4))) {
+	if (rxmode->enable_lro) {
 		PMD_DRV_LOG(NOTICE,
-			    "lro not available on this host");
+			    "virtio does not support Large Receive Offload");
 		return -ENOTSUP;
 	}
 
@@ -1714,8 +1696,8 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	};
 
 	dev_info->rx_offload_capa =
 		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO;
+		DEV_RX_OFFLOAD_UDP_CKSUM;
+	dev_info->tx_offload_capa = 0;
 
 	if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
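Note: the one-character virtio_init_queue() fix above is worth spelling out: sizeof(mz->len) is the size of the length field (8 bytes on LP64), not the length itself, so only the first 8 bytes of the ring memory were being zeroed. A minimal reproduction with a hypothetical memzone struct:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct memzone {
	void *addr;
	size_t len;
};

int main(void)
{
	struct memzone mz = { .len = 4096 };
	mz.addr = malloc(mz.len);

	/* The bug: sizeof(mz.len) is sizeof(size_t) == 8 on LP64,
	 * so memset(mz.addr, 0, sizeof(mz.len)) clears 8 bytes only. */
	printf("would zero %zu of %zu bytes\n", sizeof(mz.len), mz.len);

	memset(mz.addr, 0, mz.len);	/* the fix: clear the full region */
	free(mz.addr);
	return 0;
}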
*/ + if (rxmode->hw_ip_checksum) { PMD_DRV_LOG(NOTICE, - "rx ip checksum not available on this host"); + "virtio does not support IP checksum"); return -ENOTSUP; } - if (rxmode->enable_lro && - (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) || - !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4))) { + if (rxmode->enable_lro) { PMD_DRV_LOG(NOTICE, - "lro not available on this host"); + "virtio does not support Large Receive Offload"); return -ENOTSUP; } @@ -1714,8 +1696,8 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_LRO; + DEV_RX_OFFLOAD_UDP_CKSUM; + dev_info->tx_offload_capa = 0; if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) { diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c index f018724f..b1bd6234 100644 --- a/drivers/net/virtio/virtio_user_ethdev.c +++ b/drivers/net/virtio/virtio_user_ethdev.c @@ -479,7 +479,6 @@ virtio_user_pmd_remove(const char *name) virtio_user_dev_uninit(dev); rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); return 0; diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c index f123df90..aedac6ca 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -831,7 +831,10 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev) struct vmxnet3_hw *hw = dev->data->dev_private; uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable; - memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE); + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE); + else + memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE); vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0); VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_VLAN_FILTERS); diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c index 3ded18ee..3056f4ff 100644 --- a/drivers/net/vmxnet3/vmxnet3_rxtx.c +++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -734,6 +734,12 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) (int)(rcd - (struct Vmxnet3_RxCompDesc *) rxq->comp_ring.base), rcd->rxdIdx); rte_pktmbuf_free_seg(rxm); + if (rxq->start_seg) { + struct rte_mbuf *start = rxq->start_seg; + + rxq->start_seg = NULL; + rte_pktmbuf_free(start); + } goto rcd_done; } |