Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c | 44
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c     |  2
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c   | 41
3 files changed, 68 insertions, 19 deletions
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index f994fedc..73996bbe 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -3230,6 +3230,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	link.link_status = ETH_LINK_DOWN;
 	link.link_speed = 0;
 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = ETH_LINK_AUTONEG;
 	memset(&old, 0, sizeof(old));
 	rte_ixgbe_dev_atomic_read_link_status(dev, &old);

@@ -4250,6 +4251,15 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	}
 	ixgbevf_configure_msix(dev);

+	/* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
+	 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
+	 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
+	 * is not cleared, it will fail when following rte_intr_enable( ) tries
+	 * to map Rx queue interrupt to other VFIO vectors.
+	 * So clear uio/vfio intr/evevnfd first to avoid failure.
+	 */
+	rte_intr_disable(intr_handle);
+
 	rte_intr_enable(intr_handle);

 	/* Re-enable interrupt for VF */
@@ -5013,13 +5023,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);

 	/* write pool mirrror control register */
-	if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
+	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
 				mp_msb);
 	}
 	/* write VLAN mirrror control register */
-	if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
+	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
 				mv_msb);
@@ -6778,6 +6788,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	struct ixgbe_dcb_config *dcb_config =
 			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
 	struct ixgbe_dcb_tc_config *tc;
+	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+	uint8_t nb_tcs;
 	uint8_t i, j;

 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
@@ -6785,19 +6797,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	else
 		dcb_info->nb_tcs = 1;

+	tc_queue = &dcb_info->tc_queue;
+	nb_tcs = dcb_info->nb_tcs;
+
 	if (dcb_config->vt_mode) { /* vt is enabled*/
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
-		for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
-			for (j = 0; j < dcb_info->nb_tcs; j++) {
-				dcb_info->tc_queue.tc_rxq[i][j].base =
-						i * dcb_info->nb_tcs + j;
-				dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
-				dcb_info->tc_queue.tc_txq[i][j].base =
-						i * dcb_info->nb_tcs + j;
-				dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+			for (j = 0; j < nb_tcs; j++) {
+				tc_queue->tc_rxq[0][j].base = j;
+				tc_queue->tc_rxq[0][j].nb_queue = 1;
+				tc_queue->tc_txq[0][j].base = j;
+				tc_queue->tc_txq[0][j].nb_queue = 1;
+			}
+		} else {
+			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+				for (j = 0; j < nb_tcs; j++) {
+					tc_queue->tc_rxq[i][j].base =
+						i * nb_tcs + j;
+					tc_queue->tc_rxq[i][j].nb_queue = 1;
+					tc_queue->tc_txq[i][j].base =
+						i * nb_tcs + j;
+					tc_queue->tc_txq[i][j].nb_queue = 1;
+				}
 			}
 		}
 	} else { /* vt is disabled*/
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 09440ccb..a760b1bb 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -263,7 +263,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)

 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
 	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
-	gpie |= IXGBE_GPIE_MSIX_MODE;
+	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;

 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
 	case ETH_64_POOLS:
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index d1e300a5..8b18b530 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -3395,12 +3395,19 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 		dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
 		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
 	}
+
+	/* Initialize User Priority to Traffic Class mapping */
+	for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+	}
+
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
-				(uint8_t)(1 << j);
+		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+				(uint8_t)(1 << i);
 	}
 }

@@ -3422,12 +3429,18 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
 		dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
 	}

+	/* Initialize User Priority to Traffic Class mapping */
+	for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+	}
+
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
-				(uint8_t)(1 << j);
+		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+				(uint8_t)(1 << i);
 	}
 }

@@ -3443,12 +3456,18 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
 	dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
 	dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;

+	/* Initialize User Priority to Traffic Class mapping */
+	for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+	}
+
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
-				(uint8_t)(1 << j);
+		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+				(uint8_t)(1 << i);
 	}
 }

@@ -3464,12 +3483,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
 	dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
 	dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;

+	/* Initialize User Priority to Traffic Class mapping */
+	for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+		tc = &dcb_config->tc_config[j];
+		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+	}
+
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
 		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
-		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
-				(uint8_t)(1 << j);
+		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+				(uint8_t)(1 << i);
 	}
 }
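
Note on the recurring ixgbe_rxtx.c change above: up_to_tc_bitmap holds, per traffic class, the set of user priorities mapped onto that TC, so the bit to set is the user-priority index (1 << i), not the TC index (1 << j), and it has to be OR-accumulated into a freshly cleared bitmap so that a TC serving several priorities keeps all of their bits. The following is an illustrative, self-contained sketch of that construction only; the macro and array names are hypothetical stand-ins, not the driver's own structures.

/*
 * Illustrative sketch, not part of the commit. NUM_UP / NUM_TC and the
 * arrays below stand in for ETH_DCB_NUM_USER_PRIORITIES,
 * IXGBE_DCB_MAX_TRAFFIC_CLASS and the ixgbe DCB config structures.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_UP 8
#define NUM_TC 8

int main(void)
{
	/* Example UP -> TC table: priorities 0-3 go to TC 0, 4-7 to TC 1 */
	const uint8_t dcb_tc[NUM_UP] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	uint8_t up_to_tc_bitmap[NUM_TC] = { 0 };  /* cleared first, as the patch now does */
	uint8_t i, j;

	for (i = 0; i < NUM_UP; i++) {
		j = dcb_tc[i];
		/* OR in the user-priority bit (1 << i), not the TC bit (1 << j) */
		up_to_tc_bitmap[j] |= (uint8_t)(1 << i);
	}

	/* Prints TC0: 0x0f and TC1: 0xf0; the previous "= (1 << j)" form
	 * would have left TC0 at 0x01 and TC1 at 0x02, dropping priorities.
	 */
	for (j = 0; j < NUM_TC; j++)
		printf("TC%d: 0x%02x\n", j, (unsigned int)up_to_tc_bitmap[j]);
	return 0;
}

The ixgbe_ethdev.c mirror-rule hunk is the same kind of fix in spirit: rule_type is a bit mask that may carry ETH_MIRROR_VIRTUAL_POOL_UP and ETH_MIRROR_VLAN at the same time, so it must be tested with & rather than compared with ==.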