Diffstat (limited to 'drivers/net/fm10k')
-rw-r--r--   drivers/net/fm10k/fm10k.h        |  10
-rw-r--r--   drivers/net/fm10k/fm10k_ethdev.c | 131
-rw-r--r--   drivers/net/fm10k/fm10k_rxtx.c   |  78
3 files changed, 146 insertions, 73 deletions
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index ef307809..dc814855 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -106,9 +106,6 @@
 #define FM10K_MISC_VEC_ID       RTE_INTR_VEC_ZERO_OFFSET
 #define FM10K_RX_VEC_START      RTE_INTR_VEC_RXTX_OFFSET

-#define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
-				ETH_TXQ_FLAGS_NOOFFLOADS)
-
 struct fm10k_macvlan_filter_info {
 	uint16_t vlan_num;       /* Total VLAN number */
 	uint16_t mac_num;        /* Total mac number */
@@ -329,6 +326,13 @@ uint16_t fm10k_recv_scattered_pkts(void *rx_queue,
 int
 fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);

+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+
+int
+fm10k_dev_tx_descriptor_status(void *rx_queue, uint16_t offset);
+
+
 uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts);

diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 3ff1b0e0..541a49b7 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -451,8 +451,10 @@ fm10k_dev_configure(struct rte_eth_dev *dev)

 	PMD_INIT_FUNC_TRACE();

-	if ((dev->data->dev_conf.rxmode.offloads &
-	     DEV_RX_OFFLOAD_CRC_STRIP) == 0)
+	/* KEEP_CRC offload flag is not supported by PMD
+	 * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+	 */
+	if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
 		PMD_INIT_LOG(WARNING, "fm10k always strip CRC");

 	/* multipe queue mode checking */
@@ -808,52 +810,50 @@ static int
 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int err = -1;
+	int err;
 	uint32_t reg;
 	struct fm10k_rx_queue *rxq;

 	PMD_INIT_FUNC_TRACE();

-	if (rx_queue_id < dev->data->nb_rx_queues) {
-		rxq = dev->data->rx_queues[rx_queue_id];
-		err = rx_queue_reset(rxq);
-		if (err == -ENOMEM) {
-			PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
-			return err;
-		} else if (err == -EINVAL) {
-			PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
-				" %d", err);
-			return err;
-		}
+	rxq = dev->data->rx_queues[rx_queue_id];
+	err = rx_queue_reset(rxq);
+	if (err == -ENOMEM) {
+		PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+		return err;
+	} else if (err == -EINVAL) {
+		PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+			" %d", err);
+		return err;
+	}

-		/* Setup the HW Rx Head and Tail Descriptor Pointers
-		 * Note: this must be done AFTER the queue is enabled on real
-		 * hardware, but BEFORE the queue is enabled when using the
-		 * emulation platform. Do it in both places for now and remove
-		 * this comment and the following two register writes when the
-		 * emulation platform is no longer being used.
-		 */
-		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
-		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+	/* Setup the HW Rx Head and Tail Descriptor Pointers
+	 * Note: this must be done AFTER the queue is enabled on real
+	 * hardware, but BEFORE the queue is enabled when using the
+	 * emulation platform. Do it in both places for now and remove
+	 * this comment and the following two register writes when the
+	 * emulation platform is no longer being used.
+	 */
+	FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+	FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);

-		/* Set PF ownership flag for PF devices */
-		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
-		if (hw->mac.type == fm10k_mac_pf)
-			reg |= FM10K_RXQCTL_PF;
-		reg |= FM10K_RXQCTL_ENABLE;
-		/* enable RX queue */
-		FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
-		FM10K_WRITE_FLUSH(hw);
+	/* Set PF ownership flag for PF devices */
+	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+	if (hw->mac.type == fm10k_mac_pf)
+		reg |= FM10K_RXQCTL_PF;
+	reg |= FM10K_RXQCTL_ENABLE;
+	/* enable RX queue */
+	FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+	FM10K_WRITE_FLUSH(hw);

-		/* Setup the HW Rx Head and Tail Descriptor Pointers
-		 * Note: this must be done AFTER the queue is enabled
-		 */
-		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
-		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
-		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
-	}
+	/* Setup the HW Rx Head and Tail Descriptor Pointers
+	 * Note: this must be done AFTER the queue is enabled
+	 */
+	FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+	FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

-	return err;
+	return 0;
 }

 static int
@@ -863,14 +863,12 @@ fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)

 	PMD_INIT_FUNC_TRACE();

-	if (rx_queue_id < dev->data->nb_rx_queues) {
-		/* Disable RX queue */
-		rx_queue_disable(hw, rx_queue_id);
+	/* Disable RX queue */
+	rx_queue_disable(hw, rx_queue_id);

-		/* Free mbuf and clean HW ring */
-		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
-		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
-	}
+	/* Free mbuf and clean HW ring */
+	rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

 	return 0;
 }
@@ -882,28 +880,23 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	/** @todo - this should be defined in the shared code */
 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
 	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
-	int err = 0;
+	struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];

 	PMD_INIT_FUNC_TRACE();

-	if (tx_queue_id < dev->data->nb_tx_queues) {
-		struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
-
-		q->ops->reset(q);
+	q->ops->reset(q);

-		/* reset head and tail pointers */
-		FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
-		FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
+	/* reset head and tail pointers */
+	FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+	FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);

-		/* enable TX queue */
-		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
-				FM10K_TXDCTL_ENABLE | txdctl);
-		FM10K_WRITE_FLUSH(hw);
-		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
-	} else
-		err = -1;
+	/* enable TX queue */
+	FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+			FM10K_TXDCTL_ENABLE | txdctl);
+	FM10K_WRITE_FLUSH(hw);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

-	return err;
+	return 0;
 }

 static int
@@ -913,11 +906,9 @@ fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)

 	PMD_INIT_FUNC_TRACE();

-	if (tx_queue_id < dev->data->nb_tx_queues) {
-		tx_queue_disable(hw, tx_queue_id);
-		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
-		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
-	}
+	tx_queue_disable(hw, tx_queue_id);
+	tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

 	return 0;
 }
@@ -2837,6 +2828,8 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
 	.tx_queue_setup		= fm10k_tx_queue_setup,
 	.tx_queue_release	= fm10k_tx_queue_release,
 	.rx_descriptor_done	= fm10k_dev_rx_descriptor_done,
+	.rx_descriptor_status	= fm10k_dev_rx_descriptor_status,
+	.tx_descriptor_status	= fm10k_dev_tx_descriptor_status,
 	.rx_queue_intr_enable	= fm10k_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable	= fm10k_dev_rx_queue_intr_disable,
 	.reta_update		= fm10k_reta_update,
@@ -3290,9 +3283,7 @@ RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
 RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");

-RTE_INIT(fm10k_init_log);
-static void
-fm10k_init_log(void)
+RTE_INIT(fm10k_init_log)
 {
 	fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
 	if (fm10k_logtype_init >= 0)
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 9320748c..4a5b46ec 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -389,6 +389,84 @@ fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 	return ret;
 }

+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	volatile union fm10k_rx_desc *rxdp;
+	struct fm10k_rx_queue *rxq = rx_queue;
+	uint16_t nb_hold, trigger_last;
+	uint16_t desc;
+	int ret;
+
+	if (unlikely(offset >= rxq->nb_desc)) {
+		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
+		return 0;
+	}
+
+	if (rxq->next_trigger < rxq->alloc_thresh)
+		trigger_last = rxq->next_trigger +
+					rxq->nb_desc - rxq->alloc_thresh;
+	else
+		trigger_last = rxq->next_trigger - rxq->alloc_thresh;
+
+	if (rxq->next_dd < trigger_last)
+		nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last;
+	else
+		nb_hold = rxq->next_dd - trigger_last;
+
+	if (offset >= rxq->nb_desc - nb_hold)
+		return RTE_ETH_RX_DESC_UNAVAIL;
+
+	desc = rxq->next_dd + offset;
+	if (desc >= rxq->nb_desc)
+		desc -= rxq->nb_desc;
+
+	rxdp = &rxq->hw_ring[desc];
+
+	ret = !!(rxdp->w.status &
+		rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
+
+	return ret;
+}
+
+int
+fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+	volatile struct fm10k_tx_desc *txdp;
+	struct fm10k_tx_queue *txq = tx_queue;
+	uint16_t desc;
+	uint16_t next_rs = txq->nb_desc;
+	struct fifo rs_tracker = txq->rs_tracker;
+	struct fifo *r = &rs_tracker;
+
+	if (unlikely(offset >= txq->nb_desc))
+		return -EINVAL;
+
+	desc = txq->next_free + offset;
+	/* go to next desc that has the RS bit */
+	desc = (desc / txq->rs_thresh + 1) *
+		txq->rs_thresh - 1;
+
+	if (desc >= txq->nb_desc) {
+		desc -= txq->nb_desc;
+		if (desc >= txq->nb_desc)
+			desc -= txq->nb_desc;
+	}
+
+	r->head = r->list;
+	for ( ; r->head != r->endp; ) {
+		if (*r->head >= desc && *r->head < next_rs)
+			next_rs = *r->head;
+		++r->head;
+	}
+
+	txdp = &txq->hw_ring[next_rs];
+	if (txdp->flags & FM10K_TXD_FLAG_DONE)
+		return RTE_ETH_TX_DESC_DONE;
+
+	return RTE_ETH_TX_DESC_FULL;
+}
+
 /*
  * Free multiple TX mbuf at a time if they are in the same pool
  *
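
The two new callbacks are reached by applications through the generic ethdev descriptor-status API, rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status(). The following is only a minimal sketch of one possible use on top of this driver; the helper names, port/queue ids and the probing strategy are illustrative assumptions, not part of the patch.

#include <rte_ethdev.h>

/* Hypothetical helper: count how many received-but-unprocessed packets sit
 * in an Rx queue by walking descriptors until one is no longer DONE.
 * port_id, queue_id and max_scan are caller-chosen example parameters.
 */
static uint16_t
rx_queue_backlog(uint16_t port_id, uint16_t queue_id, uint16_t max_scan)
{
	uint16_t done = 0;

	while (done < max_scan &&
	       rte_eth_rx_descriptor_status(port_id, queue_id, done) ==
	       RTE_ETH_RX_DESC_DONE)
		done++;

	return done;
}

/* Hypothetical helper: return non-zero when the descriptor 'offset' slots
 * ahead of the Tx tail has already been handled by the NIC (or is unused),
 * i.e. a burst of up to 'offset' packets should not hit pending work.
 */
static int
tx_ring_has_room(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int status = rte_eth_tx_descriptor_status(port_id, queue_id, offset);

	return status == RTE_ETH_TX_DESC_DONE ||
	       status == RTE_ETH_TX_DESC_UNAVAIL;
}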