author     Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:52:30 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:53:17 +0100
commit     b63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree       83114aac64286fe616506c0b3dfaec2ab86ef835 /drivers/net/fm10k/fm10k_ethdev.c
parent     ca33590b6af032bff57d9cc70455660466a654b2 (diff)

New upstream version 18.08 (tag: upstream/18.08)

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/fm10k/fm10k_ethdev.c')
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c  228
1 file changed, 136 insertions(+), 92 deletions(-)
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 94237610..541a49b7 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -60,6 +60,13 @@ static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+
struct fm10k_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned offset;
@@ -444,8 +451,12 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
+
/* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
@@ -454,6 +465,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ dev->data->scattered_rx = 0;
+
return 0;
}
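
The old rxmode.hw_strip_crc flag is gone; CRC handling is now expressed
through rxmode.offloads. For reference, a minimal sketch of the helper the
hunk above calls, assuming the rte_ethdev.h definition shipped in 18.08:
KEEP_CRC is the absence of DEV_RX_OFFLOAD_CRC_STRIP, so a PMD that always
strips the CRC warns when that bit is unset.

    static inline int
    rte_eth_dev_must_keep_crc(uint64_t rx_offloads)
    {
        /* no CRC_STRIP bit requested means the caller wants to keep CRC */
        if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
            return 0;
        return 1;
    }
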
@@ -756,7 +769,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
/* It adds dual VLAN length for supporting dual VLAN */
if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
- dev->data->dev_conf.rxmode.enable_scatter) {
+ rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t reg;
dev->data->scattered_rx = 1;
reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
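
A worked example for the threshold above, assuming a hypothetical mempool
with a 2048-byte data room and RTE_PKTMBUF_HEADROOM of 128: buf_size is
1920, dual VLAN adds 2 * FM10K_VLAN_TAG_SIZE = 8 bytes, so any
max_rx_pkt_len above 1912 (or an explicit DEV_RX_OFFLOAD_SCATTER request)
switches the port to scattered Rx.
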
@@ -797,52 +810,50 @@ static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int err = -1;
+ int err;
uint32_t reg;
struct fm10k_rx_queue *rxq;
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
- err = rx_queue_reset(rxq);
- if (err == -ENOMEM) {
- PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
- return err;
- } else if (err == -EINVAL) {
- PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
- " %d", err);
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
+ err = rx_queue_reset(rxq);
+ if (err == -ENOMEM) {
+ PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+ return err;
+ } else if (err == -EINVAL) {
+ PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+ " %d", err);
+ return err;
+ }
- /* Setup the HW Rx Head and Tail Descriptor Pointers
- * Note: this must be done AFTER the queue is enabled on real
- * hardware, but BEFORE the queue is enabled when using the
- * emulation platform. Do it in both places for now and remove
- * this comment and the following two register writes when the
- * emulation platform is no longer being used.
- */
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled on real
+ * hardware, but BEFORE the queue is enabled when using the
+ * emulation platform. Do it in both places for now and remove
+ * this comment and the following two register writes when the
+ * emulation platform is no longer being used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
- /* Set PF ownership flag for PF devices */
- reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
- if (hw->mac.type == fm10k_mac_pf)
- reg |= FM10K_RXQCTL_PF;
- reg |= FM10K_RXQCTL_ENABLE;
- /* enable RX queue */
- FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
- FM10K_WRITE_FLUSH(hw);
+ /* Set PF ownership flag for PF devices */
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+ if (hw->mac.type == fm10k_mac_pf)
+ reg |= FM10K_RXQCTL_PF;
+ reg |= FM10K_RXQCTL_ENABLE;
+ /* enable RX queue */
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+ FM10K_WRITE_FLUSH(hw);
- /* Setup the HW Rx Head and Tail Descriptor Pointers
- * Note: this must be done AFTER the queue is enabled
- */
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- }
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -852,14 +863,12 @@ fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- /* Disable RX queue */
- rx_queue_disable(hw, rx_queue_id);
+ /* Disable RX queue */
+ rx_queue_disable(hw, rx_queue_id);
- /* Free mbuf and clean HW ring */
- rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
+ /* Free mbuf and clean HW ring */
+ rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
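
The queue-id bounds checks removed here, and in the Tx queue start/stop
hunks below, are safe to drop because the ethdev layer rejects
out-of-range ids before calling into the PMD. A paraphrased sketch of the
18.08 caller (not the exact upstream body, which also checks queue state
and deferred start):

    int
    rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
    {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        /* validation happens here, once, for every PMD */
        if (rx_queue_id >= dev->data->nb_rx_queues)
            return -EINVAL;

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
    }
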
@@ -871,28 +880,23 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
- int err = 0;
+ struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
+ q->ops->reset(q);
- q->ops->reset(q);
+ /* reset head and tail pointers */
+ FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
- /* reset head and tail pointers */
- FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
-
- /* enable TX queue */
- FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
- FM10K_TXDCTL_ENABLE | txdctl);
- FM10K_WRITE_FLUSH(hw);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
- err = -1;
+ /* enable TX queue */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+ FM10K_TXDCTL_ENABLE | txdctl);
+ FM10K_WRITE_FLUSH(hw);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -902,11 +906,9 @@ fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- tx_queue_disable(hw, tx_queue_id);
- tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
+ tx_queue_disable(hw, tx_queue_id);
+ tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1233,13 +1235,11 @@ fm10k_link_update(struct rte_eth_dev *dev,
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
- * leave the speed undefined since there is no 50Gbps Ethernet.
- */
- dev->data->dev_link.link_speed = 0;
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
dev->data->dev_link.link_status =
dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
+ dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
return 0;
}
@@ -1377,7 +1377,6 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = pdev;
dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
dev_info->max_rx_queues = hw->mac.max_queues;
@@ -1389,17 +1388,12 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
dev_info->vmdq_queue_base = 0;
dev_info->max_vmdq_pools = ETH_32_POOLS;
dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
dev_info->reta_size = FM10K_MAX_RSS_INDICES;
@@ -1412,6 +1406,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
},
.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -1422,7 +1417,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
},
.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
- .txq_flags = FM10K_SIMPLE_TX_FLAG,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -1571,19 +1566,22 @@ static int
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if (mask & ETH_VLAN_STRIP_MASK) {
- if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP))
PMD_INIT_LOG(ERR, "VLAN stripping is "
"always on in fm10k");
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
PMD_INIT_LOG(ERR, "VLAN QinQ is not "
"supported in fm10k");
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER))
PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
}
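
For context, this callback is reached through
rte_eth_dev_set_vlan_offload() when an application toggles VLAN offloads
at runtime; on fm10k it only logs, since stripping and filtering are
always on and QinQ is unsupported. A hedged application-side sketch
(port_id is hypothetical):

    int rc = rte_eth_dev_set_vlan_offload(port_id,
            ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
    if (rc < 0)
        printf("setting VLAN offloads failed: %d\n", rc);
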
@@ -1781,6 +1779,27 @@ mempool_element_size_valid(struct rte_mempool *mp)
return 1;
}
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+}
+
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT);
+}
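
With capabilities now split between port and queue level, an application
requests port-wide offloads in rxmode.offloads and per-queue extras (here
DEV_RX_OFFLOAD_SCATTER is the only queue-level Rx capability) in
rte_eth_rxconf.offloads. A minimal sketch against the 18.08 API; the port
id, queue sizing, and chosen offloads are illustrative, and error handling
is trimmed:

    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mempool.h>

    static int
    setup_rx_with_offloads(uint16_t port_id, struct rte_mempool *mp)
    {
        struct rte_eth_dev_info info;
        /* ignore_offload_bitfield selects the offloads API over the
         * legacy rxmode bitfields (a transitional knob in 18.08) */
        struct rte_eth_conf conf = {
            .rxmode = { .ignore_offload_bitfield = 1 },
        };
        struct rte_eth_rxconf rxconf;

        rte_eth_dev_info_get(port_id, &info);

        /* port-level offloads must be a subset of rx_offload_capa */
        conf.rxmode.offloads = DEV_RX_OFFLOAD_CRC_STRIP & info.rx_offload_capa;
        if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
            return -1;

        /* queue-level: PMD defaults plus queue-only capability bits */
        rxconf = info.default_rxconf;
        rxconf.offloads = conf.rxmode.offloads |
                (DEV_RX_OFFLOAD_SCATTER & info.rx_queue_offload_capa);
        return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                &rxconf, mp);
    }
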
+
static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
@@ -1791,9 +1810,12 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
struct fm10k_rx_queue *q;
const struct rte_memzone *mz;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/* make sure the mempool element size can account for alignment. */
if (!mempool_element_size_valid(mp)) {
PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
@@ -1838,6 +1860,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
q->queue_id = queue_id;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
+ q->offloads = offloads;
if (handle_rxconf(q, conf))
return -EINVAL;
@@ -1947,6 +1970,24 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
return 0;
}
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO);
+}
+
static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
@@ -1955,9 +1996,12 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_tx_queue *q;
const struct rte_memzone *mz;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/* make sure a valid number of descriptors have been requested */
if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
FM10K_MULT_TX_DESC, nb_desc)) {
@@ -1994,7 +2038,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
q->nb_desc = nb_desc;
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
- q->txq_flags = conf->txq_flags;
+ q->offloads = offloads;
q->ops = &def_txq_ops;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
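
This is part of the txq_flags retirement: queues now carry an offloads
bitmap instead of FM10K_SIMPLE_TX_FLAG. Under 18.08 an application opts
into the new path per queue roughly as below (hedged sketch continuing the
Rx example above, reusing its info variable; ETH_TXQ_FLAGS_IGNORE tells
ethdev to honour the offloads field rather than the legacy flags):

    struct rte_eth_txconf txconf = info.default_txconf;

    txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;  /* use offloads, not flags */
    txconf.offloads = DEV_TX_OFFLOAD_VLAN_INSERT & info.tx_offload_capa;
    rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
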
@@ -2784,6 +2828,8 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
.tx_queue_setup = fm10k_tx_queue_setup,
.tx_queue_release = fm10k_tx_queue_release,
.rx_descriptor_done = fm10k_dev_rx_descriptor_done,
+ .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
+ .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
.rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
.reta_update = fm10k_reta_update,
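
The two new callbacks back rte_eth_rx_descriptor_status() and
rte_eth_tx_descriptor_status(), which let an application probe ring
occupancy without dequeuing anything. Illustrative use, where offset is a
hypothetical distance from the next descriptor to be processed:

    static void
    probe_rx_ring(uint16_t port_id, uint16_t offset)
    {
        switch (rte_eth_rx_descriptor_status(port_id, 0, offset)) {
        case RTE_ETH_RX_DESC_DONE:    /* filled, awaiting rx_burst */
            break;
        case RTE_ETH_RX_DESC_AVAIL:   /* free for the NIC to fill */
            break;
        case RTE_ETH_RX_DESC_UNAVAIL: /* past ring end or queue stopped */
        default:
            break;
        }
    }
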
@@ -2860,7 +2906,7 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
uint16_t tx_ftag_en = 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- /* primary process has set the ftag flag and txq_flags */
+ /* primary process has set the ftag flag and offloads */
txq = dev->data->tx_queues[0];
if (fm10k_tx_vec_condition_check(txq)) {
dev->tx_pkt_burst = fm10k_xmit_pkts;
@@ -3237,9 +3283,7 @@ RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(fm10k_init_log);
-static void
-fm10k_init_log(void)
+RTE_INIT(fm10k_init_log)
{
fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
if (fm10k_logtype_init >= 0)
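
The RTE_INIT change tracks an 18.08 cleanup in which the macro took over
the whole constructor definition instead of decorating a separately
declared static function. Roughly what it expands to in that release's
rte_common.h (paraphrased; the real macro goes through RTE_INIT_PRIO and
carries a constructor priority):

    #define RTE_INIT(func) \
        static void __attribute__((constructor, used)) func(void)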