author     Luca Boccassi <luca.boccassi@gmail.com>    2018-08-14 18:52:30 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>    2018-08-14 18:53:17 +0100
commit     b63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree       83114aac64286fe616506c0b3dfaec2ab86ef835 /drivers/net/qede/qede_ethdev.c
parent     ca33590b6af032bff57d9cc70455660466a654b2 (diff)
New upstream version 18.08 (tag: upstream/18.08)
Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/qede/qede_ethdev.c')
-rw-r--r--  drivers/net/qede/qede_ethdev.c  618
1 file changed, 381 insertions, 237 deletions
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index a91f4368..df52ea92 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "qede_ethdev.h"
@@ -16,7 +14,7 @@
 int qede_logtype_init;
 int qede_logtype_driver;
 
 static const struct qed_eth_ops *qed_ops;
-static int64_t timer_period = 1;
+#define QEDE_SP_TIMER_PERIOD    10000 /* 100ms */
 
 /* VXLAN tunnel classification mapping */
 const struct _qede_udp_tunn_types {
@@ -339,6 +337,24 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
 }
 
 static void
+qede_interrupt_handler_intx(void *param)
+{
+        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+        struct qede_dev *qdev = eth_dev->data->dev_private;
+        struct ecore_dev *edev = &qdev->edev;
+        u64 status;
+
+        /* Check if our device actually raised an interrupt */
+        status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
+        if (status & 0x1) {
+                qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+
+                if (rte_intr_enable(eth_dev->intr_handle))
+                        DP_ERR(edev, "rte_intr_enable failed\n");
+        }
+}
+
+static void
 qede_interrupt_handler(void *param)
 {
         struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
@@ -499,6 +515,9 @@ qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
         return 0;
 }
 
+#define QEDE_NPAR_TX_SWITCHING          "npar_tx_switching"
+#define QEDE_VF_TX_SWITCHING            "vf_tx_switching"
+
 /* Activate or deactivate vport via vport-update */
 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
 {
@@ -515,12 +534,9 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
         params.update_vport_active_tx_flg = 1;
         params.vport_active_rx_flg = flg;
         params.vport_active_tx_flg = flg;
-        if (!qdev->enable_tx_switching) {
-                if (IS_VF(edev)) {
-                        params.update_tx_switching_flg = 1;
-                        params.tx_switching_flg = !flg;
-                        DP_INFO(edev, "VF tx-switching is disabled\n");
-                }
+        if (~qdev->enable_tx_switching & flg) {
+                params.update_tx_switching_flg = 1;
+                params.tx_switching_flg = !flg;
         }
         for_each_hwfn(edev, i) {
                 p_hwfn = &edev->hwfns[i];
@@ -591,38 +607,9 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
                 }
         }
         qdev->enable_lro = flg;
-        DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
-
-        return 0;
-}
-
-/* Update MTU via vport-update without doing port restart.
- * The vport must be deactivated before calling this API.
- */
-int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
-{
-        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-        struct ecore_sp_vport_update_params params;
-        struct ecore_hwfn *p_hwfn;
-        int rc;
-        int i;
+        eth_dev->data->lro = flg;
 
-        memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
-        params.vport_id = 0;
-        params.mtu = mtu;
-        params.vport_id = 0;
-        for_each_hwfn(edev, i) {
-                p_hwfn = &edev->hwfns[i];
-                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
-                rc = ecore_sp_vport_update(p_hwfn, &params,
-                                ECORE_SPQ_MODE_EBLOCK, NULL);
-                if (rc != ECORE_SUCCESS) {
-                        DP_ERR(edev, "Failed to update MTU\n");
-                        return -1;
-                }
-        }
-        DP_INFO(edev, "MTU updated to %u\n", mtu);
+        DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
 
         return 0;
 }
@@ -780,6 +767,36 @@ qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
 }
 
 static int
+qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+                  bool enable)
+{
+        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+        enum _ecore_status_t rc = ECORE_INVAL;
+        struct ecore_tunnel_info tunn;
+
+        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+        tunn.ip_gre.b_update_mode = true;
+        tunn.ip_gre.b_mode_enabled = enable;
+        tunn.ip_gre.tun_cls = clss;
+        tunn.ip_gre.tun_cls = clss;
+        tunn.b_update_rx_cls = true;
+        tunn.b_update_tx_cls = true;
+
+        rc = qede_tunnel_update(qdev, &tunn);
+        if (rc == ECORE_SUCCESS) {
+                qdev->ipgre.enable = enable;
+                DP_INFO(edev, "IPGRE is %s\n",
+                        enable ? "enabled" : "disabled");
+        } else {
+                DP_ERR(edev, "Failed to update tunn_clss %u\n",
+                       clss);
+        }
+
+        return rc;
+}
+
+static int
 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                  enum rte_eth_tunnel_type tunn_type, bool enable)
 {
@@ -792,6 +809,9 @@ qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
         case RTE_TUNNEL_TYPE_GENEVE:
                 rc = qede_geneve_enable(eth_dev, clss, enable);
                 break;
+        case RTE_TUNNEL_TYPE_IP_IN_GRE:
+                rc = qede_ipgre_enable(eth_dev, clss, enable);
+                break;
         default:
                 rc = -EINVAL;
                 break;
@@ -817,10 +837,10 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                              ETHER_ADDR_LEN) == 0) &&
                      ucast->vni == tmp->vni &&
                      ucast->vlan == tmp->vlan) {
-                        DP_ERR(edev, "Unicast MAC is already added"
-                               " with vlan = %u, vni = %u\n",
-                               ucast->vlan, ucast->vni);
-                        return -EEXIST;
+                        DP_INFO(edev, "Unicast MAC is already added"
+                                " with vlan = %u, vni = %u\n",
+                                ucast->vlan, ucast->vni);
+                        return 0;
                 }
         }
         u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
@@ -854,47 +874,69 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 }
 
 static int
-qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
-                  bool add)
+qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
+                       uint32_t mc_addrs_num)
 {
         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-        struct ether_addr *mac_addr;
-        struct qede_mcast_entry *tmp = NULL;
-        struct qede_mcast_entry *m;
+        struct ecore_filter_mcast mcast;
+        struct qede_mcast_entry *m = NULL;
+        uint8_t i;
+        int rc;
 
-        mac_addr = (struct ether_addr *)mcast->mac;
-        if (add) {
-                SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
-                        if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
-                                DP_ERR(edev,
-                                       "Multicast MAC is already added\n");
-                                return -EEXIST;
-                        }
-                }
+        for (i = 0; i < mc_addrs_num; i++) {
                 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
-                               RTE_CACHE_LINE_SIZE);
+                        RTE_CACHE_LINE_SIZE);
                 if (!m) {
-                        DP_ERR(edev,
-                               "Did not allocate memory for mcast\n");
+                        DP_ERR(edev, "Did not allocate memory for mcast\n");
                         return -ENOMEM;
                 }
-                ether_addr_copy(mac_addr, &m->mac);
+                ether_addr_copy(&mc_addrs[i], &m->mac);
                 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
-                qdev->num_mc_addr++;
-        } else {
-                SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
-                        if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
-                                break;
-                }
-                if (tmp == NULL) {
-                        DP_INFO(edev, "Multicast mac is not found\n");
-                        return -EINVAL;
-                }
-                SLIST_REMOVE(&qdev->mc_list_head, tmp,
-                             qede_mcast_entry, list);
-                qdev->num_mc_addr--;
         }
+        memset(&mcast, 0, sizeof(mcast));
+        mcast.num_mc_addrs = mc_addrs_num;
+        mcast.opcode = ECORE_FILTER_ADD;
+        for (i = 0; i < mc_addrs_num; i++)
+                ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
+                                                &mcast.mac[i]);
+        rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+        if (rc != ECORE_SUCCESS) {
+                DP_ERR(edev, "Failed to add multicast filter (rc = %d\n)", rc);
+                return -1;
+        }
+
+        return 0;
+}
+
+static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
+{
+        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+        struct qede_mcast_entry *tmp = NULL;
+        struct ecore_filter_mcast mcast;
+        int j;
+        int rc;
+
+        memset(&mcast, 0, sizeof(mcast));
+        mcast.num_mc_addrs = qdev->num_mc_addr;
+        mcast.opcode = ECORE_FILTER_REMOVE;
+        j = 0;
+        SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+                ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
+                j++;
+        }
+        rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+        if (rc != ECORE_SUCCESS) {
+                DP_ERR(edev, "Failed to delete multicast filter\n");
+                return -1;
+        }
+        /* Init the list */
+        while (!SLIST_EMPTY(&qdev->mc_list_head)) {
+                tmp = SLIST_FIRST(&qdev->mc_list_head);
+                SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
+        }
+        SLIST_INIT(&qdev->mc_list_head);
 
         return 0;
 }
@@ -905,59 +947,25 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 {
         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-        enum _ecore_status_t rc;
-        struct ecore_filter_mcast mcast;
-        struct qede_mcast_entry *tmp;
-        uint16_t j = 0;
+        enum _ecore_status_t rc = ECORE_INVAL;
 
-        /* Multicast */
-        if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
-                if (add) {
-                        if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
-                                DP_ERR(edev,
-                                       "Mcast filter table limit exceeded, "
-                                       "Please enable mcast promisc mode\n");
-                                return -ECORE_INVAL;
-                        }
-                }
-                rc = qede_mcast_filter(eth_dev, ucast, add);
-                if (rc == 0) {
-                        DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
-                        memset(&mcast, 0, sizeof(mcast));
-                        mcast.num_mc_addrs = qdev->num_mc_addr;
-                        mcast.opcode = ECORE_FILTER_ADD;
-                        SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
-                                ether_addr_copy(&tmp->mac,
-                                        (struct ether_addr *)&mcast.mac[j]);
-                                j++;
-                        }
-                        rc = ecore_filter_mcast_cmd(edev, &mcast,
-                                                    ECORE_SPQ_MODE_CB, NULL);
-                }
-                if (rc != ECORE_SUCCESS) {
-                        DP_ERR(edev, "Failed to add multicast filter"
-                               " rc = %d, op = %d\n", rc, add);
-                }
-        } else { /* Unicast */
-                if (add) {
-                        if (qdev->num_uc_addr >=
-                            qdev->dev_info.num_mac_filters) {
-                                DP_ERR(edev,
-                                       "Ucast filter table limit exceeded,"
-                                       " Please enable promisc mode\n");
-                                return -ECORE_INVAL;
-                        }
-                }
-                rc = qede_ucast_filter(eth_dev, ucast, add);
-                if (rc == 0)
-                        rc = ecore_filter_ucast_cmd(edev, ucast,
-                                                    ECORE_SPQ_MODE_CB, NULL);
-                if (rc != ECORE_SUCCESS) {
-                        DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
-                               rc, add);
-                }
+        if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
+                DP_ERR(edev, "Ucast filter table limit exceeded,"
+                          " Please enable promisc mode\n");
+                return ECORE_INVAL;
         }
 
+        rc = qede_ucast_filter(eth_dev, ucast, add);
+        if (rc == 0)
+                rc = ecore_filter_ucast_cmd(edev, ucast,
+                                            ECORE_SPQ_MODE_CB, NULL);
+        /* Indicate error only for add filter operation.
+         * Delete filter operations are not severe.
+         */
+        if ((rc != ECORE_SUCCESS) && add)
+                DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
+                       rc, add);
+
         return rc;
 }
 
@@ -968,7 +976,11 @@ qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
         struct ecore_filter_ucast ucast;
         int re;
 
+        if (!is_valid_assigned_ether_addr(mac_addr))
+                return -EINVAL;
+
         qede_set_ucast_cmn_params(&ucast);
+        ucast.opcode = ECORE_FILTER_ADD;
         ucast.type = ECORE_FILTER_MAC;
         ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
         re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
@@ -990,6 +1002,9 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
                 return;
         }
 
+        if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
+                return;
+
         qede_set_ucast_cmn_params(&ucast);
         ucast.opcode = ECORE_FILTER_REMOVE;
         ucast.type = ECORE_FILTER_MAC;
@@ -998,10 +1013,10 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
         ether_addr_copy(&eth_dev->data->mac_addrs[index],
                         (struct ether_addr *)&ucast.mac);
 
-        ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+        qede_mac_int_ops(eth_dev, &ucast, false);
 }
 
-static void
+static int
 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
 {
         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
@@ -1010,12 +1025,12 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
         if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
                                                mac_addr->addr_bytes)) {
                 DP_ERR(edev, "Setting MAC address is not allowed\n");
-                ether_addr_copy(&qdev->primary_mac,
-                                &eth_dev->data->mac_addrs[0]);
-                return;
+                return -EPERM;
         }
 
-        qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
+        qede_mac_addr_remove(eth_dev, 0);
+
+        return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
 }
 
 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
@@ -1093,9 +1108,9 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
 
                 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                         if (tmp->vid == vlan_id) {
-                                DP_ERR(edev, "VLAN %u already configured\n",
-                                       vlan_id);
-                                return -EEXIST;
+                                DP_INFO(edev, "VLAN %u already configured\n",
+                                        vlan_id);
+                                return 0;
                         }
                 }
@@ -1166,10 +1181,10 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 {
         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
         if (mask & ETH_VLAN_STRIP_MASK) {
-                if (rxmode->hw_vlan_strip)
+                if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                         (void)qede_vlan_stripping(eth_dev, 1);
                 else
                         (void)qede_vlan_stripping(eth_dev, 0);
@@ -1177,7 +1192,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 
         if (mask & ETH_VLAN_FILTER_MASK) {
                 /* VLAN filtering kicks in when a VLAN is added */
-                if (rxmode->hw_vlan_filter) {
+                if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
                         qede_vlan_filter_set(eth_dev, 0, 1);
                 } else {
                         if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -1187,7 +1202,8 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
                                 /* Signal app that VLAN filtering is still
                                  * enabled
                                  */
-                                rxmode->hw_vlan_filter = true;
+                                eth_dev->data->dev_conf.rxmode.offloads |=
+                                                DEV_RX_OFFLOAD_VLAN_FILTER;
                         } else {
                                 qede_vlan_filter_set(eth_dev, 0, 0);
                         }
@@ -1195,13 +1211,11 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
         }
 
         if (mask & ETH_VLAN_EXTEND_MASK)
-                DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
-                        " and classification is based on outer tag only\n");
+                DP_ERR(edev, "Extend VLAN not supported\n");
 
         qdev->vlan_offload_mask = mask;
 
-        DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
-                mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+        DP_INFO(edev, "VLAN offload mask %d\n", mask);
 
         return 0;
 }
@@ -1267,19 +1281,25 @@ static void qede_fastpath_start(struct ecore_dev *edev)
 
 static int qede_dev_start(struct rte_eth_dev *eth_dev)
 {
-        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
 
         PMD_INIT_FUNC_TRACE(edev);
 
+        /* Update MTU only if it has changed */
+        if (eth_dev->data->mtu != qdev->mtu) {
+                if (qede_update_mtu(eth_dev, qdev->mtu))
+                        goto err;
+        }
+
         /* Configure TPA parameters */
-        if (rxmode->enable_lro) {
+        if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
                 if (qede_enable_tpa(eth_dev, true))
                         return -EINVAL;
                 /* Enable scatter mode for LRO */
-                if (!rxmode->enable_scatter)
-                        eth_dev->data->scattered_rx = 1;
+                if (!eth_dev->data->scattered_rx)
+                        rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
         }
 
         /* Start queues */
@@ -1294,7 +1314,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
          * Also, we would like to retain similar behavior in PF case, so we
         * don't do PF/VF specific check here.
         */
-        if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+        if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
                 if (qede_config_rss(eth_dev))
                         goto err;
 
@@ -1339,10 +1359,9 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
         DP_INFO(edev, "Device is stopped\n");
 }
 
-#define QEDE_TX_SWITCHING               "vf_txswitch"
-
 const char *valid_args[] = {
-        QEDE_TX_SWITCHING,
+        QEDE_NPAR_TX_SWITCHING,
+        QEDE_VF_TX_SWITCHING,
         NULL,
 };
@@ -1361,8 +1380,13 @@ static int qede_args_check(const char *key, const char *val, void *opaque)
                 return errno;
         }
 
-        if (strcmp(QEDE_TX_SWITCHING, key) == 0)
+        if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
+            ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
                 qdev->enable_tx_switching = !!tmp;
+                DP_INFO(edev, "Disabling %s tx-switching\n",
+                        strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
+                        "VF" : "NPAR");
+        }
 
         return ret;
 }
@@ -1411,15 +1435,15 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
         /* Check requirements for 100G mode */
         if (ECORE_IS_CMT(edev)) {
                 if (eth_dev->data->nb_rx_queues < 2 ||
-                                eth_dev->data->nb_tx_queues < 2) {
+                    eth_dev->data->nb_tx_queues < 2) {
                         DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
                         return -EINVAL;
                 }
 
                 if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
-                                (eth_dev->data->nb_tx_queues % 2 != 0)) {
+                    (eth_dev->data->nb_tx_queues % 2 != 0)) {
                         DP_ERR(edev,
-                                        "100G mode needs even no. of RX/TX queues\n");
+                               "100G mode needs even no. of RX/TX queues\n");
                         return -EINVAL;
                 }
         }
@@ -1437,22 +1461,11 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
         /* Parse devargs and fix up rxmode */
         if (qede_args(eth_dev))
-                return -ENOTSUP;
+                DP_NOTICE(edev, false,
+                          "Invalid devargs supplied, requested change will not take effect\n");
 
-        /* Sanity checks and throw warnings */
-        if (rxmode->enable_scatter)
-                eth_dev->data->scattered_rx = 1;
-
-        if (!rxmode->hw_strip_crc)
-                DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
-
-        if (!rxmode->hw_ip_checksum)
-                DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
-                              "in hw\n");
-        if (rxmode->header_split)
-                DP_INFO(edev, "Header split enable is not supported\n");
-        if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
-              ETH_MQ_RX_RSS)) {
+        if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
+              rxmode->mq_mode == ETH_MQ_RX_RSS)) {
                 DP_ERR(edev, "Unsupported multi-queue mode\n");
                 return -ENOTSUP;
         }
@@ -1467,19 +1480,22 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
                 return -ENOMEM;
 
         /* If jumbo enabled adjust MTU */
-        if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
                 eth_dev->data->mtu =
-                                eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
-                                ETHER_HDR_LEN - ETHER_CRC_LEN;
+                        eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+                        ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+        if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+                eth_dev->data->scattered_rx = 1;
 
         if (qede_start_vport(qdev, eth_dev->data->mtu))
                 return -1;
+
         qdev->mtu = eth_dev->data->mtu;
 
         /* Enable VLAN offloads by default */
         ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-                        ETH_VLAN_FILTER_MASK |
-                        ETH_VLAN_EXTEND_MASK);
+                                    ETH_VLAN_FILTER_MASK);
         if (ret)
                 return ret;
@@ -1515,7 +1531,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 
         PMD_INIT_FUNC_TRACE(edev);
 
-        dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
         dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
         dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
         dev_info->rx_desc_lim = qede_rx_desc_lim;
@@ -1534,26 +1549,42 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
         dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
         dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
         dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-
-        dev_info->default_txconf = (struct rte_eth_txconf) {
-                .txq_flags = QEDE_TXQ_FLAGS,
-        };
-
-        dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
-                                     DEV_RX_OFFLOAD_IPV4_CKSUM |
+        dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
                                      DEV_RX_OFFLOAD_UDP_CKSUM |
                                      DEV_RX_OFFLOAD_TCP_CKSUM |
                                      DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-                                     DEV_RX_OFFLOAD_TCP_LRO);
-
+                                     DEV_RX_OFFLOAD_TCP_LRO |
+                                     DEV_RX_OFFLOAD_CRC_STRIP |
+                                     DEV_RX_OFFLOAD_KEEP_CRC |
+                                     DEV_RX_OFFLOAD_SCATTER |
+                                     DEV_RX_OFFLOAD_JUMBO_FRAME |
+                                     DEV_RX_OFFLOAD_VLAN_FILTER |
+                                     DEV_RX_OFFLOAD_VLAN_STRIP);
+        dev_info->rx_queue_offload_capa = 0;
+
+        /* TX offloads are on a per-packet basis, so it is applicable
+         * to both at port and queue levels.
+         */
         dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
                                      DEV_TX_OFFLOAD_IPV4_CKSUM |
                                      DEV_TX_OFFLOAD_UDP_CKSUM |
                                      DEV_TX_OFFLOAD_TCP_CKSUM |
                                      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-                                     DEV_TX_OFFLOAD_TCP_TSO |
+                                     DEV_TX_OFFLOAD_MULTI_SEGS  |
+                                     DEV_TX_OFFLOAD_TCP_TSO |
                                      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
                                      DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+        dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
+
+        dev_info->default_txconf = (struct rte_eth_txconf) {
+                .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+        };
+
+        dev_info->default_rxconf = (struct rte_eth_rxconf) {
+                /* Packets are always dropped if no descriptors are available */
+                .rx_drop_en = 1,
+                .offloads = 0,
+        };
 
         memset(&link, 0, sizeof(struct qed_link_output));
         qdev->ops->common->get_link(edev, &link);
@@ -1578,18 +1609,20 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 {
         struct qede_dev *qdev = eth_dev->data->dev_private;
         struct ecore_dev *edev = &qdev->edev;
+        struct qed_link_output q_link;
+        struct rte_eth_link link;
         uint16_t link_duplex;
-        struct qed_link_output link;
-        struct rte_eth_link *curr = &eth_dev->data->dev_link;
 
-        memset(&link, 0, sizeof(struct qed_link_output));
-        qdev->ops->common->get_link(edev, &link);
+        memset(&q_link, 0, sizeof(q_link));
+        memset(&link, 0, sizeof(link));
+
+        qdev->ops->common->get_link(edev, &q_link);
 
         /* Link Speed */
-        curr->link_speed = link.speed;
+        link.link_speed = q_link.speed;
 
         /* Link Mode */
-        switch (link.duplex) {
+        switch (q_link.duplex) {
         case QEDE_DUPLEX_HALF:
                 link_duplex = ETH_LINK_HALF_DUPLEX;
                 break;
@@ -1600,21 +1633,20 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
         default:
                 link_duplex = -1;
         }
-        curr->link_duplex = link_duplex;
+        link.link_duplex = link_duplex;
 
         /* Link Status */
-        curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
+        link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
 
         /* AN */
-        curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
+        link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
                              ETH_LINK_AUTONEG : ETH_LINK_FIXED;
 
         DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
-                curr->link_speed, curr->link_duplex,
-                curr->link_autoneg, curr->link_status);
+                link.link_speed, link.link_duplex,
+                link.link_autoneg, link.link_status);
 
-        /* return 0 means link status changed, -1 means not changed */
-        return ((curr->link_status == link.link_up) ? -1 : 0);
+        return rte_eth_linkstatus_set(eth_dev, &link);
 }
 
 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
@@ -1661,7 +1693,7 @@ static void qede_poll_sp_sb_cb(void *param)
         qede_interrupt_action(ECORE_LEADING_HWFN(edev));
         qede_interrupt_action(&edev->hwfns[1]);
 
-        rc = rte_eal_alarm_set(timer_period * US_PER_S,
+        rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
                                qede_poll_sp_sb_cb,
                                (void *)eth_dev);
         if (rc != 0) {
@@ -1700,8 +1732,20 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
         qdev->ops->common->slowpath_stop(edev);
         qdev->ops->common->remove(edev);
         rte_intr_disable(&pci_dev->intr_handle);
-        rte_intr_callback_unregister(&pci_dev->intr_handle,
-                                     qede_interrupt_handler, (void *)eth_dev);
+
+        switch (pci_dev->intr_handle.type) {
+        case RTE_INTR_HANDLE_UIO_INTX:
+        case RTE_INTR_HANDLE_VFIO_LEGACY:
+                rte_intr_callback_unregister(&pci_dev->intr_handle,
+                                             qede_interrupt_handler_intx,
+                                             (void *)eth_dev);
+                break;
+        default:
+                rte_intr_callback_unregister(&pci_dev->intr_handle,
+                                             qede_interrupt_handler,
                                             (void *)eth_dev);
+        }
+
         if (ECORE_IS_CMT(edev))
                 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
 }
@@ -1990,6 +2034,99 @@ static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
                         QED_FILTER_RX_MODE_TYPE_REGULAR);
 }
 
+static int
+qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
+                      uint32_t mc_addrs_num)
+{
+        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+        uint8_t i;
+
+        if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
+                DP_ERR(edev, "Reached max multicast filters limit,"
+                             "Please enable multicast promisc mode\n");
+                return -ENOSPC;
+        }
+
+        for (i = 0; i < mc_addrs_num; i++) {
+                if (!is_multicast_ether_addr(&mc_addrs[i])) {
+                        DP_ERR(edev, "Not a valid multicast MAC\n");
+                        return -EINVAL;
+                }
+        }
+
+        /* Flush all existing entries */
+        if (qede_del_mcast_filters(eth_dev))
+                return -1;
+
+        /* Set new mcast list */
+        return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
+}
+
+/* Update MTU via vport-update without doing port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+        struct ecore_hwfn *p_hwfn;
+        int rc;
+        int i;
+
+        if (IS_PF(edev)) {
+                struct ecore_sp_vport_update_params params;
+
+                memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+                params.vport_id = 0;
+                params.mtu = mtu;
+                params.vport_id = 0;
+                for_each_hwfn(edev, i) {
+                        p_hwfn = &edev->hwfns[i];
+                        params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+                        rc = ecore_sp_vport_update(p_hwfn, &params,
+                                        ECORE_SPQ_MODE_EBLOCK, NULL);
+                        if (rc != ECORE_SUCCESS)
+                                goto err;
+                }
+        } else {
+                for_each_hwfn(edev, i) {
+                        p_hwfn = &edev->hwfns[i];
+                        rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
+                        if (rc == ECORE_INVAL) {
+                                DP_INFO(edev, "VF MTU Update TLV not supported\n");
+                                /* Recreate vport */
+                                rc = qede_start_vport(qdev, mtu);
+                                if (rc != ECORE_SUCCESS)
+                                        goto err;
+
+                                /* Restore config lost due to vport stop */
+                                if (eth_dev->data->promiscuous)
+                                        qede_promiscuous_enable(eth_dev);
+                                else
+                                        qede_promiscuous_disable(eth_dev);
+
+                                if (eth_dev->data->all_multicast)
+                                        qede_allmulticast_enable(eth_dev);
+                                else
+                                        qede_allmulticast_disable(eth_dev);
+
+                                qede_vlan_offload_set(eth_dev,
+                                                      qdev->vlan_offload_mask);
+                        } else if (rc != ECORE_SUCCESS) {
+                                goto err;
+                        }
+                }
+        }
+        DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
+
+        return 0;
+
+err:
+        DP_ERR(edev, "Failed to update MTU\n");
+        return -1;
+}
+
 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                               struct rte_eth_fc_conf *fc_conf)
 {
@@ -2065,6 +2202,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
                 RTE_PTYPE_TUNNEL_VXLAN,
                 RTE_PTYPE_L4_FRAG,
                 RTE_PTYPE_TUNNEL_GENEVE,
+                RTE_PTYPE_TUNNEL_GRE,
                 /* Inner */
                 RTE_PTYPE_INNER_L2_ETHER,
                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
@@ -2143,7 +2281,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
         vport_update_params.vport_id = 0;
         /* pass the L2 handles instead of qids */
         for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
-                idx = qdev->rss_ind_table[i];
+                idx = i % QEDE_RSS_COUNT(qdev);
                 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
         }
         vport_update_params.rss_params = &rss_params;
@@ -2393,7 +2531,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                         restart = true;
         }
         rte_delay_ms(1000);
-        qede_start_vport(qdev, mtu); /* Recreate vport */
         qdev->mtu = mtu;
 
         /* Fix up RX buf size for all queues of the port */
@@ -2413,23 +2550,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                 }
         }
         if (max_rx_pkt_len > ETHER_MAX_LEN)
-                dev->data->dev_conf.rxmode.jumbo_frame = 1;
-        else
-                dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
-        /* Restore config lost due to vport stop */
-        qede_mac_addr_set(dev, &qdev->primary_mac);
-        if (dev->data->promiscuous)
-                qede_promiscuous_enable(dev);
-        else
-                qede_promiscuous_disable(dev);
-
-        if (dev->data->all_multicast)
-                qede_allmulticast_enable(dev);
+                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
         else
-                qede_allmulticast_disable(dev);
-
-        qede_vlan_offload_set(dev, qdev->vlan_offload_mask);
+                dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
         if (!dev->data->dev_started && restart) {
                 qede_dev_start(dev);
@@ -2488,7 +2611,6 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
                                         ECORE_TUNN_CLSS_MAC_VLAN, false);
 
                 break;
-
         case RTE_TUNNEL_TYPE_GENEVE:
                 if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
                         DP_ERR(edev, "UDP port %u doesn't exist\n",
@@ -2578,7 +2700,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
                 qdev->vxlan.udp_port = udp_port;
 
                 break;
-
         case RTE_TUNNEL_TYPE_GENEVE:
                 if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
                         DP_INFO(edev,
@@ -2616,7 +2737,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
                 qdev->geneve.udp_port = udp_port;
 
                 break;
-
         default:
                 return ECORE_INVAL;
         }
@@ -2782,7 +2902,8 @@ qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
                         qdev->geneve.filter_type = conf->filter_type;
                 }
 
-                if (!qdev->vxlan.enable || !qdev->geneve.enable)
+                if (!qdev->vxlan.enable || !qdev->geneve.enable ||
+                    !qdev->ipgre.enable)
                         return qede_tunn_enable(eth_dev, clss,
                                                 conf->tunnel_type, true);
@@ -2818,15 +2939,14 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                 switch (filter_conf->tunnel_type) {
                 case RTE_TUNNEL_TYPE_VXLAN:
                 case RTE_TUNNEL_TYPE_GENEVE:
+                case RTE_TUNNEL_TYPE_IP_IN_GRE:
                         DP_INFO(edev, "Packet steering to the specified Rx queue"
                                 " is not supported with UDP tunneling");
                         return(qede_tunn_filter_config(eth_dev, filter_op,
                                                        filter_conf));
-                /* Place holders for future tunneling support */
                 case RTE_TUNNEL_TYPE_TEREDO:
                 case RTE_TUNNEL_TYPE_NVGRE:
-                case RTE_TUNNEL_TYPE_IP_IN_GRE:
                 case RTE_L2_TUNNEL_TYPE_E_TAG:
                         DP_ERR(edev, "Unsupported tunnel type %d\n",
                                filter_conf->tunnel_type);
@@ -2871,6 +2991,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
         .promiscuous_disable = qede_promiscuous_disable,
         .allmulticast_enable = qede_allmulticast_enable,
         .allmulticast_disable = qede_allmulticast_disable,
+        .set_mc_addr_list = qede_set_mc_addr_list,
         .dev_stop = qede_dev_stop,
         .dev_close = qede_dev_close,
         .stats_get = qede_get_stats,
@@ -2911,6 +3032,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
         .promiscuous_disable = qede_promiscuous_disable,
         .allmulticast_enable = qede_allmulticast_enable,
         .allmulticast_disable = qede_allmulticast_disable,
+        .set_mc_addr_list = qede_set_mc_addr_list,
         .dev_stop = qede_dev_stop,
         .dev_close = qede_dev_close,
         .stats_get = qede_get_stats,
@@ -2928,6 +3050,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
         .mtu_set = qede_set_mtu,
         .udp_tunnel_port_add = qede_udp_dst_port_add,
         .udp_tunnel_port_del = qede_udp_dst_port_del,
+        .mac_addr_add = qede_mac_addr_add,
+        .mac_addr_remove = qede_mac_addr_remove,
+        .mac_addr_set = qede_mac_addr_set,
 };
 
 static void qede_update_pf_params(struct ecore_dev *edev)
@@ -2956,6 +3081,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
         /* Fix up ecore debug level */
         uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
         uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+        uint32_t int_mode;
         int rc;
 
         /* Extract key data structures */
@@ -3000,8 +3126,22 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                 return -ENODEV;
         }
         qede_update_pf_params(edev);
-        rte_intr_callback_register(&pci_dev->intr_handle,
-                                   qede_interrupt_handler, (void *)eth_dev);
+
+        switch (pci_dev->intr_handle.type) {
+        case RTE_INTR_HANDLE_UIO_INTX:
+        case RTE_INTR_HANDLE_VFIO_LEGACY:
+                int_mode = ECORE_INT_MODE_INTA;
+                rte_intr_callback_register(&pci_dev->intr_handle,
+                                           qede_interrupt_handler_intx,
+                                           (void *)eth_dev);
+                break;
+        default:
+                int_mode = ECORE_INT_MODE_MSIX;
+                rte_intr_callback_register(&pci_dev->intr_handle,
+                                           qede_interrupt_handler,
+                                           (void *)eth_dev);
+        }
+
         if (rte_intr_enable(&pci_dev->intr_handle)) {
                 DP_ERR(edev, "rte_intr_enable() failed\n");
                 return -ENODEV;
@@ -3009,7 +3149,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
         /* Start the Slowpath-process */
         memset(&params, 0, sizeof(struct qed_slowpath_params));
-        params.int_mode = ECORE_INT_MODE_MSIX;
+
+        params.int_mode = int_mode;
         params.drv_major = QEDE_PMD_VERSION_MAJOR;
         params.drv_minor = QEDE_PMD_VERSION_MINOR;
         params.drv_rev = QEDE_PMD_VERSION_REVISION;
@@ -3022,7 +3163,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
          * interrupt vector but we need one for each engine.
          */
         if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
-                rc = rte_eal_alarm_set(timer_period * US_PER_S,
+                rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
                                        qede_poll_sp_sb_cb,
                                        (void *)eth_dev);
                 if (rc != 0) {
@@ -3092,7 +3233,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                                                         ECORE_LEADING_HWFN(edev),
                                                         vf_mac,
                                                         &is_mac_forced);
-                        if (is_mac_exist && is_mac_forced) {
+                        if (is_mac_exist) {
                                 DP_INFO(edev, "VF macaddr received from PF\n");
                                 ether_addr_copy((struct ether_addr *)&vf_mac,
                                                 &eth_dev->data->mac_addrs[0]);
@@ -3119,25 +3260,30 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
         SLIST_INIT(&adapter->fdir_info.fdir_list_head);
         SLIST_INIT(&adapter->vlan_list_head);
         SLIST_INIT(&adapter->uc_list_head);
+        SLIST_INIT(&adapter->mc_list_head);
         adapter->mtu = ETHER_MTU;
         adapter->vport_started = false;
 
         /* VF tunnel offloads is enabled by default in PF driver */
         adapter->vxlan.num_filters = 0;
         adapter->geneve.num_filters = 0;
+        adapter->ipgre.num_filters = 0;
         if (is_vf) {
                 adapter->vxlan.enable = true;
                 adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
                                              ETH_TUNNEL_FILTER_IVLAN;
                 adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
                 adapter->geneve.enable = true;
-
                 adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
                                               ETH_TUNNEL_FILTER_IVLAN;
                 adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+                adapter->ipgre.enable = true;
+                adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
+                                             ETH_TUNNEL_FILTER_IVLAN;
         } else {
                 adapter->vxlan.enable = false;
                 adapter->geneve.enable = false;
+                adapter->ipgre.enable = false;
         }
 
         DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -3295,9 +3441,7 @@ RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
 
-RTE_INIT(qede_init_log);
-static void
-qede_init_log(void)
+RTE_INIT(qede_init_log)
 {
         qede_logtype_init = rte_log_register("pmd.net.qede.init");
         if (qede_logtype_init >= 0)
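One application-facing addition in this import is the set_mc_addr_list dev op, wired into both qede_eth_dev_ops and qede_eth_vf_dev_ops above and reached through the generic ethdev call rte_eth_dev_set_mc_addr_list(). A minimal sketch of how a DPDK 18.08 application could drive it follows; the port id and the two multicast addresses are hypothetical example values, not part of this commit.

#include <rte_ethdev.h>

/* Sketch: replace a port's multicast filter list in one call.
 * On qede this maps to qede_set_mc_addr_list(), which validates
 * that every entry is a multicast MAC, flushes the previously
 * programmed list via qede_del_mcast_filters(), and installs the
 * new one via qede_add_mcast_filters().
 */
static int
set_example_mc_list(uint16_t port_id)
{
        /* Two example multicast MAC addresses (hypothetical values) */
        struct ether_addr mc_list[] = {
                { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
                { .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
        };

        return rte_eth_dev_set_mc_addr_list(port_id, mc_list, 2);
}

Per the handler in the diff, a list longer than ECORE_MAX_MC_ADDRS is rejected with -ENOSPC and any non-multicast entry with -EINVAL; passing an empty list is the ethdev-level way to flush the filters, since the handler always deletes the old entries before programming new ones.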