author     Luca Boccassi <luca.boccassi@gmail.com>  2018-02-19 11:16:57 +0000
committer  Luca Boccassi <luca.boccassi@gmail.com>  2018-02-19 11:17:28 +0000
commit     ca33590b6af032bff57d9cc70455660466a654b2 (patch)
tree       0b68b090bd9b4a78a3614b62400b29279d76d553 /drivers/net/qede/qede_ethdev.c
parent     169a9de21e263aa6599cdc2d87a45ae158d9f509 (diff)
New upstream version 18.02
Change-Id: I89ed24cb2a49b78fe5be6970b99dd46c1499fcc3
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/qede/qede_ethdev.c')
-rw-r--r--   drivers/net/qede/qede_ethdev.c   881
1 file changed, 613 insertions, 268 deletions
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 6f5ba2a9..a91f4368 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -9,13 +9,17 @@
 #include "qede_ethdev.h"
 #include <rte_alarm.h>
 #include <rte_version.h>
+#include <rte_kvargs.h>
 
 /* Globals */
+int qede_logtype_init;
+int qede_logtype_driver;
+
 static const struct qed_eth_ops *qed_ops;
 static int64_t timer_period = 1;
 
 /* VXLAN tunnel classification mapping */
-const struct _qede_vxlan_tunn_types {
+const struct _qede_udp_tunn_types {
         uint16_t rte_filter_type;
         enum ecore_filter_ucast_type qede_type;
         enum ecore_tunn_clss qede_tunn_clss;
@@ -353,7 +357,6 @@ qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
         qdev->ops = qed_ops;
 }
 
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
 static void qede_print_adapter_info(struct qede_dev *qdev)
 {
         struct ecore_dev *edev = &qdev->edev;
@@ -383,56 +386,115 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
         DP_INFO(edev, " Firmware file : %s\n", fw_file);
         DP_INFO(edev, "*********************************\n");
 }
-#endif
 
-static int
-qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 {
         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-        struct ecore_sp_vport_start_params params;
+        unsigned int i = 0, j = 0, qid;
+        unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+        struct qede_tx_queue *txq;
+
+        DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+        rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
+        txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+        for_each_rss(qid) {
+                OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+                             offsetof(struct qede_rx_queue, rcv_pkts), 0,
+                            sizeof(uint64_t));
+                OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+                             offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+                            sizeof(uint64_t));
+                OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+                             offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+                            sizeof(uint64_t));
+
+                if (xstats)
+                        for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
+                                OSAL_MEMSET((((char *)
+                                              (qdev->fp_array[qid].rxq)) +
+                                             qede_rxq_xstats_strings[j].offset),
+                                            0,
+                                            sizeof(uint64_t));
+
+                i++;
+                if (i == rxq_stat_cntrs)
+                        break;
+        }
+
+        i = 0;
+
+        for_each_tss(qid) {
+                txq = qdev->fp_array[qid].txq;
+
+                OSAL_MEMSET((uint64_t *)(uintptr_t)
+                                (((uint64_t)(uintptr_t)(txq)) +
+                                 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+                            sizeof(uint64_t));
+
+                i++;
+                if (i == txq_stat_cntrs)
+                        break;
+        }
+}
+
+static int
+qede_stop_vport(struct ecore_dev *edev)
+{
         struct ecore_hwfn *p_hwfn;
+        uint8_t vport_id;
         int rc;
         int i;
 
-        memset(&params, 0, sizeof(params));
-        params.vport_id = 0;
-        params.mtu = mtu;
-        /* @DPDK - Disable FW placement */
-        params.zero_placement_offset = 1;
+        vport_id = 0;
         for_each_hwfn(edev, i) {
                 p_hwfn = &edev->hwfns[i];
-                params.concrete_fid = p_hwfn->hw_info.concrete_fid;
-                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
-                rc = ecore_sp_vport_start(p_hwfn, &params);
+                rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
+                                         vport_id);
                 if (rc != ECORE_SUCCESS) {
-                        DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+                        DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
                         return rc;
                 }
         }
-        ecore_reset_vport_stats(edev);
-        DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
+
+        DP_INFO(edev, "vport stopped\n");
 
         return 0;
 }
 
 static int
-qede_stop_vport(struct ecore_dev *edev)
+qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
 {
+        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+        struct ecore_sp_vport_start_params params;
         struct ecore_hwfn *p_hwfn;
-        uint8_t vport_id;
         int rc;
         int i;
 
-        vport_id = 0;
+        if (qdev->vport_started)
+                qede_stop_vport(edev);
+
+        memset(&params, 0, sizeof(params));
+        params.vport_id = 0;
+        params.mtu = mtu;
+        /* @DPDK - Disable FW placement */
+        params.zero_placement_offset = 1;
         for_each_hwfn(edev, i) {
                 p_hwfn = &edev->hwfns[i];
-                rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
-                                         vport_id);
+                params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+                rc = ecore_sp_vport_start(p_hwfn, &params);
                 if (rc != ECORE_SUCCESS) {
-                        DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
+                        DP_ERR(edev, "Start V-PORT failed %d\n", rc);
                         return rc;
                 }
         }
+        ecore_reset_vport_stats(edev);
+        qdev->vport_started = true;
+        DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
 
         return 0;
 }
@@ -453,13 +515,13 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
         params.update_vport_active_tx_flg = 1;
         params.vport_active_rx_flg = flg;
         params.vport_active_tx_flg = flg;
-#ifndef RTE_LIBRTE_QEDE_VF_TX_SWITCH
-        if (IS_VF(edev)) {
-                params.update_tx_switching_flg = 1;
-                params.tx_switching_flg = !flg;
-                DP_INFO(edev, "VF tx-switching is disabled\n");
+        if (!qdev->enable_tx_switching) {
+                if (IS_VF(edev)) {
+                        params.update_tx_switching_flg = 1;
+                        params.tx_switching_flg = !flg;
+                        DP_INFO(edev, "VF tx-switching is disabled\n");
+                }
         }
-#endif
         for_each_hwfn(edev, i) {
                 p_hwfn = &edev->hwfns[i];
                 params.opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -482,8 +544,8 @@ qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
         /* Enable LRO in split mode */
         sge_tpa_params->tpa_ipv4_en_flg = enable;
         sge_tpa_params->tpa_ipv6_en_flg = enable;
-        sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
-        sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+        sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
+        sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
         /* set if tpa enable changes */
         sge_tpa_params->update_tpa_en_flg = 1;
         /* set if tpa parameters should be handled */
@@ -612,48 +674,127 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
 }
 
 static int
-qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
-                  bool enable, bool mask)
+qede_tunnel_update(struct qede_dev *qdev,
+                   struct ecore_tunnel_info *tunn_info)
 {
-        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
         enum _ecore_status_t rc = ECORE_INVAL;
-        struct ecore_ptt *p_ptt;
-        struct ecore_tunnel_info tunn;
         struct ecore_hwfn *p_hwfn;
+        struct ecore_ptt *p_ptt;
         int i;
 
-        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
-        tunn.vxlan.b_update_mode = enable;
-        tunn.vxlan.b_mode_enabled = mask;
-        tunn.b_update_rx_cls = true;
-        tunn.b_update_tx_cls = true;
-        tunn.vxlan.tun_cls = clss;
-
         for_each_hwfn(edev, i) {
                 p_hwfn = &edev->hwfns[i];
                 if (IS_PF(edev)) {
                         p_ptt = ecore_ptt_acquire(p_hwfn);
-                        if (!p_ptt)
+                        if (!p_ptt) {
+                                DP_ERR(p_hwfn, "Can't acquire PTT\n");
                                 return -EAGAIN;
+                        }
                 } else {
                         p_ptt = NULL;
                 }
+
                 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
-                                &tunn, ECORE_SPQ_MODE_CB, NULL);
-                if (rc != ECORE_SUCCESS) {
-                        DP_ERR(edev, "Failed to update tunn_clss %u\n",
-                               tunn.vxlan.tun_cls);
-                        if (IS_PF(edev))
-                                ecore_ptt_release(p_hwfn, p_ptt);
+                                tunn_info, ECORE_SPQ_MODE_CB, NULL);
+                if (IS_PF(edev))
+                        ecore_ptt_release(p_hwfn, p_ptt);
+
+                if (rc != ECORE_SUCCESS)
                         break;
-                }
         }
 
+        return rc;
+}
+
+static int
+qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+                  bool enable)
+{
+        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+        enum _ecore_status_t rc = ECORE_INVAL;
+        struct ecore_tunnel_info tunn;
+
+        if (qdev->vxlan.enable == enable)
+                return ECORE_SUCCESS;
+
+        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+        tunn.vxlan.b_update_mode = true;
+        tunn.vxlan.b_mode_enabled = enable;
+        tunn.b_update_rx_cls = true;
+        tunn.b_update_tx_cls = true;
+        tunn.vxlan.tun_cls = clss;
+
+        tunn.vxlan_port.b_update_port = true;
+        tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
+
+        rc = qede_tunnel_update(qdev, &tunn);
         if (rc == ECORE_SUCCESS) {
                 qdev->vxlan.enable = enable;
                 qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
-                DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled");
+                DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
+                        enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
+        } else {
+                DP_ERR(edev, "Failed to update tunn_clss %u\n",
+                       tunn.vxlan.tun_cls);
+        }
+
+        return rc;
+}
+
+static int
+qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+                   bool enable)
+{
+        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+        enum _ecore_status_t rc = ECORE_INVAL;
+        struct ecore_tunnel_info tunn;
+
+        memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+        tunn.l2_geneve.b_update_mode = true;
+        tunn.l2_geneve.b_mode_enabled = enable;
+        tunn.ip_geneve.b_update_mode = true;
+        tunn.ip_geneve.b_mode_enabled = enable;
+        tunn.l2_geneve.tun_cls = clss;
+        tunn.ip_geneve.tun_cls = clss;
+        tunn.b_update_rx_cls = true;
+        tunn.b_update_tx_cls = true;
+
+        tunn.geneve_port.b_update_port = true;
+        tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
+
+        rc = qede_tunnel_update(qdev, &tunn);
+        if (rc == ECORE_SUCCESS) {
+                qdev->geneve.enable = enable;
+                qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
+                DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
+                        enable ? "enabled" : "disabled", qdev->geneve.udp_port);
+        } else {
+                DP_ERR(edev, "Failed to update tunn_clss %u\n",
+                       clss);
+        }
+
+        return rc;
+}
+
+static int
+qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+                 enum rte_eth_tunnel_type tunn_type, bool enable)
+{
+        int rc = -EINVAL;
+
+        switch (tunn_type) {
+        case RTE_TUNNEL_TYPE_VXLAN:
+                rc = qede_vxlan_enable(eth_dev, clss, enable);
+                break;
+        case RTE_TUNNEL_TYPE_GENEVE:
+                rc = qede_geneve_enable(eth_dev, clss, enable);
+                break;
+        default:
+                rc = -EINVAL;
+                break;
         }
 
         return rc;
@@ -1057,6 +1198,8 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
                 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
                         " and classification is based on outer tag only\n");
 
+        qdev->vlan_offload_mask = mask;
+
         DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
                 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
 
@@ -1075,9 +1218,7 @@ static void qede_prandom_bytes(uint32_t *buff)
 int qede_config_rss(struct rte_eth_dev *eth_dev)
 {
         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-#endif
         uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
         struct rte_eth_rss_reta_entry64 reta_conf[2];
         struct rte_eth_rss_conf rss_conf;
@@ -1132,13 +1273,6 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 
         PMD_INIT_FUNC_TRACE(edev);
 
-        /* Update MTU only if it has changed */
-        if (qdev->mtu != qdev->new_mtu) {
-                if (qede_update_mtu(eth_dev, qdev->new_mtu))
-                        goto err;
-                qdev->mtu = qdev->new_mtu;
-        }
-
         /* Configure TPA parameters */
         if (rxmode->enable_lro) {
                 if (qede_enable_tpa(eth_dev, true))
@@ -1152,6 +1286,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
         if (qede_start_queues(eth_dev))
                 goto err;
 
+        if (IS_PF(edev))
+                qede_reset_queue_stats(qdev, true);
+
         /* Newer SR-IOV PF driver expects RX/TX queues to be started before
          * enabling RSS. Hence RSS configuration is deferred upto this point.
          * Also, we would like to retain similar behavior in PF case, so we
@@ -1165,9 +1302,6 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
         if (qede_activate_vport(eth_dev, true))
                 goto err;
 
-        /* Bring-up the link */
-        qede_dev_set_link_state(eth_dev, true);
-
         /* Update link status */
         qede_link_update(eth_dev, 0);
 
@@ -1202,12 +1336,69 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
         /* Disable traffic */
         ecore_hw_stop_fastpath(edev); /* TBD - loop */
 
-        /* Bring the link down */
-        qede_dev_set_link_state(eth_dev, false);
-
         DP_INFO(edev, "Device is stopped\n");
 }
 
+#define QEDE_TX_SWITCHING		"vf_txswitch"
+
+const char *valid_args[] = {
+        QEDE_TX_SWITCHING,
+        NULL,
+};
+
+static int qede_args_check(const char *key, const char *val, void *opaque)
+{
+        unsigned long tmp;
+        int ret = 0;
+        struct rte_eth_dev *eth_dev = opaque;
+        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+        errno = 0;
+        tmp = strtoul(val, NULL, 0);
+        if (errno) {
+                DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
+                return errno;
+        }
+
+        if (strcmp(QEDE_TX_SWITCHING, key) == 0)
+                qdev->enable_tx_switching = !!tmp;
+
+        return ret;
+}
+
+static int qede_args(struct rte_eth_dev *eth_dev)
+{
+        struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+        struct rte_kvargs *kvlist;
+        struct rte_devargs *devargs;
+        int ret;
+        int i;
+
+        devargs = pci_dev->device.devargs;
+        if (!devargs)
+                return 0; /* return success */
+
+        kvlist = rte_kvargs_parse(devargs->args, valid_args);
+        if (kvlist == NULL)
+                return -EINVAL;
+
+        /* Process parameters. */
+        for (i = 0; (valid_args[i] != NULL); ++i) {
+                if (rte_kvargs_count(kvlist, valid_args[i])) {
+                        ret = rte_kvargs_process(kvlist, valid_args[i],
+                                                 qede_args_check, eth_dev);
+                        if (ret != ECORE_SUCCESS) {
+                                rte_kvargs_free(kvlist);
+                                return ret;
+                        }
+                }
+        }
+        rte_kvargs_free(kvlist);
+
+        return 0;
+}
+
 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 {
         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
@@ -1233,6 +1424,21 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
                 }
         }
 
+        /* We need to have min 1 RX queue.There is no min check in
+         * rte_eth_dev_configure(), so we are checking it here.
+         */
+        if (eth_dev->data->nb_rx_queues == 0) {
+                DP_ERR(edev, "Minimum one RX queue is required\n");
+                return -EINVAL;
+        }
+
+        /* Enable Tx switching by default */
+        qdev->enable_tx_switching = 1;
+
+        /* Parse devargs and fix up rxmode */
+        if (qede_args(eth_dev))
+                return -ENOTSUP;
+
         /* Sanity checks and throw warnings */
         if (rxmode->enable_scatter)
                 eth_dev->data->scattered_rx = 1;
@@ -1254,34 +1460,21 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
         if (qede_check_fdir_support(eth_dev))
                 return -ENOTSUP;
 
-        /* Deallocate resources if held previously. It is needed only if the
-         * queue count has been changed from previous configuration. If its
-         * going to change then it means RX/TX queue setup will be called
-         * again and the fastpath pointers will be reinitialized there.
-         */
-        if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
-            qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
-                qede_dealloc_fp_resc(eth_dev);
-                /* Proceed with updated queue count */
-                qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
-                qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
-                if (qede_alloc_fp_resc(qdev))
-                        return -ENOMEM;
-        }
+        qede_dealloc_fp_resc(eth_dev);
+        qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+        qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+        if (qede_alloc_fp_resc(qdev))
+                return -ENOMEM;
 
-        /* VF's MTU has to be set using vport-start where as
-         * PF's MTU can be updated via vport-update.
-         */
-        if (IS_VF(edev)) {
-                if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
-                        return -1;
-        } else {
-                if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
-                        return -1;
-        }
+        /* If jumbo enabled adjust MTU */
+        if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+                eth_dev->data->mtu =
+                        eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+                        ETHER_HDR_LEN - ETHER_CRC_LEN;
 
-        qdev->mtu = rxmode->max_rx_pkt_len;
-        qdev->new_mtu = qdev->mtu;
+        if (qede_start_vport(qdev, eth_dev->data->mtu))
+                return -1;
+        qdev->mtu = eth_dev->data->mtu;
 
         /* Enable VLAN offloads by default */
         ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
@@ -1359,7 +1552,8 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
                                      DEV_TX_OFFLOAD_TCP_CKSUM |
                                      DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                                      DEV_TX_OFFLOAD_TCP_TSO |
-                                     DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
+                                     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+                                     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
 
         memset(&link, 0, sizeof(struct qed_link_output));
         qdev->ops->common->get_link(edev, &link);
@@ -1494,12 +1688,15 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
         qede_dev_stop(eth_dev);
 
         qede_stop_vport(edev);
+        qdev->vport_started = false;
         qede_fdir_dealloc_resc(eth_dev);
         qede_dealloc_fp_resc(eth_dev);
 
         eth_dev->data->nb_rx_queues = 0;
         eth_dev->data->nb_tx_queues = 0;
 
+        /* Bring the link down */
+        qede_dev_set_link_state(eth_dev, false);
         qdev->ops->common->slowpath_stop(edev);
         qdev->ops->common->remove(edev);
         rte_intr_disable(&pci_dev->intr_handle);
@@ -1733,6 +1930,7 @@ qede_reset_xstats(struct rte_eth_dev *dev)
         struct ecore_dev *edev = &qdev->edev;
 
         ecore_reset_vport_stats(edev);
+        qede_reset_queue_stats(qdev, true);
 }
 
 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -1768,6 +1966,7 @@ static void qede_reset_stats(struct rte_eth_dev *eth_dev)
         struct ecore_dev *edev = &qdev->edev;
 
         ecore_reset_vport_stats(edev);
+        qede_reset_queue_stats(qdev, false);
 }
 
 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
@@ -1865,6 +2064,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
                 RTE_PTYPE_L4_UDP,
                 RTE_PTYPE_TUNNEL_VXLAN,
                 RTE_PTYPE_L4_FRAG,
+                RTE_PTYPE_TUNNEL_GENEVE,
                 /* Inner */
                 RTE_PTYPE_INNER_L2_ETHER,
                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
@@ -2159,16 +2359,21 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
         struct rte_eth_dev_info dev_info = {0};
         struct qede_fastpath *fp;
+        uint32_t max_rx_pkt_len;
         uint32_t frame_size;
         uint16_t rx_buf_size;
         uint16_t bufsz;
+        bool restart = false;
         int i;
 
         PMD_INIT_FUNC_TRACE(edev);
         qede_dev_info_get(dev, &dev_info);
-        frame_size = mtu + QEDE_ETH_OVERHEAD;
+        max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+        frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
         if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
-                DP_ERR(edev, "MTU %u out of range\n", mtu);
+                DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+                       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+                       ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
                 return -EINVAL;
         }
         if (!dev->data->scattered_rx &&
@@ -2182,29 +2387,57 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
          */
         dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
         dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
-        qede_dev_stop(dev);
+        if (dev->data->dev_started) {
+                dev->data->dev_started = 0;
+                qede_dev_stop(dev);
+                restart = true;
+        }
         rte_delay_ms(1000);
+        qede_start_vport(qdev, mtu); /* Recreate vport */
         qdev->mtu = mtu;
+
         /* Fix up RX buf size for all queues of the port */
         for_each_rss(i) {
                 fp = &qdev->fp_array[i];
-                bufsz = (uint16_t)rte_pktmbuf_data_room_size(
-                        fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
-                if (dev->data->scattered_rx)
-                        rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
-                else
-                        rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
-                rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
-                fp->rxq->rx_buf_size = rx_buf_size;
-                DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+                if (fp->rxq != NULL) {
+                        bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+                                fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+                        if (dev->data->scattered_rx)
+                                rx_buf_size = bufsz + ETHER_HDR_LEN +
+                                              ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+                        else
+                                rx_buf_size = frame_size;
+                        rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+                        fp->rxq->rx_buf_size = rx_buf_size;
+                        DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
+                }
         }
-        qede_dev_start(dev);
-        if (frame_size > ETHER_MAX_LEN)
+        if (max_rx_pkt_len > ETHER_MAX_LEN)
                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
         else
                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+        /* Restore config lost due to vport stop */
+        qede_mac_addr_set(dev, &qdev->primary_mac);
+        if (dev->data->promiscuous)
+                qede_promiscuous_enable(dev);
+        else
+                qede_promiscuous_disable(dev);
+
+        if (dev->data->all_multicast)
+                qede_allmulticast_enable(dev);
+        else
+                qede_allmulticast_disable(dev);
+
+        qede_vlan_offload_set(dev, qdev->vlan_offload_mask);
+
+        if (!dev->data->dev_started && restart) {
+                qede_dev_start(dev);
+                dev->data->dev_started = 1;
+        }
+
         /* update max frame size */
-        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+        dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
         /* Reassign back */
         dev->rx_pkt_burst = qede_recv_pkts;
         dev->tx_pkt_burst = qede_xmit_pkts;
@@ -2213,74 +2446,36 @@
 
 static int
-qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
-                       struct rte_eth_udp_tunnel *tunnel_udp,
-                       bool add)
+qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+                      struct rte_eth_udp_tunnel *tunnel_udp)
 {
         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
         struct ecore_tunnel_info tunn; /* @DPDK */
-        struct ecore_hwfn *p_hwfn;
-        struct ecore_ptt *p_ptt;
         uint16_t udp_port;
-        int rc, i;
+        int rc;
 
         PMD_INIT_FUNC_TRACE(edev);
 
         memset(&tunn, 0, sizeof(tunn));
-        if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
-                /* Enable VxLAN tunnel if needed before UDP port update using
-                 * default MAC/VLAN classification.
-                 */
-                if (add) {
-                        if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
-                                DP_INFO(edev,
-                                        "UDP port %u was already configured\n",
-                                        tunnel_udp->udp_port);
-                                return ECORE_SUCCESS;
-                        }
-                        /* Enable VXLAN if it was not enabled while adding
-                         * VXLAN filter.
-                         */
-                        if (!qdev->vxlan.enable) {
-                                rc = qede_vxlan_enable(eth_dev,
-                                        ECORE_TUNN_CLSS_MAC_VLAN, true, true);
-                                if (rc != ECORE_SUCCESS) {
-                                        DP_ERR(edev, "Failed to enable VXLAN "
-                                                "prior to updating UDP port\n");
-                                        return rc;
-                                }
-                        }
-                        udp_port = tunnel_udp->udp_port;
-                } else {
-                        if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
-                                DP_ERR(edev, "UDP port %u doesn't exist\n",
-                                        tunnel_udp->udp_port);
-                                return ECORE_INVAL;
-                        }
-                        udp_port = 0;
+
+        switch (tunnel_udp->prot_type) {
+        case RTE_TUNNEL_TYPE_VXLAN:
+                if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+                        DP_ERR(edev, "UDP port %u doesn't exist\n",
+                               tunnel_udp->udp_port);
+                        return ECORE_INVAL;
                 }
+                udp_port = 0;
 
                 tunn.vxlan_port.b_update_port = true;
                 tunn.vxlan_port.port = udp_port;
-                for_each_hwfn(edev, i) {
-                        p_hwfn = &edev->hwfns[i];
-                        if (IS_PF(edev)) {
-                                p_ptt = ecore_ptt_acquire(p_hwfn);
-                                if (!p_ptt)
-                                        return -EAGAIN;
-                        } else {
-                                p_ptt = NULL;
-                        }
-                        rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
-                                        ECORE_SPQ_MODE_CB, NULL);
-                        if (rc != ECORE_SUCCESS) {
-                                DP_ERR(edev, "Unable to config UDP port %u\n",
-                                       tunn.vxlan_port.port);
-                                if (IS_PF(edev))
-                                        ecore_ptt_release(p_hwfn, p_ptt);
-                                return rc;
-                        }
+
+                rc = qede_tunnel_update(qdev, &tunn);
+                if (rc != ECORE_SUCCESS) {
+                        DP_ERR(edev, "Unable to config UDP port %u\n",
+                               tunn.vxlan_port.port);
+                        return rc;
                 }
 
                 qdev->vxlan.udp_port = udp_port;
@@ -2288,26 +2483,145 @@ qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
                  * VXLAN filters have reached 0 then VxLAN offload can be be
                  * disabled.
                  */
-                if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+                if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
                         return qede_vxlan_enable(eth_dev,
-                                        ECORE_TUNN_CLSS_MAC_VLAN, false, true);
+                                        ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+                break;
+
+        case RTE_TUNNEL_TYPE_GENEVE:
+                if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
+                        DP_ERR(edev, "UDP port %u doesn't exist\n",
+                               tunnel_udp->udp_port);
+                        return ECORE_INVAL;
+                }
+
+                udp_port = 0;
+
+                tunn.geneve_port.b_update_port = true;
+                tunn.geneve_port.port = udp_port;
+
+                rc = qede_tunnel_update(qdev, &tunn);
+                if (rc != ECORE_SUCCESS) {
+                        DP_ERR(edev, "Unable to config UDP port %u\n",
+                               tunn.vxlan_port.port);
+                        return rc;
+                }
+
+                qdev->vxlan.udp_port = udp_port;
+                /* If the request is to delete UDP port and if the number of
+                 * GENEVE filters have reached 0 then GENEVE offload can be be
+                 * disabled.
+                 */
+                if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
+                        return qede_geneve_enable(eth_dev,
+                                        ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+                break;
+
+        default:
+                return ECORE_INVAL;
         }
 
         return 0;
-}
 
-static int
-qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
-                      struct rte_eth_udp_tunnel *tunnel_udp)
-{
-        return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
 }
-
 static int
 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
                       struct rte_eth_udp_tunnel *tunnel_udp)
 {
-        return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
+        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+        struct ecore_tunnel_info tunn; /* @DPDK */
+        uint16_t udp_port;
+        int rc;
+
+        PMD_INIT_FUNC_TRACE(edev);
+
+        memset(&tunn, 0, sizeof(tunn));
+
+        switch (tunnel_udp->prot_type) {
+        case RTE_TUNNEL_TYPE_VXLAN:
+                if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+                        DP_INFO(edev,
+                                "UDP port %u for VXLAN was already configured\n",
+                                tunnel_udp->udp_port);
+                        return ECORE_SUCCESS;
+                }
+
+                /* Enable VxLAN tunnel with default MAC/VLAN classification if
+                 * it was not enabled while adding VXLAN filter before UDP port
+                 * update.
+                 */
+                if (!qdev->vxlan.enable) {
+                        rc = qede_vxlan_enable(eth_dev,
+                                ECORE_TUNN_CLSS_MAC_VLAN, true);
+                        if (rc != ECORE_SUCCESS) {
+                                DP_ERR(edev, "Failed to enable VXLAN "
+                                       "prior to updating UDP port\n");
+                                return rc;
+                        }
+                }
+                udp_port = tunnel_udp->udp_port;
+
+                tunn.vxlan_port.b_update_port = true;
+                tunn.vxlan_port.port = udp_port;
+
+                rc = qede_tunnel_update(qdev, &tunn);
+                if (rc != ECORE_SUCCESS) {
+                        DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
+                               udp_port);
+                        return rc;
+                }
+
+                DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
+
+                qdev->vxlan.udp_port = udp_port;
+                break;
+
+        case RTE_TUNNEL_TYPE_GENEVE:
+                if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
+                        DP_INFO(edev,
+                                "UDP port %u for GENEVE was already configured\n",
+                                tunnel_udp->udp_port);
+                        return ECORE_SUCCESS;
+                }
+
+                /* Enable GENEVE tunnel with default MAC/VLAN classification if
+                 * it was not enabled while adding GENEVE filter before UDP port
+                 * update.
+                 */
+                if (!qdev->geneve.enable) {
+                        rc = qede_geneve_enable(eth_dev,
+                                ECORE_TUNN_CLSS_MAC_VLAN, true);
+                        if (rc != ECORE_SUCCESS) {
+                                DP_ERR(edev, "Failed to enable GENEVE "
+                                       "prior to updating UDP port\n");
+                                return rc;
+                        }
+                }
+                udp_port = tunnel_udp->udp_port;
+
+                tunn.geneve_port.b_update_port = true;
+                tunn.geneve_port.port = udp_port;
+
+                rc = qede_tunnel_update(qdev, &tunn);
+                if (rc != ECORE_SUCCESS) {
+                        DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
+                               udp_port);
+                        return rc;
+                }
+
+                DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
+
+                qdev->geneve.udp_port = udp_port;
+                break;
+
+        default:
+                return ECORE_INVAL;
+        }
+
+        return 0;
 }
 
 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
@@ -2374,113 +2688,116 @@ qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
         return ECORE_SUCCESS;
 }
 
-static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
-                                  enum rte_filter_op filter_op,
-                                  const struct rte_eth_tunnel_filter_conf *conf)
+static int
+_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+                         const struct rte_eth_tunnel_filter_conf *conf,
+                         __attribute__((unused)) enum rte_filter_op filter_op,
+                         enum ecore_tunn_clss *clss,
+                         bool add)
 {
         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-        enum ecore_filter_ucast_type type;
-        enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
         struct ecore_filter_ucast ucast = {0};
-        char str[80];
+        enum ecore_filter_ucast_type type;
         uint16_t filter_type = 0;
+        char str[80];
         int rc;
 
-        PMD_INIT_FUNC_TRACE(edev);
+        filter_type = conf->filter_type;
+        /* Determine if the given filter classification is supported */
+        qede_get_ecore_tunn_params(filter_type, &type, clss, str);
+        if (*clss == MAX_ECORE_TUNN_CLSS) {
+                DP_ERR(edev, "Unsupported filter type\n");
+                return -EINVAL;
+        }
+        /* Init tunnel ucast params */
+        rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+        if (rc != ECORE_SUCCESS) {
+                DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
+                       conf->filter_type);
+                return rc;
+        }
+        DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+                str, filter_op, ucast.type);
 
-        switch (filter_op) {
-        case RTE_ETH_FILTER_ADD:
-                if (IS_VF(edev))
-                        return qede_vxlan_enable(eth_dev,
-                                        ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+        ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
 
-                filter_type = conf->filter_type;
-                /* Determine if the given filter classification is supported */
-                qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
-                if (clss == MAX_ECORE_TUNN_CLSS) {
-                        DP_ERR(edev, "Unsupported filter type\n");
-                        return -EINVAL;
-                }
-                /* Init tunnel ucast params */
-                rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
-                if (rc != ECORE_SUCCESS) {
-                        DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
-                               conf->filter_type);
-                        return rc;
+        /* Skip MAC/VLAN if filter is based on VNI */
+        if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+                rc = qede_mac_int_ops(eth_dev, &ucast, add);
+                if ((rc == 0) && add) {
+                        /* Enable accept anyvlan */
+                        qede_config_accept_any_vlan(qdev, true);
                 }
-                DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
-                        str, filter_op, ucast.type);
-
-                ucast.opcode = ECORE_FILTER_ADD;
+        } else {
+                rc = qede_ucast_filter(eth_dev, &ucast, add);
+                if (rc == 0)
+                        rc = ecore_filter_ucast_cmd(edev, &ucast,
+                                                    ECORE_SPQ_MODE_CB, NULL);
+        }
 
-                /* Skip MAC/VLAN if filter is based on VNI */
-                if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
-                        rc = qede_mac_int_ops(eth_dev, &ucast, 1);
-                        if (rc == 0) {
-                                /* Enable accept anyvlan */
-                                qede_config_accept_any_vlan(qdev, true);
-                        }
-                } else {
-                        rc = qede_ucast_filter(eth_dev, &ucast, 1);
-                        if (rc == 0)
-                                rc = ecore_filter_ucast_cmd(edev, &ucast,
-                                                    ECORE_SPQ_MODE_CB, NULL);
-                }
+        return rc;
+}
 
-                if (rc != ECORE_SUCCESS)
-                        return rc;
+static int
+qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+                        enum rte_filter_op filter_op,
+                        const struct rte_eth_tunnel_filter_conf *conf)
+{
+        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+        enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
+        bool add;
+        int rc;
 
-                qdev->vxlan.num_filters++;
-                qdev->vxlan.filter_type = filter_type;
-                if (!qdev->vxlan.enable)
-                        return qede_vxlan_enable(eth_dev, clss, true, true);
+        PMD_INIT_FUNC_TRACE(edev);
 
-                break;
+        switch (filter_op) {
+        case RTE_ETH_FILTER_ADD:
+                add = true;
+                break;
         case RTE_ETH_FILTER_DELETE:
-                if (IS_VF(edev))
-                        return qede_vxlan_enable(eth_dev,
-                                        ECORE_TUNN_CLSS_MAC_VLAN, false, true);
+                add = false;
+                break;
+        default:
+                DP_ERR(edev, "Unsupported operation %d\n", filter_op);
+                return -EINVAL;
+        }
 
-                filter_type = conf->filter_type;
-                /* Determine if the given filter classification is supported */
-                qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
-                if (clss == MAX_ECORE_TUNN_CLSS) {
-                        DP_ERR(edev, "Unsupported filter type\n");
-                        return -EINVAL;
-                }
-                /* Init tunnel ucast params */
-                rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
-                if (rc != ECORE_SUCCESS) {
-                        DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
-                               conf->filter_type);
-                        return rc;
-                }
-                DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
-                        str, filter_op, ucast.type);
+        if (IS_VF(edev))
+                return qede_tunn_enable(eth_dev,
+                                        ECORE_TUNN_CLSS_MAC_VLAN,
+                                        conf->tunnel_type, add);
 
-                ucast.opcode = ECORE_FILTER_REMOVE;
+        rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
+        if (rc != ECORE_SUCCESS)
+                return rc;
 
-                if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
-                        rc = qede_mac_int_ops(eth_dev, &ucast, 0);
-                } else {
-                        rc = qede_ucast_filter(eth_dev, &ucast, 0);
-                        if (rc == 0)
-                                rc = ecore_filter_ucast_cmd(edev, &ucast,
-                                                    ECORE_SPQ_MODE_CB, NULL);
+        if (add) {
+                if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
+                        qdev->vxlan.num_filters++;
+                        qdev->vxlan.filter_type = conf->filter_type;
+                } else { /* GENEVE */
+                        qdev->geneve.num_filters++;
+                        qdev->geneve.filter_type = conf->filter_type;
                 }
-                if (rc != ECORE_SUCCESS)
-                        return rc;
 
-                qdev->vxlan.num_filters--;
+                if (!qdev->vxlan.enable || !qdev->geneve.enable)
+                        return qede_tunn_enable(eth_dev, clss,
+                                                conf->tunnel_type,
+                                                true);
+        } else {
+                if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
+                        qdev->vxlan.num_filters--;
+                else /*GENEVE*/
+                        qdev->geneve.num_filters--;
 
                 /* Disable VXLAN if VXLAN filters become 0 */
-                if (qdev->vxlan.num_filters == 0)
-                        return qede_vxlan_enable(eth_dev, clss, false, true);
-                break;
-        default:
-                DP_ERR(edev, "Unsupported operation %d\n", filter_op);
-                return -EINVAL;
+                if ((qdev->vxlan.num_filters == 0) ||
+                    (qdev->geneve.num_filters == 0))
+                        return qede_tunn_enable(eth_dev, clss,
+                                                conf->tunnel_type,
+                                                false);
         }
 
         return 0;
@@ -2500,13 +2817,13 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
         case RTE_ETH_FILTER_TUNNEL:
                 switch (filter_conf->tunnel_type) {
                 case RTE_TUNNEL_TYPE_VXLAN:
+                case RTE_TUNNEL_TYPE_GENEVE:
                         DP_INFO(edev,
                                 "Packet steering to the specified Rx queue"
-                                " is not supported with VXLAN tunneling");
-                        return(qede_vxlan_tunn_config(eth_dev, filter_op,
+                                " is not supported with UDP tunneling");
+                        return(qede_tunn_filter_config(eth_dev, filter_op,
                                                       filter_conf));
                 /* Place holders for future tunneling support */
-                case RTE_TUNNEL_TYPE_GENEVE:
                 case RTE_TUNNEL_TYPE_TEREDO:
                 case RTE_TUNNEL_TYPE_NVGRE:
                 case RTE_TUNNEL_TYPE_IP_IN_GRE:
@@ -2790,22 +3107,38 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
         eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
 
         if (do_once) {
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
                 qede_print_adapter_info(adapter);
-#endif
                 do_once = false;
         }
 
+        /* Bring-up the link */
+        qede_dev_set_link_state(eth_dev, true);
+
         adapter->num_tx_queues = 0;
         adapter->num_rx_queues = 0;
         SLIST_INIT(&adapter->fdir_info.fdir_list_head);
         SLIST_INIT(&adapter->vlan_list_head);
         SLIST_INIT(&adapter->uc_list_head);
         adapter->mtu = ETHER_MTU;
-        adapter->new_mtu = ETHER_MTU;
-        if (!is_vf)
-                if (qede_start_vport(adapter, adapter->mtu))
-                        return -1;
+        adapter->vport_started = false;
+
+        /* VF tunnel offloads is enabled by default in PF driver */
+        adapter->vxlan.num_filters = 0;
+        adapter->geneve.num_filters = 0;
+        if (is_vf) {
+                adapter->vxlan.enable = true;
+                adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
+                                             ETH_TUNNEL_FILTER_IVLAN;
+                adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
+                adapter->geneve.enable = true;
+
+                adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
+                                              ETH_TUNNEL_FILTER_IVLAN;
+                adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+        } else {
+                adapter->vxlan.enable = false;
+                adapter->geneve.enable = false;
+        }
 
         DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
                 adapter->primary_mac.addr_bytes[0],
@@ -2961,3 +3294,15 @@ RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
+
+RTE_INIT(qede_init_log);
+static void
+qede_init_log(void)
+{
+        qede_logtype_init = rte_log_register("pmd.net.qede.init");
+        if (qede_logtype_init >= 0)
+                rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
+        qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
+        if (qede_logtype_driver >= 0)
+                rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
+}