author    | Luca Boccassi <luca.boccassi@gmail.com> | 2018-03-07 11:22:30 +0000
committer | Luca Boccassi <luca.boccassi@gmail.com> | 2018-03-07 11:23:17 +0000
commit    | c3f15def2ebe9cc255cf0e5cf32aa171f5b4326d (patch)
tree      | 8c8fc77df57bca8c0bfe4d0e8797879e12c6d6f9 /drivers/net/qede
parent    | 169a9de21e263aa6599cdc2d87a45ae158d9f509 (diff)
New upstream version 17.11.1 (upstream/17.11.1)
Change-Id: Ida1700b5dac8649fc563670a37278e636bea051c
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/qede')
-rw-r--r-- | drivers/net/qede/base/ecore_dcbx.c    |   7
-rw-r--r-- | drivers/net/qede/base/ecore_vf.c      |   6
-rw-r--r-- | drivers/net/qede/base/ecore_vfpf_if.h |   2
-rw-r--r-- | drivers/net/qede/qede_ethdev.c        | 215
-rw-r--r-- | drivers/net/qede/qede_ethdev.h        |   1
-rw-r--r-- | drivers/net/qede/qede_rxtx.c          | 191
-rw-r--r-- | drivers/net/qede/qede_rxtx.h          |   2
7 files changed, 321 insertions, 103 deletions
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 632297a7..21ddda92 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -216,10 +216,9 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
 		*type = DCBX_PROTOCOL_ETH;
 	} else {
 		*type = DCBX_MAX_PROTOCOL_TYPE;
-		DP_ERR(p_hwfn,
-		       "No action required, App TLV id = 0x%x"
-		       " app_prio_bitmap = 0x%x\n",
-		       id, app_prio_bitmap);
+		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+			   "No action required, App TLV entry = 0x%x\n",
+			   app_prio_bitmap);
 		return false;
 	}
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 25109dbd..e0f2dd5a 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -1385,6 +1385,12 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
 		if (sge_tpa_params->tpa_gro_consistent_flg)
 			p_sge_tpa_tlv->sge_tpa_flags |=
 			    VFPF_TPA_GRO_CONSIST_FLAG;
+		if (sge_tpa_params->tpa_ipv4_tunn_en_flg)
+			p_sge_tpa_tlv->sge_tpa_flags |=
+			    VFPF_TPA_TUNN_IPV4_EN_FLAG;
+		if (sge_tpa_params->tpa_ipv6_tunn_en_flg)
+			p_sge_tpa_tlv->sge_tpa_flags |=
+			    VFPF_TPA_TUNN_IPV6_EN_FLAG;
 
 		p_sge_tpa_tlv->tpa_max_aggs_num =
 		    sge_tpa_params->tpa_max_aggs_num;
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 3ccc7665..ecb00649 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -424,6 +424,8 @@ struct vfpf_vport_update_sge_tpa_tlv {
 	#define VFPF_TPA_PKT_SPLIT_FLAG      (1 << 2)
 	#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
 	#define VFPF_TPA_GRO_CONSIST_FLAG    (1 << 4)
+	#define VFPF_TPA_TUNN_IPV4_EN_FLAG   (1 << 5)
+	#define VFPF_TPA_TUNN_IPV6_EN_FLAG   (1 << 6)
 
 	u8 update_sge_tpa_flags;
 	#define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 6f5ba2a9..73764e9a 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -9,6 +9,7 @@
 #include "qede_ethdev.h"
 #include <rte_alarm.h>
 #include <rte_version.h>
+#include <rte_kvargs.h>
 
 /* Globals */
 static const struct qed_eth_ops *qed_ops;
@@ -385,6 +386,62 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 }
 #endif
 
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
+{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+	unsigned int i = 0, j = 0, qid;
+	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+	struct qede_tx_queue *txq;
+
+	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
+	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+	for_each_rss(qid) {
+		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			     offsetof(struct qede_rx_queue, rcv_pkts), 0,
+			    sizeof(uint64_t));
+		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			     offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+			    sizeof(uint64_t));
+		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			     offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+			    sizeof(uint64_t));
+
+		if (xstats)
+			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
+				OSAL_MEMSET((((char *)
+					      (qdev->fp_array[qid].rxq)) +
+					     qede_rxq_xstats_strings[j].offset),
+					    0,
+					    sizeof(uint64_t));
+
+		i++;
+		if (i == rxq_stat_cntrs)
+			break;
+	}
+
+	i = 0;
+
+	for_each_tss(qid) {
+		txq = qdev->fp_array[qid].txq;
+
+		OSAL_MEMSET((uint64_t *)(uintptr_t)
+				(((uint64_t)(uintptr_t)(txq)) +
+				 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+			    sizeof(uint64_t));
+
+		i++;
+		if (i == txq_stat_cntrs)
+			break;
+	}
+}
+
 static int
 qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
 {
@@ -410,6 +467,8 @@ qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
 		}
 	}
 	ecore_reset_vport_stats(edev);
+	if (IS_PF(edev))
+		qede_reset_queue_stats(qdev, true);
 	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
 
 	return 0;
@@ -453,13 +512,13 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
 	params.update_vport_active_tx_flg = 1;
 	params.vport_active_rx_flg = flg;
 	params.vport_active_tx_flg = flg;
-#ifndef RTE_LIBRTE_QEDE_VF_TX_SWITCH
-	if (IS_VF(edev)) {
-		params.update_tx_switching_flg = 1;
-		params.tx_switching_flg = !flg;
-		DP_INFO(edev, "VF tx-switching is disabled\n");
+	if (!qdev->enable_tx_switching) {
+		if (IS_VF(edev)) {
+			params.update_tx_switching_flg = 1;
+			params.tx_switching_flg = !flg;
+			DP_INFO(edev, "VF tx-switching is disabled\n");
+		}
 	}
-#endif
 	for_each_hwfn(edev, i) {
 		p_hwfn = &edev->hwfns[i];
 		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -482,8 +541,8 @@ qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
 	/* Enable LRO in split mode */
 	sge_tpa_params->tpa_ipv4_en_flg = enable;
 	sge_tpa_params->tpa_ipv6_en_flg = enable;
-	sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
-	sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
+	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
 	/* set if tpa enable changes */
 	sge_tpa_params->update_tpa_en_flg = 1;
 	/* set if tpa parameters should be handled */
@@ -1208,6 +1267,68 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
 	DP_INFO(edev, "Device is stopped\n");
 }
 
+#define QEDE_TX_SWITCHING "vf_txswitch"
+
+const char *valid_args[] = {
+	QEDE_TX_SWITCHING,
+	NULL,
+};
+
+static int qede_args_check(const char *key, const char *val, void *opaque)
+{
+	unsigned long tmp;
+	int ret = 0;
+	struct rte_eth_dev *eth_dev = opaque;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+
+	errno = 0;
+	tmp = strtoul(val, NULL, 0);
+	if (errno) {
+		DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
+		return errno;
+	}
+
+	if (strcmp(QEDE_TX_SWITCHING, key) == 0)
+		qdev->enable_tx_switching = !!tmp;
+
+	return ret;
+}
+
+static int qede_args(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	struct rte_kvargs *kvlist;
+	struct rte_devargs *devargs;
+	int ret;
+	int i;
+
+	devargs = pci_dev->device.devargs;
+	if (!devargs)
+		return 0; /* return success */
+
+	kvlist = rte_kvargs_parse(devargs->args, valid_args);
+	if (kvlist == NULL)
+		return -EINVAL;
+
+	/* Process parameters. */
+	for (i = 0; (valid_args[i] != NULL); ++i) {
+		if (rte_kvargs_count(kvlist, valid_args[i])) {
+			ret = rte_kvargs_process(kvlist, valid_args[i],
+						 qede_args_check, eth_dev);
+			if (ret != ECORE_SUCCESS) {
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+		}
+	}
+	rte_kvargs_free(kvlist);
+
+	return 0;
+}
+
 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
@@ -1233,6 +1354,21 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		}
 	}
 
+	/* We need to have min 1 RX queue.There is no min check in
+	 * rte_eth_dev_configure(), so we are checking it here.
+	 */
+	if (eth_dev->data->nb_rx_queues == 0) {
+		DP_ERR(edev, "Minimum one RX queue is required\n");
+		return -EINVAL;
+	}
+
+	/* Enable Tx switching by default */
+	qdev->enable_tx_switching = 1;
+
+	/* Parse devargs and fix up rxmode */
+	if (qede_args(eth_dev))
+		return -ENOTSUP;
+
 	/* Sanity checks and throw warnings */
 	if (rxmode->enable_scatter)
 		eth_dev->data->scattered_rx = 1;
@@ -1269,18 +1405,24 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 			return -ENOMEM;
 	}
 
+	/* If jumbo enabled adjust MTU */
+	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+		eth_dev->data->mtu =
+			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+			ETHER_HDR_LEN - ETHER_CRC_LEN;
+
 	/* VF's MTU has to be set using vport-start where as
 	 * PF's MTU can be updated via vport-update.
 	 */
 	if (IS_VF(edev)) {
-		if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
+		if (qede_start_vport(qdev, eth_dev->data->mtu))
 			return -1;
 	} else {
-		if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
+		if (qede_update_mtu(eth_dev, eth_dev->data->mtu))
 			return -1;
 	}
 
-	qdev->mtu = rxmode->max_rx_pkt_len;
+	qdev->mtu = eth_dev->data->mtu;
 	qdev->new_mtu = qdev->mtu;
 
 	/* Enable VLAN offloads by default */
@@ -1733,6 +1875,7 @@ qede_reset_xstats(struct rte_eth_dev *dev)
 	struct ecore_dev *edev = &qdev->edev;
 
 	ecore_reset_vport_stats(edev);
+	qede_reset_queue_stats(qdev, true);
 }
 
 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -1768,6 +1911,7 @@ static void qede_reset_stats(struct rte_eth_dev *eth_dev)
 	struct ecore_dev *edev = &qdev->edev;
 
 	ecore_reset_vport_stats(edev);
+	qede_reset_queue_stats(qdev, false);
 }
 
 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
@@ -2159,16 +2303,23 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct rte_eth_dev_info dev_info = {0};
 	struct qede_fastpath *fp;
+	uint32_t max_rx_pkt_len;
 	uint32_t frame_size;
 	uint16_t rx_buf_size;
 	uint16_t bufsz;
+	bool restart = false;
 	int i;
 
 	PMD_INIT_FUNC_TRACE(edev);
+	if (IS_VF(edev))
+		return -ENOTSUP;
 	qede_dev_info_get(dev, &dev_info);
-	frame_size = mtu + QEDE_ETH_OVERHEAD;
+	max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
-		DP_ERR(edev, "MTU %u out of range\n", mtu);
+		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+		       ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
 		return -EINVAL;
 	}
 	if (!dev->data->scattered_rx &&
@@ -2182,29 +2333,39 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	 */
 	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
 	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
-	qede_dev_stop(dev);
+	if (dev->data->dev_started) {
+		dev->data->dev_started = 0;
+		qede_dev_stop(dev);
+		restart = true;
+	}
 	rte_delay_ms(1000);
-	qdev->mtu = mtu;
+	qdev->new_mtu = mtu;
 	/* Fix up RX buf size for all queues of the port */
 	for_each_rss(i) {
 		fp = &qdev->fp_array[i];
-		bufsz = (uint16_t)rte_pktmbuf_data_room_size(
-			fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
-		if (dev->data->scattered_rx)
-			rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
-		else
-			rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
-		rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
-		fp->rxq->rx_buf_size = rx_buf_size;
-		DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+		if (fp->rxq != NULL) {
+			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+			if (dev->data->scattered_rx)
+				rx_buf_size = bufsz + ETHER_HDR_LEN +
+					ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+			else
+				rx_buf_size = frame_size;
+			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+			fp->rxq->rx_buf_size = rx_buf_size;
+			DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+		}
 	}
-	qede_dev_start(dev);
-	if (frame_size > ETHER_MAX_LEN)
+	if (max_rx_pkt_len > ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
 	else
 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+	if (!dev->data->dev_started && restart) {
+		qede_dev_start(dev);
+		dev->data->dev_started = 1;
+	}
 	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
 	/* Reassign back */
 	dev->rx_pkt_burst = qede_recv_pkts;
 	dev->tx_pkt_burst = qede_xmit_pkts;
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 021de5c0..8f21b338 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -185,6 +185,7 @@ struct qede_dev {
 	struct qede_fastpath *fp_array;
 	uint16_t mtu;
 	uint16_t new_mtu;
+	bool enable_tx_switching;
 	bool rss_enable;
 	struct rte_eth_rss_conf rss_conf;
 	uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 01a24e54..31132ceb 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -84,7 +84,6 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->port_id = dev->data->port_id;
 
 	max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
-	qdev->mtu = max_rx_pkt_len;
 
 	/* Fix up RX buffer size */
 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
@@ -97,9 +96,10 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	}
 
 	if (dev->data->scattered_rx)
-		rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+		rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
+				   ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
 	else
-		rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
+		rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
 	/* Align to cache-line size if needed */
 	rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
 
@@ -158,7 +158,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	qdev->fp_array[queue_idx].rxq = rxq;
 
 	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
-		  queue_idx, nb_desc, qdev->mtu, socket_id);
+		  queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
 
 	return 0;
 }
@@ -812,12 +812,18 @@ void qede_stop_queues(struct rte_eth_dev *eth_dev)
 	}
 }
 
-static bool qede_tunn_exist(uint16_t flag)
+static inline bool qede_tunn_exist(uint16_t flag)
 {
 	return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
 		    PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
 }
 
+static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
+{
+	return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+		PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
+}
+
 /*
  * qede_check_tunn_csum_l4:
  * Returns:
@@ -844,33 +850,51 @@ static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
 	return 0;
 }
 
-/* Returns outer L3 and L4 packet_type for tunneled packets */
+/* Returns outer L2, L3 and L4 packet_type for tunneled packets */
 static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
 {
 	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
 	struct ether_hdr *eth_hdr;
 	struct ipv4_hdr *ipv4_hdr;
 	struct ipv6_hdr *ipv6_hdr;
+	struct vlan_hdr *vlan_hdr;
+	uint16_t ethertype;
+	bool vlan_tagged = 0;
+	uint16_t len;
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
-	if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+	len = sizeof(struct ether_hdr);
+	ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
+
+	/* Note: Valid only if VLAN stripping is disabled */
+	if (ethertype == ETHER_TYPE_VLAN) {
+		vlan_tagged = 1;
+		vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+		len += sizeof(struct vlan_hdr);
+		ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
+	}
+
+	if (ethertype == ETHER_TYPE_IPv4) {
 		packet_type |= RTE_PTYPE_L3_IPV4;
-		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
-						   sizeof(struct ether_hdr));
+		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
 		if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
 			packet_type |= RTE_PTYPE_L4_TCP;
 		else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
 			packet_type |= RTE_PTYPE_L4_UDP;
-	} else if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+	} else if (ethertype == ETHER_TYPE_IPv6) {
 		packet_type |= RTE_PTYPE_L3_IPV6;
-		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
-						   sizeof(struct ether_hdr));
+		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
 		if (ipv6_hdr->proto == IPPROTO_TCP)
 			packet_type |= RTE_PTYPE_L4_TCP;
 		else if (ipv6_hdr->proto == IPPROTO_UDP)
 			packet_type |= RTE_PTYPE_L4_UDP;
 	}
 
+	if (vlan_tagged)
+		packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+	else
+		packet_type |= RTE_PTYPE_L2_ETHER;
+
 	return packet_type;
 }
 
@@ -1163,17 +1187,17 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 		[QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
 		[QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
-				RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_GENEVE,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
-				RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_GRE,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
-				RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_VXLAN,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
-				RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_GENEVE,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
-				RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_GRE,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
-				RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_VXLAN,
 		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
 				RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
 		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
@@ -1253,7 +1277,7 @@ print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
 		 uint8_t bitfield)
 {
 	PMD_RX_LOG(INFO, rxq,
-		   "len 0x%x bf 0x%x hash_val 0x%x"
+		   "len 0x%04x bf 0x%04x hash_val 0x%x"
 		   " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
 		   " inner_l2=%s inner_l3=%s inner_l4=%s\n",
 		   m->data_len, bitfield, m->hash.rss,
@@ -1404,47 +1428,62 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 				ol_flags |= PKT_RX_L4_CKSUM_BAD;
 			} else {
 				ol_flags |= PKT_RX_L4_CKSUM_GOOD;
-				if (tpa_start_flg)
-					flags =
-					cqe_start_tpa->tunnel_pars_flags.flags;
-				else
-					flags = fp_cqe->tunnel_pars_flags.flags;
-				tunn_parse_flag = flags;
-				/* Tunnel_type */
-				packet_type =
-				qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
-
-				/* Inner header */
-				packet_type |=
-				qede_rx_cqe_to_pkt_type_inner(parse_flag);
-
-				/* Outer L3/L4 types is not available in CQE */
-				packet_type |=
-				qede_rx_cqe_to_pkt_type_outer(rx_mb);
-			}
-		} else {
-			PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
-			if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
-				PMD_RX_LOG(ERR, rxq,
-					   "L4 csum failed, flags = 0x%x\n",
-					   parse_flag);
-				rxq->rx_hw_errors++;
-				ol_flags |= PKT_RX_L4_CKSUM_BAD;
-			} else {
-				ol_flags |= PKT_RX_L4_CKSUM_GOOD;
 			}
-			if (unlikely(qede_check_notunn_csum_l3(rx_mb,
-							parse_flag))) {
+
+			if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
 				PMD_RX_LOG(ERR, rxq,
-					   "IP csum failed, flags = 0x%x\n",
-					   parse_flag);
-				rxq->rx_hw_errors++;
-				ol_flags |= PKT_RX_IP_CKSUM_BAD;
+					"Outer L3 csum failed, flags = 0x%x\n",
+					parse_flag);
+				rxq->rx_hw_errors++;
+				ol_flags |= PKT_RX_EIP_CKSUM_BAD;
 			} else {
-				ol_flags |= PKT_RX_IP_CKSUM_GOOD;
-				packet_type =
-					qede_rx_cqe_to_pkt_type(parse_flag);
+				ol_flags |= PKT_RX_IP_CKSUM_GOOD;
 			}
+
+			if (tpa_start_flg)
+				flags = cqe_start_tpa->tunnel_pars_flags.flags;
+			else
+				flags = fp_cqe->tunnel_pars_flags.flags;
+			tunn_parse_flag = flags;
+
+			/* Tunnel_type */
+			packet_type =
+				qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+
+			/* Inner header */
+			packet_type |=
+				qede_rx_cqe_to_pkt_type_inner(parse_flag);
+
+			/* Outer L3/L4 types is not available in CQE */
+			packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+
+			/* Outer L3/L4 types is not available in CQE.
+			 * Need to add offset to parse correctly,
+			 */
+			rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+			packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+		}
+
+		/* Common handling for non-tunnel packets and for inner
+		 * headers in the case of tunnel.
+		 */
+		if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+			PMD_RX_LOG(ERR, rxq,
+				   "L4 csum failed, flags = 0x%x\n",
+				   parse_flag);
+			rxq->rx_hw_errors++;
+			ol_flags |= PKT_RX_L4_CKSUM_BAD;
+		} else {
+			ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+		}
+		if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
+			PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
+				   parse_flag);
+			rxq->rx_hw_errors++;
+			ol_flags |= PKT_RX_IP_CKSUM_BAD;
+		} else {
+			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+			packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
 		}
 
 		if (CQE_HAS_VLAN(parse_flag) ||
@@ -1549,7 +1588,8 @@ next_cqe:
 /* Populate scatter gather buffer descriptor fields */
 static inline uint16_t
 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
-		  struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
+		  struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
+		  uint16_t start_seg)
 {
 	struct qede_tx_queue *txq = p_txq;
 	struct eth_tx_bd *tx_bd = NULL;
@@ -1558,7 +1598,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
 
 	/* Check for scattered buffers */
 	while (m_seg) {
-		if (nb_segs == 0) {
+		if (start_seg == 0) {
 			if (!*bd2) {
 				*bd2 = (struct eth_tx_2nd_bd *)
 					ecore_chain_produce(&txq->tx_pbl);
@@ -1568,7 +1608,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
 			mapping = rte_mbuf_data_iova(m_seg);
 			QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
 			PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
-		} else if (nb_segs == 1) {
+		} else if (start_seg == 1) {
 			if (!*bd3) {
 				*bd3 = (struct eth_tx_3rd_bd *)
 					ecore_chain_produce(&txq->tx_pbl);
@@ -1606,20 +1646,24 @@ print_tx_bd_info(struct qede_tx_queue *txq,
 
 	if (bd1)
 		PMD_TX_LOG(INFO, txq,
-			   "BD1: nbytes=%u nbds=%u bd_flags=%04x bf=%04x",
-			   rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
-			   bd1->data.bd_flags.bitfields,
-			   rte_cpu_to_le_16(bd1->data.bitfields));
+		   "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
+		   rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
+		   bd1->data.bd_flags.bitfields,
+		   rte_cpu_to_le_16(bd1->data.bitfields));
 	if (bd2)
 		PMD_TX_LOG(INFO, txq,
-			   "BD2: nbytes=%u bf=%04x\n",
-			   rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1);
+		   "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
+		   rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
+		   bd2->data.bitfields2, bd2->data.tunn_ip_size);
 	if (bd3)
 		PMD_TX_LOG(INFO, txq,
-			   "BD3: nbytes=%u bf=%04x mss=%u\n",
-			   rte_cpu_to_le_16(bd3->nbytes),
-			   rte_cpu_to_le_16(bd3->data.bitfields),
-			   rte_cpu_to_le_16(bd3->data.lso_mss));
+		   "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
+		   "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
+		   rte_cpu_to_le_16(bd3->nbytes),
+		   rte_cpu_to_le_16(bd3->data.bitfields),
+		   rte_cpu_to_le_16(bd3->data.lso_mss),
+		   bd3->data.tunn_l4_hdr_start_offset_w,
+		   bd3->data.tunn_hdr_size_w);
 
 	rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
 	PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
@@ -1897,6 +1941,10 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			 * and BD2 onwards for data.
 			 */
 			hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+			if (tunn_flg)
+				hdr_size += mbuf->outer_l2_len +
+					    mbuf->outer_l3_len;
+
 			bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
 			bd1_bd_flags_bf |=
 					1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
@@ -2013,9 +2061,11 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/* Handle fragmented MBUF */
 		m_seg = mbuf->next;
+
 		/* Encode scatter gather buffer descriptors if required */
-		nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3);
+		nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
 		bd1->data.nbds = nbds + nb_frags;
+
 		txq->nb_tx_avail -= bd1->data.nbds;
 		txq->sw_tx_prod++;
 		rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
@@ -2023,7 +2073,6 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
 		print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
-		PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d", lso_flg, tunn_flg);
 #endif
 		nb_pkt_sent++;
 		txq->xmit_pkts++;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index acf9e475..ae88206d 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -64,7 +64,7 @@
 #define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
 					~(QEDE_FW_RX_ALIGN_END - 1))
 /* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
-#define QEDE_ETH_OVERHEAD ((ETHER_HDR_LEN) + ((2 * QEDE_VLAN_TAG_SIZE)) \
+#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
 			   + (QEDE_LLC_SNAP_HDR_LEN))
 
 #define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4 |\