Diffstat (limited to 'drivers/net/vmxnet3')
-rw-r--r--  drivers/net/vmxnet3/Makefile             |   4
-rw-r--r--  drivers/net/vmxnet3/base/upt1_defs.h     |   7
-rw-r--r--  drivers/net/vmxnet3/base/vmxnet3_defs.h  |  34
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c     | 150
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.h     |   1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ring.h       |   2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c       | 212
7 files changed, 264 insertions, 146 deletions
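
The ethdev and rxtx changes below move the PMD from the old rxmode flag bits (hw_ip_checksum, enable_lro, hw_vlan_strip, hw_vlan_filter) to the per-port offloads bitmask and advertise the new VMXNET3_RX_OFFLOAD_CAP / VMXNET3_TX_OFFLOAD_CAP sets. As a rough illustration only (not part of the patch), an application built against this API might request the offloads as sketched below; the helper name configure_vmxnet3_port(), the port id, and the queue counts are placeholders.

/* Illustrative sketch only -- not part of this patch. It shows how an
 * application might request the per-port offloads that the PMD now
 * advertises through dev_info->rx_offload_capa / tx_offload_capa.
 * configure_vmxnet3_port() and the queue counts are placeholders.
 */
#include <rte_ethdev.h>

static int
configure_vmxnet3_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.offloads = DEV_RX_OFFLOAD_CHECKSUM |
				    DEV_RX_OFFLOAD_VLAN_STRIP |
				    DEV_RX_OFFLOAD_TCP_LRO,
		},
		.txmode = {
			.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
				    DEV_TX_OFFLOAD_TCP_CKSUM |
				    DEV_TX_OFFLOAD_TCP_TSO |
				    DEV_TX_OFFLOAD_MULTI_SEGS,
		},
	};

	/* One Rx and one Tx queue; the offloads are taken from conf above. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}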
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 6bfbf019..f1141da6 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -8,6 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
 #
 LIB = librte_pmd_vmxnet3_uio.a
 
+CFLAGS += -DALLOW_EXPERIMENTAL_API
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 
@@ -15,7 +16,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
 #
 # CFLAGS for icc
 #
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
 
 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
 #
diff --git a/drivers/net/vmxnet3/base/upt1_defs.h b/drivers/net/vmxnet3/base/upt1_defs.h
index cf9141b2..5fd7a397 100644
--- a/drivers/net/vmxnet3/base/upt1_defs.h
+++ b/drivers/net/vmxnet3/base/upt1_defs.h
@@ -1,9 +1,6 @@
-/*********************************************************
+/* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C) 2007 VMware, Inc. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- *
- *********************************************************/
+ */
 
 /* upt1_defs.h
  *
diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h
index a455e270..bbec708c 100644
--- a/drivers/net/vmxnet3/base/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h
@@ -1,9 +1,6 @@
-/*********************************************************
+/* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C) 2007 VMware, Inc. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- *
- *********************************************************/
+ */
 
 /*
  * vmxnet3_defs.h --
@@ -327,7 +324,32 @@ struct Vmxnet3_RxCompDescExt {
 	uint8	segCnt;       /* Number of aggregated packets */
 	uint8	dupAckCnt;    /* Number of duplicate Acks */
 	__le16	tsDelta;      /* TCP timestamp difference */
-	__le32	dword2[2];
+	__le32	dword2;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32 gen : 1;     /* generation bit */
+	uint32 type : 7;    /* completion type */
+	uint32 fcs : 1;     /* Frame CRC correct */
+	uint32 frg : 1;     /* IP Fragment */
+	uint32 v4 : 1;      /* IPv4 */
+	uint32 v6 : 1;      /* IPv6 */
+	uint32 ipc : 1;     /* IP Checksum Correct */
+	uint32 tcp : 1;     /* TCP packet */
+	uint32 udp : 1;     /* UDP packet */
+	uint32 tuc : 1;     /* TCP/UDP Checksum Correct */
+	uint32 mss : 16;
+#else
+	uint32 mss : 16;
+	uint32 tuc : 1;     /* TCP/UDP Checksum Correct */
+	uint32 udp : 1;     /* UDP packet */
+	uint32 tcp : 1;     /* TCP packet */
+	uint32 ipc : 1;     /* IP Checksum Correct */
+	uint32 v6 : 1;      /* IPv6 */
+	uint32 v4 : 1;      /* IPv4 */
+	uint32 frg : 1;     /* IP Fragment */
+	uint32 fcs : 1;     /* Frame CRC correct */
+	uint32 type : 7;    /* completion type */
+	uint32 gen : 1;     /* generation bit */
+#endif /* __BIG_ENDIAN_BITFIELD */
 }
 #include "vmware_pack_end.h"
 Vmxnet3_RxCompDescExt;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 4e68aae6..ba932ff2 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -20,7 +20,6 @@
 #include <rte_debug.h>
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
-#include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_memory.h>
 #include <rte_memzone.h>
@@ -43,6 +42,23 @@
 
 #define VMXNET3_TX_MAX_SEG	UINT8_MAX
 
+#define VMXNET3_TX_OFFLOAD_CAP		\
+	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
+	 DEV_TX_OFFLOAD_IPV4_CKSUM |	\
+	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
+	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
+	 DEV_TX_OFFLOAD_TCP_TSO |	\
+	 DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define VMXNET3_RX_OFFLOAD_CAP		\
+	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
+	 DEV_RX_OFFLOAD_SCATTER |	\
+	 DEV_RX_OFFLOAD_IPV4_CKSUM |	\
+	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
+	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
+	 DEV_RX_OFFLOAD_TCP_LRO |	\
+	 DEV_RX_OFFLOAD_JUMBO_FRAME)
+
 static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
 static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
@@ -73,7 +89,7 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
 				       uint16_t vid, int on);
 static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
+static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
 				 struct ether_addr *mac_addr);
 static void vmxnet3_interrupt_handler(void *param);
 
@@ -151,66 +167,14 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
 		if (mz)
 			rte_memzone_free(mz);
 		return rte_memzone_reserve_aligned(z_name, size, socket_id,
-						   0, align);
+				RTE_MEMZONE_IOVA_CONTIG, align);
 	}
 
 	if (mz)
 		return mz;
 
-	return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
-}
-
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-
-static int
-vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
-				    struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = link;
-	struct rte_eth_link *src = &(dev->data->dev_link);
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-				*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to write to.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static int
-vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-				     struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = &(dev->data->dev_link);
-	struct rte_eth_link *src = link;
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-				*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
+	return rte_memzone_reserve_aligned(z_name, size, socket_id,
+			RTE_MEMZONE_IOVA_CONTIG, align);
 }
 
 /*
@@ -267,6 +231,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 	struct rte_pci_device *pci_dev;
 	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
 	uint32_t mac_hi, mac_lo, ver;
+	struct rte_eth_link link;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -369,6 +334,13 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
 	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
 
+	/* set the initial link status */
+	memset(&link, 0, sizeof(link));
+	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_speed = ETH_SPEED_NUM_10G;
+	link.link_autoneg = ETH_LINK_FIXED;
+	rte_eth_linkstatus_set(eth_dev, &link);
+
 	return 0;
 }
 
@@ -612,9 +584,12 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	uint32_t mtu = dev->data->mtu;
 	Vmxnet3_DriverShared *shared = hw->shared;
 	Vmxnet3_DSDevRead *devRead = &shared->devRead;
+	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	uint32_t i;
 	int ret;
 
+	hw->mtu = mtu;
+
 	shared->magic = VMXNET3_REV1_MAGIC;
 	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
 
@@ -644,6 +619,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
 		vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];
 
+		txq->shared = &hw->tqd_start[i];
+
 		tqd->ctrl.txNumDeferred  = 0;
 		tqd->ctrl.txThreshold    = 1;
 		tqd->conf.txRingBasePA   = txq->cmd_ring.basePA;
@@ -664,6 +641,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 		Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
 		vmxnet3_rx_queue_t *rxq  = dev->data->rx_queues[i];
 
+		rxq->shared = &hw->rqd_start[i];
+
 		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
 		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
 		rqd->conf.compRingBasePA  = rxq->comp_ring.basePA;
@@ -685,10 +664,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	devRead->rxFilterConf.rxMode = 0;
 
 	/* Setting up feature flags */
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
 
-	if (dev->data->dev_conf.rxmode.enable_lro) {
+	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
 		devRead->misc.maxNumRxSG = 0;
 	}
@@ -853,7 +832,10 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 
 	/* Clear recorded link status */
 	memset(&link, 0, sizeof(link));
-	vmxnet3_dev_atomic_write_link_status(dev, &link);
+	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_speed = ETH_SPEED_NUM_10G;
+	link.link_autoneg = ETH_LINK_FIXED;
+	rte_eth_linkstatus_set(dev, &link);
 }
 
 /*
@@ -1054,18 +1036,16 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		stats->q_errors[i] = rxStats.pktsRxError;
 		stats->ierrors += rxStats.pktsRxError;
-		stats->rx_nombuf += rxStats.pktsRxOutOfBuf;
+		stats->imissed += rxStats.pktsRxOutOfBuf;
 	}
 
 	return 0;
 }
 
 static void
-vmxnet3_dev_info_get(struct rte_eth_dev *dev,
+vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused,
 		     struct rte_eth_dev_info *dev_info)
 {
-	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
 	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
 	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
 	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
@@ -1090,17 +1070,10 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
 		.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
 	};
 
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO;
-
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
+	dev_info->rx_queue_offload_capa = 0;
+	dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
+	dev_info->tx_queue_offload_capa = 0;
 }
 
 static const uint32_t *
@@ -1117,13 +1090,14 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	return NULL;
 }
 
-static void
+static int
 vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 
 	ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr));
 	vmxnet3_write_mac(hw, mac_addr->addr_bytes);
+	return 0;
 }
 
 /* return 0 means link status changed, -1 means not changed */
 static int
@@ -1132,25 +1106,21 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
 			  __rte_unused int wait_to_complete)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
-	struct rte_eth_link old = { 0 }, link;
+	struct rte_eth_link link;
 	uint32_t ret;
 
 	memset(&link, 0, sizeof(link));
-	vmxnet3_dev_atomic_read_link_status(dev, &old);
 
 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
 	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
 
-	if (ret & 0x1) {
+	if (ret & 0x1)
 		link.link_status = ETH_LINK_UP;
-		link.link_duplex = ETH_LINK_FULL_DUPLEX;
-		link.link_speed = ETH_SPEED_NUM_10G;
-		link.link_autoneg = ETH_LINK_AUTONEG;
-	}
-
-	vmxnet3_dev_atomic_write_link_status(dev, &link);
+	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_speed = ETH_SPEED_NUM_10G;
+	link.link_autoneg = ETH_LINK_FIXED;
 
-	return (old.link_status == link.link_status) ? -1 : 0;
+	return rte_eth_linkstatus_set(dev, &link);
 }
 
 static int
@@ -1197,8 +1167,9 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
-	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 	else
 		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1260,9 +1231,10 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
 	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
+	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 		else
 			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1272,7 +1244,7 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
 		else
 			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index b2a8cf35..d3f2b352 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -87,6 +87,7 @@ struct vmxnet3_hw {
 
 	uint64_t              queueDescPA;
 	uint16_t              queue_desc_len;
+	uint16_t              mtu;
 
 	VMXNET3_RSSConf *rss_conf;
 	uint64_t         rss_confPA;
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
index 552180e8..50992349 100644
--- a/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -14,7 +14,7 @@
 #define VMXNET3_DEF_RX_RING_SIZE 128
 
 /* Default rx data ring desc size */
-#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
+#define VMXNET3_DEF_RXDATA_DESC_SIZE 256
 
 #define VMXNET3_SUCCESS 0
 #define VMXNET3_FAIL   -1
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 3a8c62fc..cf85f3d6 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -457,6 +457,14 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		    rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
 			struct Vmxnet3_TxDataDesc *tdd;
 
+			/* Skip empty packets */
+			if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) {
+				txq->stats.drop_total++;
+				rte_pktmbuf_free(txm);
+				nb_tx++;
+				continue;
+			}
+
 			tdd = (struct Vmxnet3_TxDataDesc *)
 				((uint8 *)txq->data_ring.base +
 				 txq->cmd_ring.next2fill *
@@ -477,6 +485,11 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * maximum size of mbuf segment size.
 		 */
 		gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
+
+		/* Skip empty segments */
+		if (unlikely(m_seg->data_len == 0))
+			continue;
+
 		if (copy_size) {
 			uint64 offset =
 				(uint64)txq->cmd_ring.next2fill *
@@ -646,37 +659,154 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
 	return i;
 }
 
-
-/* Receive side checksum and other offloads */
-static void
-vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
+/* MSS not provided by vmxnet3, guess one with available information */
+static uint16_t
+vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+		struct rte_mbuf *rxm)
 {
-	/* Check for RSS */
-	if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
-		rxm->ol_flags |= PKT_RX_RSS_HASH;
-		rxm->hash.rss = rcd->rssHash;
-	}
+	uint32_t hlen, slen;
+	struct ipv4_hdr *ipv4_hdr;
+	struct ipv6_hdr *ipv6_hdr;
+	struct tcp_hdr *tcp_hdr;
+	char *ptr;
+
+	RTE_ASSERT(rcd->tcp);
+
+	ptr = rte_pktmbuf_mtod(rxm, char *);
+	slen = rte_pktmbuf_data_len(rxm);
+	hlen = sizeof(struct ether_hdr);
 
-	/* Check packet type, checksum errors, etc. Only support IPv4 for now. */
 	if (rcd->v4) {
-		struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
-		struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
+		if (unlikely(slen < hlen + sizeof(struct ipv4_hdr)))
+			return hw->mtu - sizeof(struct ipv4_hdr)
+					- sizeof(struct tcp_hdr);
+
+		ipv4_hdr = (struct ipv4_hdr *)(ptr + hlen);
+		hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+				IPV4_IHL_MULTIPLIER;
+	} else if (rcd->v6) {
+		if (unlikely(slen < hlen + sizeof(struct ipv6_hdr)))
+			return hw->mtu - sizeof(struct ipv6_hdr) -
+					sizeof(struct tcp_hdr);
+
+		ipv6_hdr = (struct ipv6_hdr *)(ptr + hlen);
+		hlen += sizeof(struct ipv6_hdr);
+		if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) {
+			int frag;
+
+			rte_net_skip_ip6_ext(ipv6_hdr->proto, rxm,
+					&hlen, &frag);
+		}
+	}
+
+	if (unlikely(slen < hlen + sizeof(struct tcp_hdr)))
+		return hw->mtu - hlen - sizeof(struct tcp_hdr) +
+				sizeof(struct ether_hdr);
 
-		if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
-			rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
-		else
-			rxm->packet_type = RTE_PTYPE_L3_IPV4;
+	tcp_hdr = (struct tcp_hdr *)(ptr + hlen);
+	hlen += (tcp_hdr->data_off & 0xf0) >> 2;
 
-		if (!rcd->cnc) {
-			if (!rcd->ipc)
-				rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+	if (rxm->udata64 > 1)
+		return (rte_pktmbuf_pkt_len(rxm) - hlen +
+				rxm->udata64 - 1) / rxm->udata64;
+	else
+		return hw->mtu - hlen + sizeof(struct ether_hdr);
+}
 
-			if ((rcd->tcp || rcd->udp) && !rcd->tuc)
-				rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+/* Receive side checksum and other offloads */
+static inline void
+vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+		struct rte_mbuf *rxm, const uint8_t sop)
+{
+	uint64_t ol_flags = rxm->ol_flags;
+	uint32_t packet_type = rxm->packet_type;
+
+	/* Offloads set in sop */
+	if (sop) {
+		/* Set packet type */
+		packet_type |= RTE_PTYPE_L2_ETHER;
+
+		/* Check large packet receive */
+		if (VMXNET3_VERSION_GE_2(hw) &&
+		    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
+			const Vmxnet3_RxCompDescExt *rcde =
+					(const Vmxnet3_RxCompDescExt *)rcd;
+
+			rxm->tso_segsz = rcde->mss;
+			rxm->udata64 = rcde->segCnt;
+			ol_flags |= PKT_RX_LRO;
+		}
+	} else { /* Offloads set in eop */
+		/* Check for RSS */
+		if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
+			ol_flags |= PKT_RX_RSS_HASH;
+			rxm->hash.rss = rcd->rssHash;
+		}
+
+		/* Check for hardware stripped VLAN tag */
+		if (rcd->ts) {
+			ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+			rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+		}
+
+		/* Check packet type, checksum errors, etc. */
+		if (rcd->cnc) {
+			ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+		} else {
+			if (rcd->v4) {
+				packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+				if (rcd->ipc)
+					ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+				else
+					ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+				if (rcd->tuc) {
+					ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+					if (rcd->tcp)
+						packet_type |= RTE_PTYPE_L4_TCP;
+					else
+						packet_type |= RTE_PTYPE_L4_UDP;
+				} else {
+					if (rcd->tcp) {
+						packet_type |= RTE_PTYPE_L4_TCP;
+						ol_flags |= PKT_RX_L4_CKSUM_BAD;
+					} else if (rcd->udp) {
+						packet_type |= RTE_PTYPE_L4_UDP;
+						ol_flags |= PKT_RX_L4_CKSUM_BAD;
+					}
+				}
+			} else if (rcd->v6) {
+				packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+				if (rcd->tuc) {
+					ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+					if (rcd->tcp)
+						packet_type |= RTE_PTYPE_L4_TCP;
+					else
+						packet_type |= RTE_PTYPE_L4_UDP;
+				} else {
+					if (rcd->tcp) {
+						packet_type |= RTE_PTYPE_L4_TCP;
+						ol_flags |= PKT_RX_L4_CKSUM_BAD;
+					} else if (rcd->udp) {
+						packet_type |= RTE_PTYPE_L4_UDP;
+						ol_flags |= PKT_RX_L4_CKSUM_BAD;
+					}
+				}
+			} else {
+				packet_type |= RTE_PTYPE_UNKNOWN;
+			}
+
+			/* Old variants of vmxnet3 do not provide MSS */
+			if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0)
+				rxm->tso_segsz = vmxnet3_guess_mss(hw,
+						rcd, rxm);
 		}
-	} else {
-		rxm->packet_type = RTE_PTYPE_UNKNOWN;
 	}
+
+	rxm->ol_flags = ol_flags;
+	rxm->packet_type = packet_type;
 }
 
 /*
@@ -776,6 +906,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
 		rxm->ol_flags = 0;
 		rxm->vlan_tci = 0;
+		rxm->packet_type = 0;
 
 		/*
 		 * If this is the first buffer of the received packet,
@@ -807,29 +938,28 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			}
 
 			rxq->start_seg = rxm;
-			vmxnet3_rx_offload(rcd, rxm);
+			rxq->last_seg = rxm;
+			vmxnet3_rx_offload(hw, rcd, rxm, 1);
 		} else {
 			struct rte_mbuf *start = rxq->start_seg;
 
 			RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
 
-			start->pkt_len += rxm->data_len;
-			start->nb_segs++;
+			if (rxm->data_len) {
+				start->pkt_len += rxm->data_len;
+				start->nb_segs++;
 
-			rxq->last_seg->next = rxm;
+				rxq->last_seg->next = rxm;
+				rxq->last_seg = rxm;
+			} else {
+				rte_pktmbuf_free_seg(rxm);
+			}
 		}
-		rxq->last_seg = rxm;
 
 		if (rcd->eop) {
 			struct rte_mbuf *start = rxq->start_seg;
 
-			/* Check for hardware stripped VLAN tag */
-			if (rcd->ts) {
-				start->ol_flags |= (PKT_RX_VLAN |
-						PKT_RX_VLAN_STRIPPED);
-				start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
-			}
-
+			vmxnet3_rx_offload(hw, rcd, start, 0);
 			rx_pkts[nb_rx++] = start;
 			rxq->start_seg = NULL;
 		}
@@ -883,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			   uint16_t queue_idx,
 			   uint16_t nb_desc,
 			   unsigned int socket_id,
-			   const struct rte_eth_txconf *tx_conf)
+			   const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
 	const struct rte_memzone *mz;
@@ -895,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
-	    ETH_TXQ_FLAGS_NOXSUMSCTP) {
-		PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
-		return -EINVAL;
-	}
-
 	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
 			  RTE_CACHE_LINE_SIZE);
 	if (txq == NULL) {
@@ -910,7 +1034,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
-	txq->shared = &hw->tqd_start[queue_idx];
+	txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
 	txq->hw = hw;
 	txq->qid = queue_idx;
 	txq->stopped = TRUE;
@@ -1013,7 +1137,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->mp = mp;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->shared = &hw->rqd_start[queue_idx];
+	rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
 	rxq->hw = hw;
 	rxq->qid1 = queue_idx;
 	rxq->qid2 = queue_idx + hw->num_rx_queues;
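
For context on the reworked Rx path (again an illustrative sketch, not part of the patch): an aggregated LRO packet is delivered as a chained mbuf whose first segment carries PKT_RX_LRO and the MSS in tso_segsz, either reported by the device or derived by vmxnet3_guess_mss() on older devices. The handle_rx_mbuf() helper below is hypothetical.

/* Hypothetical helper -- a sketch of how an application might consume the
 * offload metadata the reworked Rx path attaches to the first mbuf segment.
 */
#include <stdio.h>
#include <rte_mbuf.h>

static void
handle_rx_mbuf(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_LRO)
		/* tso_segsz holds the MSS reported by the device, or the
		 * value guessed from the packet headers on older devices. */
		printf("LRO packet: %u bytes, %u segs, mss %u\n",
		       m->pkt_len, m->nb_segs, m->tso_segsz);

	if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD ||
	    (m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
		printf("checksum error reported by hardware\n");
}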