Diffstat (limited to 'drivers/net/vmxnet3')
-rw-r--r--  drivers/net/vmxnet3/Makefile                        84
-rw-r--r--  drivers/net/vmxnet3/base/README                     47
-rw-r--r--  drivers/net/vmxnet3/base/upt1_defs.h               117
-rw-r--r--  drivers/net/vmxnet3/base/vmware_pack_begin.h        32
-rw-r--r--  drivers/net/vmxnet3/base/vmware_pack_end.h          32
-rw-r--r--  drivers/net/vmxnet3/base/vmxnet3_defs.h            744
-rw-r--r--  drivers/net/vmxnet3/base/vmxnet3_osdep.h            48
-rw-r--r--  drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map      4
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c               958
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.h               185
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_logs.h                  74
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ring.h                 169
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c                1105
13 files changed, 3599 insertions, 0 deletions
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
new file mode 100644
index 00000000..4cf3b33b
--- /dev/null
+++ b/drivers/net/vmxnet3/Makefile
@@ -0,0 +1,84 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_vmxnet3_uio.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+ifeq ($(CC), icc)
+#
+# CFLAGS for icc
+#
+CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+
+else ifeq ($(CC), clang)
+#
+# CFLAGS for clang
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+else
+#
+# CFLAGS for gcc
+#
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+endif
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing -Wno-format-extra-args
+
+endif
+
+VPATH += $(SRCDIR)/base
+
+EXPORT_MAP := rte_pmd_vmxnet3_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_ethdev.c
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_net
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/vmxnet3/base/README b/drivers/net/vmxnet3/base/README
new file mode 100644
index 00000000..599a3661
--- /dev/null
+++ b/drivers/net/vmxnet3/base/README
@@ -0,0 +1,47 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Intel VMXNET3 driver
+====================
+
+This directory contains the source code of the FreeBSD VMXNET3 driver released
+by VMware. Of these files, upt1_defs.h and vmxnet3_defs.h are included without
+any change. The other 4 files (includeCheck.h, vmware_pack_begin.h,
+vmware_pack_end.h and vmxnet3_osdep.h) were created to adapt them to the needs
+of the above 2 files.
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ vmxnet3_osdep.h
diff --git a/drivers/net/vmxnet3/base/upt1_defs.h b/drivers/net/vmxnet3/base/upt1_defs.h
new file mode 100644
index 00000000..d9144e32
--- /dev/null
+++ b/drivers/net/vmxnet3/base/upt1_defs.h
@@ -0,0 +1,117 @@
+/*********************************************************
+ * Copyright (C) 2007 VMware, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *********************************************************/
+
+/* upt1_defs.h
+ *
+ * Definitions for UPTv1
+ *
+ * Some of the defs are duplicated in vmkapi_net_upt.h, because
+ * vmkapi_net_upt.h cannot distribute with OSS yet and vmkapi headers can
+ * only include vmkapi headers. Make sure they are kept in sync!
+ */
+
+#ifndef _UPT1_DEFS_H
+#define _UPT1_DEFS_H
+
+#define UPT1_MAX_TX_QUEUES 64
+#define UPT1_MAX_RX_QUEUES 64
+
+#define UPT1_MAX_INTRS (UPT1_MAX_TX_QUEUES + UPT1_MAX_RX_QUEUES)
+
+typedef
+#include "vmware_pack_begin.h"
+struct UPT1_TxStats {
+ uint64 TSOPktsTxOK; /* TSO pkts post-segmentation */
+ uint64 TSOBytesTxOK;
+ uint64 ucastPktsTxOK;
+ uint64 ucastBytesTxOK;
+ uint64 mcastPktsTxOK;
+ uint64 mcastBytesTxOK;
+ uint64 bcastPktsTxOK;
+ uint64 bcastBytesTxOK;
+ uint64 pktsTxError;
+ uint64 pktsTxDiscard;
+}
+#include "vmware_pack_end.h"
+UPT1_TxStats;
+
+typedef
+#include "vmware_pack_begin.h"
+struct UPT1_RxStats {
+ uint64 LROPktsRxOK; /* LRO pkts */
+ uint64 LROBytesRxOK; /* bytes from LRO pkts */
+ /* the following counters are for pkts from the wire, i.e., pre-LRO */
+ uint64 ucastPktsRxOK;
+ uint64 ucastBytesRxOK;
+ uint64 mcastPktsRxOK;
+ uint64 mcastBytesRxOK;
+ uint64 bcastPktsRxOK;
+ uint64 bcastBytesRxOK;
+ uint64 pktsRxOutOfBuf;
+ uint64 pktsRxError;
+}
+#include "vmware_pack_end.h"
+UPT1_RxStats;
+
+/* interrupt moderation level */
+#define UPT1_IML_NONE 0 /* no interrupt moderation */
+#define UPT1_IML_HIGHEST 7 /* least intr generated */
+#define UPT1_IML_ADAPTIVE 8 /* adaptive intr moderation */
+
+/* values for UPT1_RSSConf.hashFunc */
+#define UPT1_RSS_HASH_TYPE_NONE 0x0
+#define UPT1_RSS_HASH_TYPE_IPV4 0x01
+#define UPT1_RSS_HASH_TYPE_TCP_IPV4 0x02
+#define UPT1_RSS_HASH_TYPE_IPV6 0x04
+#define UPT1_RSS_HASH_TYPE_TCP_IPV6 0x08
+
+#define UPT1_RSS_HASH_FUNC_NONE 0x0
+#define UPT1_RSS_HASH_FUNC_TOEPLITZ 0x01
+
+#define UPT1_RSS_MAX_KEY_SIZE 40
+#define UPT1_RSS_MAX_IND_TABLE_SIZE 128
+
+typedef
+#include "vmware_pack_begin.h"
+struct UPT1_RSSConf {
+ uint16 hashType;
+ uint16 hashFunc;
+ uint16 hashKeySize;
+ uint16 indTableSize;
+ uint8 hashKey[UPT1_RSS_MAX_KEY_SIZE];
+ uint8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
+}
+#include "vmware_pack_end.h"
+UPT1_RSSConf;
+
+/* features */
+#define UPT1_F_RXCSUM 0x0001 /* rx csum verification */
+#define UPT1_F_RSS 0x0002
+#define UPT1_F_RXVLAN 0x0004 /* VLAN tag stripping */
+#define UPT1_F_LRO 0x0008
+
+#endif
diff --git a/drivers/net/vmxnet3/base/vmware_pack_begin.h b/drivers/net/vmxnet3/base/vmware_pack_begin.h
new file mode 100644
index 00000000..860ec4c3
--- /dev/null
+++ b/drivers/net/vmxnet3/base/vmware_pack_begin.h
@@ -0,0 +1,32 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/drivers/net/vmxnet3/base/vmware_pack_end.h b/drivers/net/vmxnet3/base/vmware_pack_end.h
new file mode 100644
index 00000000..860ec4c3
--- /dev/null
+++ b/drivers/net/vmxnet3/base/vmware_pack_end.h
@@ -0,0 +1,32 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h
new file mode 100644
index 00000000..68ae8b6d
--- /dev/null
+++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h
@@ -0,0 +1,744 @@
+/*********************************************************
+ * Copyright (C) 2007 VMware, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *********************************************************/
+
+/*
+ * vmxnet3_defs.h --
+ *
+ * Definitions shared by device emulation and guest drivers for
+ * VMXNET3 NIC
+ */
+
+#ifndef _VMXNET3_DEFS_H_
+#define _VMXNET3_DEFS_H_
+
+#include "vmxnet3_osdep.h"
+#include "upt1_defs.h"
+
+/* all registers are 32 bit wide */
+/* BAR 1 */
+#define VMXNET3_REG_VRRS 0x0 /* Vmxnet3 Revision Report Selection */
+#define VMXNET3_REG_UVRS 0x8 /* UPT Version Report Selection */
+#define VMXNET3_REG_DSAL 0x10 /* Driver Shared Address Low */
+#define VMXNET3_REG_DSAH 0x18 /* Driver Shared Address High */
+#define VMXNET3_REG_CMD 0x20 /* Command */
+#define VMXNET3_REG_MACL 0x28 /* MAC Address Low */
+#define VMXNET3_REG_MACH 0x30 /* MAC Address High */
+#define VMXNET3_REG_ICR 0x38 /* Interrupt Cause Register */
+#define VMXNET3_REG_ECR 0x40 /* Event Cause Register */
+
+#define VMXNET3_REG_WSAL 0xF00 /* Wireless Shared Address Lo */
+#define VMXNET3_REG_WSAH 0xF08 /* Wireless Shared Address Hi */
+#define VMXNET3_REG_WCMD 0xF18 /* Wireless Command */
+
+/* BAR 0 */
+#define VMXNET3_REG_IMR 0x0 /* Interrupt Mask Register */
+#define VMXNET3_REG_TXPROD 0x600 /* Tx Producer Index */
+#define VMXNET3_REG_RXPROD 0x800 /* Rx Producer Index for ring 1 */
+#define VMXNET3_REG_RXPROD2 0xA00 /* Rx Producer Index for ring 2 */
+
+#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
+#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
+
+/*
+ * The two Vmxnet3 MMIO Register PCI BARs (BAR 0 at offset 10h and BAR 1 at
+ * offset 14h) as well as the MSI-X BAR are combined into one PhysMem region:
+ * <-VMXNET3_PT_REG_SIZE-><-VMXNET3_VD_REG_SIZE-><-VMXNET3_MSIX_BAR_SIZE-->
+ * -------------------------------------------------------------------------
+ * |Pass Thru Registers | Virtual Dev Registers | MSI-X Vector/PBA Table |
+ * -------------------------------------------------------------------------
+ * VMXNET3_MSIX_BAR_SIZE is defined in "vmxnet3Int.h"
+ */
+#define VMXNET3_PHYSMEM_PAGES 4
+
+#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
+#define VMXNET3_REG_ALIGN_MASK 0x7
+
+/* I/O Mapped access to registers */
+#define VMXNET3_IO_TYPE_PT 0
+#define VMXNET3_IO_TYPE_VD 1
+#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
+#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
+#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
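+/*
+ * Illustrative example (editor's note, not part of the original header):
+ * encoding the BAR1 command register as an I/O-mapped address and decoding
+ * it again with the macros above:
+ *
+ *   uint32 a = VMXNET3_IO_ADDR(VMXNET3_IO_TYPE_VD, VMXNET3_REG_CMD);
+ *   VMXNET3_IO_TYPE(a) evaluates to VMXNET3_IO_TYPE_VD (1)
+ *   VMXNET3_IO_REG(a)  evaluates to VMXNET3_REG_CMD (0x20)
+ */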
+
+#ifndef __le16
+#define __le16 uint16
+#endif
+#ifndef __le32
+#define __le32 uint32
+#endif
+#ifndef __le64
+#define __le64 uint64
+#endif
+
+typedef enum {
+ VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
+ VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
+ VMXNET3_CMD_QUIESCE_DEV,
+ VMXNET3_CMD_RESET_DEV,
+ VMXNET3_CMD_UPDATE_RX_MODE,
+ VMXNET3_CMD_UPDATE_MAC_FILTERS,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS,
+ VMXNET3_CMD_UPDATE_RSSIDT,
+ VMXNET3_CMD_UPDATE_IML,
+ VMXNET3_CMD_UPDATE_PMCFG,
+ VMXNET3_CMD_UPDATE_FEATURE,
+ VMXNET3_CMD_STOP_EMULATION,
+ VMXNET3_CMD_LOAD_PLUGIN,
+ VMXNET3_CMD_ACTIVATE_VF,
+
+ VMXNET3_CMD_FIRST_GET = 0xF00D0000,
+ VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
+ VMXNET3_CMD_GET_STATS,
+ VMXNET3_CMD_GET_LINK,
+ VMXNET3_CMD_GET_PERM_MAC_LO,
+ VMXNET3_CMD_GET_PERM_MAC_HI,
+ VMXNET3_CMD_GET_DID_LO,
+ VMXNET3_CMD_GET_DID_HI,
+ VMXNET3_CMD_GET_DEV_EXTRA_INFO,
+ VMXNET3_CMD_GET_CONF_INTR,
+ VMXNET3_CMD_GET_ADAPTIVE_RING_INFO
+} Vmxnet3_Cmd;
+
+/* Adaptive Ring Info Flags */
+#define VMXNET3_DISABLE_ADAPTIVE_RING 1
+
+/*
+ * Little Endian layout of bitfields -
+ * Byte 0 : 7.....len.....0
+ * Byte 1 : rsvd gen 13.len.8
+ * Byte 2 : 5.msscof.0 ext1 dtype
+ * Byte 3 : 13...msscof...6
+ *
+ * Big Endian layout of bitfields -
+ * Byte 0: 13...msscof...6
+ * Byte 1 : 5.msscof.0 ext1 dtype
+ * Byte 2 : rsvd gen 13.len.8
+ * Byte 3 : 7.....len.....0
+ *
+ * Thus, le32_to_cpu on the dword will allow the big endian driver to read
+ * the bit fields correctly. And cpu_to_le32 will convert bit fields
+ * written by a big endian driver to the format required by the device.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxDesc {
+ __le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 msscof:14; /* MSS, checksum offset, flags */
+ uint32 ext1:1;
+ uint32 dtype:1; /* descriptor type */
+ uint32 rsvd:1;
+ uint32 gen:1; /* generation bit */
+ uint32 len:14;
+#else
+ uint32 len:14;
+ uint32 gen:1; /* generation bit */
+ uint32 rsvd:1;
+ uint32 dtype:1; /* descriptor type */
+ uint32 ext1:1;
+ uint32 msscof:14; /* MSS, checksum offset, flags */
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 tci:16; /* Tag to Insert */
+ uint32 ti:1; /* VLAN Tag Insertion */
+ uint32 ext2:1;
+ uint32 cq:1; /* completion request */
+ uint32 eop:1; /* End Of Packet */
+ uint32 om:2; /* offload mode */
+ uint32 hlen:10; /* header len */
+#else
+ uint32 hlen:10; /* header len */
+ uint32 om:2; /* offload mode */
+ uint32 eop:1; /* End Of Packet */
+ uint32 cq:1; /* completion request */
+ uint32 ext2:1;
+ uint32 ti:1; /* VLAN Tag Insertion */
+ uint32 tci:16; /* Tag to Insert */
+#endif /* __BIG_ENDIAN_BITFIELD */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxDesc;
+
+/* TxDesc.OM values */
+#define VMXNET3_OM_NONE 0
+#define VMXNET3_OM_CSUM 2
+#define VMXNET3_OM_TSO 3
+
+/* fields in TxDesc we access w/o using bit fields */
+#define VMXNET3_TXD_EOP_SHIFT 12
+#define VMXNET3_TXD_CQ_SHIFT 13
+#define VMXNET3_TXD_GEN_SHIFT 14
+#define VMXNET3_TXD_EOP_DWORD_SHIFT 3
+#define VMXNET3_TXD_GEN_DWORD_SHIFT 2
+
+#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
+#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
+#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
+
+#define VMXNET3_TXD_GEN_SIZE 1
+#define VMXNET3_TXD_EOP_SIZE 1
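+/*
+ * Illustrative sketch (editor's note, not part of the original header):
+ * the shifts above let the driver test flags through a plain uint32 view
+ * of the descriptor instead of the bit fields, e.g. for the gen bit:
+ *
+ *   const uint32 *dw = (const uint32 *)txd;
+ *   int gen_set = (dw[VMXNET3_TXD_GEN_DWORD_SHIFT] & VMXNET3_TXD_GEN) != 0;
+ */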
+
+#define VMXNET3_HDR_COPY_SIZE 128
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxDataDesc {
+ uint8 data[VMXNET3_HDR_COPY_SIZE];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxDataDesc;
+
+#define VMXNET3_TCD_GEN_SHIFT 31
+#define VMXNET3_TCD_GEN_SIZE 1
+#define VMXNET3_TCD_TXIDX_SHIFT 0
+#define VMXNET3_TCD_TXIDX_SIZE 12
+#define VMXNET3_TCD_GEN_DWORD_SHIFT 3
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxCompDesc {
+ uint32 txdIdx:12; /* Index of the EOP TxDesc */
+ uint32 ext1:20;
+
+ __le32 ext2;
+ __le32 ext3;
+
+ uint32 rsvd:24;
+ uint32 type:7; /* completion type */
+ uint32 gen:1; /* generation bit */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxCompDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxDesc {
+ __le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gen:1; /* Generation bit */
+ uint32 rsvd:15;
+ uint32 dtype:1; /* Descriptor type */
+ uint32 btype:1; /* Buffer Type */
+ uint32 len:14;
+#else
+ uint32 len:14;
+ uint32 btype:1; /* Buffer Type */
+ uint32 dtype:1; /* Descriptor type */
+ uint32 rsvd:15;
+ uint32 gen:1; /* Generation bit */
+#endif
+ __le32 ext1;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxDesc;
+
+/* values of RXD.BTYPE */
+#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */
+#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */
+
+/* fields in RxDesc we access w/o using bit fields */
+#define VMXNET3_RXD_BTYPE_SHIFT 14
+#define VMXNET3_RXD_GEN_SHIFT 31
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxCompDesc {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 ext2:1;
+ uint32 cnc:1; /* Checksum Not Calculated */
+ uint32 rssType:4; /* RSS hash type used */
+ uint32 rqID:10; /* rx queue/ring ID */
+ uint32 sop:1; /* Start of Packet */
+ uint32 eop:1; /* End of Packet */
+ uint32 ext1:2;
+ uint32 rxdIdx:12; /* Index of the RxDesc */
+#else
+ uint32 rxdIdx:12; /* Index of the RxDesc */
+ uint32 ext1:2;
+ uint32 eop:1; /* End of Packet */
+ uint32 sop:1; /* Start of Packet */
+ uint32 rqID:10; /* rx queue/ring ID */
+ uint32 rssType:4; /* RSS hash type used */
+ uint32 cnc:1; /* Checksum Not Calculated */
+ uint32 ext2:1;
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+ __le32 rssHash; /* RSS hash value */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 tci:16; /* Tag stripped */
+ uint32 ts:1; /* Tag is stripped */
+ uint32 err:1; /* Error */
+ uint32 len:14; /* data length */
+#else
+ uint32 len:14; /* data length */
+ uint32 err:1; /* Error */
+ uint32 ts:1; /* Tag is stripped */
+ uint32 tci:16; /* Tag stripped */
+#endif /* __BIG_ENDIAN_BITFIELD */
+
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gen:1; /* generation bit */
+ uint32 type:7; /* completion type */
+ uint32 fcs:1; /* Frame CRC correct */
+ uint32 frg:1; /* IP Fragment */
+ uint32 v4:1; /* IPv4 */
+ uint32 v6:1; /* IPv6 */
+ uint32 ipc:1; /* IP Checksum Correct */
+ uint32 tcp:1; /* TCP packet */
+ uint32 udp:1; /* UDP packet */
+ uint32 tuc:1; /* TCP/UDP Checksum Correct */
+ uint32 csum:16;
+#else
+ uint32 csum:16;
+ uint32 tuc:1; /* TCP/UDP Checksum Correct */
+ uint32 udp:1; /* UDP packet */
+ uint32 tcp:1; /* TCP packet */
+ uint32 ipc:1; /* IP Checksum Correct */
+ uint32 v6:1; /* IPv6 */
+ uint32 v4:1; /* IPv4 */
+ uint32 frg:1; /* IP Fragment */
+ uint32 fcs:1; /* Frame CRC correct */
+ uint32 type:7; /* completion type */
+ uint32 gen:1; /* generation bit */
+#endif /* __BIG_ENDIAN_BITFIELD */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxCompDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxCompDescExt {
+ __le32 dword1;
+ uint8 segCnt; /* Number of aggregated packets */
+ uint8 dupAckCnt; /* Number of duplicate Acks */
+ __le16 tsDelta; /* TCP timestamp difference */
+ __le32 dword2[2];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxCompDescExt;
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
+#define VMXNET3_RCD_TUC_SHIFT 16
+#define VMXNET3_RCD_IPC_SHIFT 19
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
+#define VMXNET3_RCD_TYPE_SHIFT 56
+#define VMXNET3_RCD_GEN_SHIFT 63
+
+/* csum OK for TCP/UDP pkts over IP */
+#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | 1 << VMXNET3_RCD_IPC_SHIFT)
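+/*
+ * Illustrative usage (editor's note, not part of the original header):
+ * checksum validity of a received packet can be tested on dword 3 of the
+ * completion descriptor, e.g.:
+ *
+ *   if ((((const uint32 *)rcd)[3] & VMXNET3_RCD_CSUM_OK) ==
+ *       VMXNET3_RCD_CSUM_OK)
+ *           then both the IP and the TCP/UDP checksums were verified OK
+ */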
+
+/* value of RxCompDesc.rssType */
+#define VMXNET3_RCD_RSS_TYPE_NONE 0
+#define VMXNET3_RCD_RSS_TYPE_IPV4 1
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV4 2
+#define VMXNET3_RCD_RSS_TYPE_IPV6 3
+#define VMXNET3_RCD_RSS_TYPE_TCPIPV6 4
+
+/* a union for accessing all cmd/completion descriptors */
+typedef union Vmxnet3_GenericDesc {
+ __le64 qword[2];
+ __le32 dword[4];
+ __le16 word[8];
+ Vmxnet3_TxDesc txd;
+ Vmxnet3_RxDesc rxd;
+ Vmxnet3_TxCompDesc tcd;
+ Vmxnet3_RxCompDesc rcd;
+ Vmxnet3_RxCompDescExt rcdExt;
+} Vmxnet3_GenericDesc;
+
+#define VMXNET3_INIT_GEN 1
+
+/* Max size of a single tx buffer */
+#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)
+
+/* # of tx desc needed for a tx buffer size */
+#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / VMXNET3_MAX_TX_BUF_SIZE)
+
+/* max # of tx descs for a non-tso pkt */
+#define VMXNET3_MAX_TXD_PER_PKT 16
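+/*
+ * Worked example (editor's note, not part of the original header): a
+ * 40000-byte TSO buffer spans VMXNET3_TXD_NEEDED(40000) =
+ * (40000 + 16383) / 16384 = 3 descriptors; non-TSO packets are further
+ * capped at VMXNET3_MAX_TXD_PER_PKT descriptors.
+ */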
+
+/* Max size of a single rx buffer */
+#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
+/* Minimum size of a type 0 buffer */
+#define VMXNET3_MIN_T0_BUF_SIZE 128
+#define VMXNET3_MAX_CSUM_OFFSET 1024
+
+/* Ring base address alignment */
+#define VMXNET3_RING_BA_ALIGN 512
+#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1)
+
+/* Ring size must be a multiple of 32 */
+#define VMXNET3_RING_SIZE_ALIGN 32
+#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
+
+/* Max ring size */
+#define VMXNET3_TX_RING_MAX_SIZE 4096
+#define VMXNET3_TC_RING_MAX_SIZE 4096
+#define VMXNET3_RX_RING_MAX_SIZE 4096
+#define VMXNET3_RC_RING_MAX_SIZE 8192
+
+/* a list of reasons for queue stop */
+
+#define VMXNET3_ERR_NOEOP 0x80000000 /* cannot find the EOP desc of a pkt */
+#define VMXNET3_ERR_TXD_REUSE 0x80000001 /* reuse a TxDesc before tx completion */
+#define VMXNET3_ERR_BIG_PKT 0x80000002 /* too many TxDesc for a pkt */
+#define VMXNET3_ERR_DESC_NOT_SPT 0x80000003 /* descriptor type not supported */
+#define VMXNET3_ERR_SMALL_BUF 0x80000004 /* type 0 buffer too small */
+#define VMXNET3_ERR_STRESS 0x80000005 /* stress option firing in vmkernel */
+#define VMXNET3_ERR_SWITCH 0x80000006 /* mode switch failure */
+#define VMXNET3_ERR_TXD_INVALID 0x80000007 /* invalid TxDesc */
+
+/* completion descriptor types */
+#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */
+#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */
+#define VMXNET3_CDTYPE_RXCOMP_LRO 4 /* Rx Completion Descriptor for LRO */
+
+#define VMXNET3_GOS_BITS_UNK 0 /* unknown */
+#define VMXNET3_GOS_BITS_32 1
+#define VMXNET3_GOS_BITS_64 2
+
+#define VMXNET3_GOS_TYPE_UNK 0 /* unknown */
+#define VMXNET3_GOS_TYPE_LINUX 1
+#define VMXNET3_GOS_TYPE_WIN 2
+#define VMXNET3_GOS_TYPE_SOLARIS 3
+#define VMXNET3_GOS_TYPE_FREEBSD 4
+#define VMXNET3_GOS_TYPE_PXE 5
+
+/* All structures in DriverShared are padded to multiples of 8 bytes */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_GOSInfo {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gosMisc: 10; /* other info about gos */
+ uint32 gosVer: 16; /* gos version */
+ uint32 gosType: 4; /* which guest */
+ uint32 gosBits: 2; /* 32-bit or 64-bit? */
+#else
+ uint32 gosBits: 2; /* 32-bit or 64-bit? */
+ uint32 gosType: 4; /* which guest */
+ uint32 gosVer: 16; /* gos version */
+ uint32 gosMisc: 10; /* other info about gos */
+#endif /* __BIG_ENDIAN_BITFIELD */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_GOSInfo;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DriverInfo {
+ __le32 version; /* driver version */
+ Vmxnet3_GOSInfo gos;
+ __le32 vmxnet3RevSpt; /* vmxnet3 revision supported */
+ __le32 uptVerSpt; /* upt version supported */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DriverInfo;
+
+#define VMXNET3_REV1_MAGIC 0xbabefee1
+
+/*
+ * QueueDescPA must be 128-byte aligned. It points to an array of
+ * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
+ * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc entries is given by
+ * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
+ */
+#define VMXNET3_QUEUE_DESC_ALIGN 128
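+/*
+ * Illustrative sketch (editor's note, not part of the original header):
+ * given the layout described above, the rx queue descriptors start right
+ * after the tx queue descriptors in the same table:
+ *
+ *   Vmxnet3_TxQueueDesc *tqd = (Vmxnet3_TxQueueDesc *)table_base;
+ *   Vmxnet3_RxQueueDesc *rqd = (Vmxnet3_RxQueueDesc *)(tqd + numTxQueues);
+ *
+ * (table_base and numTxQueues are placeholder names here.)
+ */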
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MiscConf {
+ Vmxnet3_DriverInfo driverInfo;
+ __le64 uptFeatures;
+ __le64 ddPA; /* driver data PA */
+ __le64 queueDescPA; /* queue descriptor table PA */
+ __le32 ddLen; /* driver data len */
+ __le32 queueDescLen; /* queue descriptor table len, in bytes */
+ __le32 mtu;
+ __le16 maxNumRxSG;
+ uint8 numTxQueues;
+ uint8 numRxQueues;
+ __le32 reserved[4];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MiscConf;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxQueueConf {
+ __le64 txRingBasePA;
+ __le64 dataRingBasePA;
+ __le64 compRingBasePA;
+ __le64 ddPA; /* driver data */
+ __le64 reserved;
+ __le32 txRingSize; /* # of tx desc */
+ __le32 dataRingSize; /* # of data desc */
+ __le32 compRingSize; /* # of comp desc */
+ __le32 ddLen; /* size of driver data */
+ uint8 intrIdx;
+ uint8 _pad[7];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxQueueConf;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxQueueConf {
+ __le64 rxRingBasePA[2];
+ __le64 compRingBasePA;
+ __le64 ddPA; /* driver data */
+ __le64 reserved;
+ __le32 rxRingSize[2]; /* # of rx desc */
+ __le32 compRingSize; /* # of rx comp desc */
+ __le32 ddLen; /* size of driver data */
+ uint8 intrIdx;
+ uint8 _pad[7];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxQueueConf;
+
+enum vmxnet3_intr_mask_mode {
+ VMXNET3_IMM_AUTO = 0,
+ VMXNET3_IMM_ACTIVE = 1,
+ VMXNET3_IMM_LAZY = 2
+};
+
+enum vmxnet3_intr_type {
+ VMXNET3_IT_AUTO = 0,
+ VMXNET3_IT_INTX = 1,
+ VMXNET3_IT_MSI = 2,
+ VMXNET3_IT_MSIX = 3
+};
+
+#define VMXNET3_MAX_TX_QUEUES 8
+#define VMXNET3_MAX_RX_QUEUES 16
+/* plus 1 for events */
+#define VMXNET3_MAX_INTRS 25
+
+/* value of intrCtrl */
+#define VMXNET3_IC_DISABLE_ALL 0x1 /* bit 0 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_IntrConf {
+ Bool autoMask;
+ uint8 numIntrs; /* # of interrupts */
+ uint8 eventIntrIdx;
+ uint8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for each intr */
+ __le32 intrCtrl;
+ __le32 reserved[2];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_IntrConf;
+
+/* one bit per VLAN ID, the size is in the units of uint32 */
+#define VMXNET3_VFT_SIZE (4096 / (sizeof(uint32) * 8))
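+/* Editor's note: 4096 VLAN IDs at one bit each, packed 32 bits per uint32,
+ * give 4096 / 32 = 128 table entries.
+ */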
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_QueueStatus {
+ Bool stopped;
+ uint8 _pad[3];
+ __le32 error;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_QueueStatus;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxQueueCtrl {
+ __le32 txNumDeferred;
+ __le32 txThreshold;
+ __le64 reserved;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxQueueCtrl;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxQueueCtrl {
+ Bool updateRxProd;
+ uint8 _pad[7];
+ __le64 reserved;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxQueueCtrl;
+
+#define VMXNET3_RXM_UCAST 0x01 /* unicast only */
+#define VMXNET3_RXM_MCAST 0x02 /* multicast passing the filters */
+#define VMXNET3_RXM_BCAST 0x04 /* broadcast only */
+#define VMXNET3_RXM_ALL_MULTI 0x08 /* all multicast */
+#define VMXNET3_RXM_PROMISC 0x10 /* promiscuous */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxFilterConf {
+ __le32 rxMode; /* VMXNET3_RXM_xxx */
+ __le16 mfTableLen; /* size of the multicast filter table */
+ __le16 _pad1;
+ __le64 mfTablePA; /* PA of the multicast filters table */
+ __le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxFilterConf;
+
+#define VMXNET3_PM_MAX_FILTERS 6
+#define VMXNET3_PM_MAX_PATTERN_SIZE 128
+#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
+
+#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching filters */
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_PM_PktFilter {
+ uint8 maskSize;
+ uint8 patternSize;
+ uint8 mask[VMXNET3_PM_MAX_MASK_SIZE];
+ uint8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
+ uint8 pad[6];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_PM_PktFilter;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_PMConf {
+ __le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
+ uint8 numFilters;
+ uint8 pad[5];
+ Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_PMConf;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_VariableLenConfDesc {
+ __le32 confVer;
+ __le32 confLen;
+ __le64 confPA;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_VariableLenConfDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DSDevRead {
+ /* read-only region for device, read by dev in response to a SET cmd */
+ Vmxnet3_MiscConf misc;
+ Vmxnet3_IntrConf intrConf;
+ Vmxnet3_RxFilterConf rxFilterConf;
+ Vmxnet3_VariableLenConfDesc rssConfDesc;
+ Vmxnet3_VariableLenConfDesc pmConfDesc;
+ Vmxnet3_VariableLenConfDesc pluginConfDesc;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DSDevRead;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_TxQueueDesc {
+ Vmxnet3_TxQueueCtrl ctrl;
+ Vmxnet3_TxQueueConf conf;
+ /* Driver read after a GET command */
+ Vmxnet3_QueueStatus status;
+ UPT1_TxStats stats;
+ uint8 _pad[88]; /* 128 aligned */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_TxQueueDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_RxQueueDesc {
+ Vmxnet3_RxQueueCtrl ctrl;
+ Vmxnet3_RxQueueConf conf;
+ /* Driver read after a GET command */
+ Vmxnet3_QueueStatus status;
+ UPT1_RxStats stats;
+ uint8 _pad[88]; /* 128 aligned */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_RxQueueDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DriverShared {
+ __le32 magic;
+ __le32 pad; /* make devRead start at 64-bit boundaries */
+ Vmxnet3_DSDevRead devRead;
+ __le32 ecr;
+ __le32 reserved[5];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DriverShared;
+
+#define VMXNET3_ECR_RQERR (1 << 0)
+#define VMXNET3_ECR_TQERR (1 << 1)
+#define VMXNET3_ECR_LINK (1 << 2)
+#define VMXNET3_ECR_DIC (1 << 3)
+#define VMXNET3_ECR_DEBUG (1 << 4)
+
+/* flip the gen bit of a ring */
+#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
+
+/* only use this if moving the idx won't affect the gen bit */
+#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
+do {\
+ (idx)++;\
+ if (UNLIKELY((idx) == (ring_size))) {\
+ (idx) = 0;\
+ }\
+} while (0)
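+/*
+ * Illustrative usage (editor's note, not part of the original header):
+ * when a wrap does affect the generation bit, the increment is paired
+ * with a gen flip so the device can tell fresh descriptors from stale
+ * ones:
+ *
+ *   if (++idx == ring_size) {
+ *           idx = 0;
+ *           VMXNET3_FLIP_RING_GEN(gen);
+ *   }
+ */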
+
+#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
+ vfTable[vid >> 5] |= (1 << (vid & 31))
+#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
+ vfTable[vid >> 5] &= ~(1 << (vid & 31))
+
+#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
+ ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
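+/*
+ * Worked example (editor's note, not part of the original header): for
+ * VLAN ID 100, the filter bit lives in vfTable[100 >> 5] = vfTable[3] at
+ * bit (100 & 31) = 4, so VMXNET3_SET_VFTABLE_ENTRY(vfTable, 100) performs
+ * vfTable[3] |= (1 << 4).
+ */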
+
+#define VMXNET3_MAX_MTU 9000
+#define VMXNET3_MIN_MTU 60
+
+#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
+#define VMXNET3_LINK_DOWN 0
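+/*
+ * Editor's note (not part of the original header): the link word packs the
+ * speed in Mbps into the upper 16 bits and the up/down flag into bit 0,
+ * so after issuing VMXNET3_CMD_GET_LINK a driver can decode it as:
+ *
+ *   int up = link & 1;
+ *   uint32 speed_mbps = link >> 16;   (10000 for VMXNET3_LINK_UP)
+ */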
+
+#define VMXWIFI_DRIVER_SHARED_LEN 8192
+
+#define VMXNET3_DID_PASSTHRU 0xFFFF
+
+#endif /* _VMXNET3_DEFS_H_ */
diff --git a/drivers/net/vmxnet3/base/vmxnet3_osdep.h b/drivers/net/vmxnet3/base/vmxnet3_osdep.h
new file mode 100644
index 00000000..b6e3469c
--- /dev/null
+++ b/drivers/net/vmxnet3/base/vmxnet3_osdep.h
@@ -0,0 +1,48 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VMXNET3_OSDEP_H
+#define _VMXNET3_OSDEP_H
+
+typedef uint64_t uint64;
+typedef uint32_t uint32;
+typedef uint16_t uint16;
+typedef uint8_t uint8;
+typedef int bool;
+typedef char Bool;
+
+#ifndef UNLIKELY
+#define UNLIKELY(x) __builtin_expect((x),0)
+#endif /* unlikely */
+
+#endif /* _VMXNET3_OSDEP_H */
diff --git a/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map b/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map
new file mode 100644
index 00000000..ef353984
--- /dev/null
+++ b/drivers/net/vmxnet3/rte_pmd_vmxnet3_version.map
@@ -0,0 +1,4 @@
+DPDK_2.0 {
+
+ local: *;
+};
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
new file mode 100644
index 00000000..bd7a2bb7
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -0,0 +1,958 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_string_fns.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include "base/vmxnet3_defs.h"
+
+#include "vmxnet3_ring.h"
+#include "vmxnet3_logs.h"
+#include "vmxnet3_ethdev.h"
+
+#define PROCESS_SYS_EVENTS 0
+
+static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
+static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
+static int vmxnet3_dev_start(struct rte_eth_dev *dev);
+static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
+static void vmxnet3_dev_close(struct rte_eth_dev *dev);
+static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
+static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static const uint32_t *
+vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
+ uint16_t vid, int on);
+static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+
+#if PROCESS_SYS_EVENTS == 1
+static void vmxnet3_process_events(struct vmxnet3_hw *);
+#endif
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_vmxnet3_map[] = {
+
+#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
+ .dev_configure = vmxnet3_dev_configure,
+ .dev_start = vmxnet3_dev_start,
+ .dev_stop = vmxnet3_dev_stop,
+ .dev_close = vmxnet3_dev_close,
+ .promiscuous_enable = vmxnet3_dev_promiscuous_enable,
+ .promiscuous_disable = vmxnet3_dev_promiscuous_disable,
+ .allmulticast_enable = vmxnet3_dev_allmulticast_enable,
+ .allmulticast_disable = vmxnet3_dev_allmulticast_disable,
+ .link_update = vmxnet3_dev_link_update,
+ .stats_get = vmxnet3_dev_stats_get,
+ .mac_addr_set = vmxnet3_mac_addr_set,
+ .dev_infos_get = vmxnet3_dev_info_get,
+ .dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
+ .vlan_filter_set = vmxnet3_dev_vlan_filter_set,
+ .vlan_offload_set = vmxnet3_dev_vlan_offload_set,
+ .rx_queue_setup = vmxnet3_dev_rx_queue_setup,
+ .rx_queue_release = vmxnet3_dev_rx_queue_release,
+ .tx_queue_setup = vmxnet3_dev_tx_queue_setup,
+ .tx_queue_release = vmxnet3_dev_tx_queue_release,
+};
+
+static const struct rte_memzone *
+gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
+ const char *post_string, int socket_id, uint16_t align)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%d_%s",
+ dev->driver->pci_drv.name, dev->data->port_id, post_string);
+
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+ return rte_memzone_reserve_aligned(z_name, size,
+ socket_id, 0, align);
+}
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * - Pointer to the structure rte_eth_dev to read from.
+ * @param link
+ * - Pointer to the buffer in which the link status is saved.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+
+static int
+vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
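+/*
+ * Editor's note (not in the original source): rte_atomic64_cmpset(dst,
+ * *dst, *src) copies the 64-bit link word in a single atomic step; the
+ * compare-and-set fails (returns 0) only if another thread modified *dst
+ * between reading the expected value and the swap, which this helper
+ * reports as -1.
+ */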
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * - Pointer to the structure rte_eth_dev to write to.
+ * @param link
+ * - Pointer to the buffer holding the link status to be written.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static int
+vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * This function is based on vmxnet3_disable_intr()
+ */
+static void
+vmxnet3_disable_intr(struct vmxnet3_hw *hw)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
+ for (i = 0; i < VMXNET3_MAX_INTRS; i++)
+ VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
+}
+
+/*
+ * It returns 0 on success.
+ */
+static int
+eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct vmxnet3_hw *hw = eth_dev->data->dev_private;
+ uint32_t mac_hi, mac_lo, ver;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
+ eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
+ pci_dev = eth_dev->pci_dev;
+
+ /*
+ * for secondary processes, we don't initialize any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ /* Vendor and Device ID need to be set before init of shared code */
+ hw->device_id = pci_dev->id.device_id;
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
+ hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
+
+ hw->num_rx_queues = 1;
+ hw->num_tx_queues = 1;
+ hw->bufs_per_pkt = 1;
+
+ /* Check h/w version compatibility with driver. */
+ ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
+ PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
+ if (ver & 0x1)
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
+ else {
+ PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1");
+ return -EIO;
+ }
+
+ /* Check UPT version compatibility with driver. */
+ ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
+ PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
+ if (ver & 0x1)
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
+ else {
+ PMD_INIT_LOG(ERR, "Incompatible UPT version.");
+ return -EIO;
+ }
+
+ /* Getting MAC Address */
+ mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
+ mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
+ memcpy(hw->perm_addr, &mac_lo, 4);
+ memcpy(hw->perm_addr + 4, &mac_hi, 2);
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
+ VMXNET3_MAX_MAC_ADDRS, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
+ return -ENOMEM;
+ }
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *) hw->perm_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
+ hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
+ hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+
+ /* Put device in Quiesce Mode */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
+
+ /* allow untagged pkts */
+ VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
+
+ return 0;
+}
+
+static int
+eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct vmxnet3_hw *hw = eth_dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (hw->adapter_stopped == 0)
+ vmxnet3_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static struct eth_driver rte_vmxnet3_pmd = {
+ .pci_drv = {
+ .name = "rte_vmxnet3_pmd",
+ .id_table = pci_id_vmxnet3_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ },
+ .eth_dev_init = eth_vmxnet3_dev_init,
+ .eth_dev_uninit = eth_vmxnet3_dev_uninit,
+ .dev_private_size = sizeof(struct vmxnet3_hw),
+};
+
+/*
+ * Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Poll Mode] Driver of Virtual PCI VMXNET3 devices.
+ */
+static int
+rte_vmxnet3_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_eth_driver_register(&rte_vmxnet3_pmd);
+ return 0;
+}
+
+static int
+vmxnet3_dev_configure(struct rte_eth_dev *dev)
+{
+ const struct rte_memzone *mz;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ size_t size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->nb_rx_queues > UINT8_MAX ||
+ dev->data->nb_tx_queues > UINT8_MAX)
+ return -EINVAL;
+
+ size = dev->data->nb_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
+ dev->data->nb_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
+
+ if (size > UINT16_MAX)
+ return -EINVAL;
+
+ hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
+ hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
+
+ /*
+ * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
+ * on current socket
+ */
+ mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
+ "shared", rte_socket_id(), 8);
+
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ hw->shared = mz->addr;
+ hw->sharedPA = mz->phys_addr;
+
+ /*
+ * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
+ * on current socket
+ */
+ mz = gpa_zone_reserve(dev, size, "queuedesc",
+ rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
+ hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
+
+ hw->queueDescPA = mz->phys_addr;
+ hw->queue_desc_len = (uint16_t)size;
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+
+ /* Allocate memory structure for UPT1_RSSConf and configure */
+ mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
+ rte_socket_id(), RTE_CACHE_LINE_SIZE);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR,
+ "ERROR: Creating rss_conf structure zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ hw->rss_conf = mz->addr;
+ hw->rss_confPA = mz->phys_addr;
+ }
+
+ return 0;
+}
+
+static void
+vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
+{
+ uint32_t val;
+
+ PMD_INIT_LOG(DEBUG,
+ "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
+ addr[0], addr[1], addr[2],
+ addr[3], addr[4], addr[5]);
+
+ val = *(const uint32_t *)addr;
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
+
+ val = (addr[5] << 8) | addr[4];
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
+}
+
+static int
+vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf port_conf = dev->data->dev_conf;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t mtu = dev->data->mtu;
+ Vmxnet3_DriverShared *shared = hw->shared;
+ Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ uint32_t i;
+ int ret;
+
+ shared->magic = VMXNET3_REV1_MAGIC;
+ devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
+
+ /* Setting up Guest OS information */
+ devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
+ VMXNET3_GOS_BITS_32 :
+ VMXNET3_GOS_BITS_64;
+ devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
+ devRead->misc.driverInfo.vmxnet3RevSpt = 1;
+ devRead->misc.driverInfo.uptVerSpt = 1;
+
+ devRead->misc.mtu = rte_le_to_cpu_32(mtu);
+ devRead->misc.queueDescPA = hw->queueDescPA;
+ devRead->misc.queueDescLen = hw->queue_desc_len;
+ devRead->misc.numTxQueues = hw->num_tx_queues;
+ devRead->misc.numRxQueues = hw->num_rx_queues;
+
+ /*
+ * Set the number of interrupts to 1.
+ * The PMD disables all interrupts, but one is required to activate the
+ * device: it needs at least one interrupt to handle link events.
+ * We disable it again after device activation if needed.
+ */
+ devRead->intrConf.numIntrs = 1;
+ devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
+
+ for (i = 0; i < hw->num_tx_queues; i++) {
+ Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
+ vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];
+
+ tqd->ctrl.txNumDeferred = 0;
+ tqd->ctrl.txThreshold = 1;
+ tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
+ tqd->conf.compRingBasePA = txq->comp_ring.basePA;
+ tqd->conf.dataRingBasePA = txq->data_ring.basePA;
+
+ tqd->conf.txRingSize = txq->cmd_ring.size;
+ tqd->conf.compRingSize = txq->comp_ring.size;
+ tqd->conf.dataRingSize = txq->data_ring.size;
+ tqd->conf.intrIdx = txq->comp_ring.intr_idx;
+ tqd->status.stopped = TRUE;
+ tqd->status.error = 0;
+ memset(&tqd->stats, 0, sizeof(tqd->stats));
+ }
+
+ for (i = 0; i < hw->num_rx_queues; i++) {
+ Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
+ vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+
+ rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
+ rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
+ rqd->conf.compRingBasePA = rxq->comp_ring.basePA;
+
+ rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size;
+ rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
+ rqd->conf.compRingSize = rxq->comp_ring.size;
+ rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
+ rqd->status.stopped = TRUE;
+ rqd->status.error = 0;
+ memset(&rqd->stats, 0, sizeof(rqd->stats));
+ }
+
+ /* RxMode starts cleared; VMXNET3_RXM_xxx bits are set later */
+ devRead->rxFilterConf.rxMode = 0;
+
+ /* Setting up feature flags */
+ if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
+
+ if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ ret = vmxnet3_rss_configure(dev);
+ if (ret != VMXNET3_SUCCESS)
+ return ret;
+
+ devRead->misc.uptFeatures |= VMXNET3_F_RSS;
+ devRead->rssConfDesc.confVer = 1;
+ devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
+ devRead->rssConfDesc.confPA = hw->rss_confPA;
+ }
+
+ vmxnet3_dev_vlan_offload_set(dev,
+ ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+
+ vmxnet3_write_mac(hw, hw->perm_addr);
+
+ return VMXNET3_SUCCESS;
+}
+
+/*
+ * Configure device link speed and setup link.
+ * Must be called after eth_vmxnet3_dev_init; otherwise it might fail.
+ * It returns 0 on success.
+ */
+static int
+vmxnet3_dev_start(struct rte_eth_dev *dev)
+{
+ int status, ret;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = vmxnet3_setup_driver_shared(dev);
+ if (ret != VMXNET3_SUCCESS)
+ return ret;
+
+ /* Exchange shared data with device */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
+ VMXNET3_GET_ADDR_LO(hw->sharedPA));
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
+ VMXNET3_GET_ADDR_HI(hw->sharedPA));
+
+ /* Activate device by register write */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
+ status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ if (status != 0) {
+ PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
+ return -1;
+ }
+
+ /* Disable interrupts */
+ vmxnet3_disable_intr(hw);
+
+ /*
+ * Load RX queues with blank mbufs and update next2fill index for device
+ * Update RxMode of the device
+ */
+ ret = vmxnet3_dev_rxtx_init(dev);
+ if (ret != VMXNET3_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Device receive init: UNSUCCESSFUL");
+ return ret;
+ }
+
+ /* Setting proper Rx Mode and issue Rx Mode Update command */
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
+
+ /*
+ * Don't need to handle events for now
+ */
+#if PROCESS_SYS_EVENTS == 1
+ uint32_t events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
+
+ PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
+ vmxnet3_process_events(hw);
+#endif
+
+ /* the device is running; allow a subsequent stop to take effect */
+ hw->adapter_stopped = 0;
+
+ return status;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+vmxnet3_dev_stop(struct rte_eth_dev *dev)
+{
+ struct rte_eth_link link;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->adapter_stopped == 1) {
+ PMD_INIT_LOG(DEBUG, "Device already closed.");
+ return;
+ }
+
+ /* disable interrupts */
+ vmxnet3_disable_intr(hw);
+
+ /* quiesce the device first */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
+
+ /* reset the device */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+ PMD_INIT_LOG(DEBUG, "Device reset.");
+ /* mark the adapter stopped so a repeated stop call is a no-op */
+ hw->adapter_stopped = 1;
+
+ vmxnet3_dev_clear_queues(dev);
+
+ /* Clear recorded link status */
+ memset(&link, 0, sizeof(link));
+ vmxnet3_dev_atomic_write_link_status(dev, &link);
+}
+
+/*
+ * Reset and stop device.
+ */
+static void
+vmxnet3_dev_close(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vmxnet3_dev_stop(dev);
+ hw->adapter_stopped = 1;
+}
+
+static void
+vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned int i;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+
+ RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
+ for (i = 0; i < hw->num_tx_queues; i++) {
+ struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;
+
+ stats->q_opackets[i] = txStats->ucastPktsTxOK +
+ txStats->mcastPktsTxOK +
+ txStats->bcastPktsTxOK;
+ stats->q_obytes[i] = txStats->ucastBytesTxOK +
+ txStats->mcastBytesTxOK +
+ txStats->bcastBytesTxOK;
+
+ stats->opackets += stats->q_opackets[i];
+ stats->obytes += stats->q_obytes[i];
+ stats->oerrors += txStats->pktsTxError +
+ txStats->pktsTxDiscard;
+ }
+
+ RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
+ for (i = 0; i < hw->num_rx_queues; i++) {
+ struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;
+
+ stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
+ rxStats->mcastPktsRxOK +
+ rxStats->bcastPktsRxOK;
+
+ stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
+ rxStats->mcastBytesRxOK +
+ rxStats->bcastBytesRxOK;
+
+ stats->ipackets += stats->q_ipackets[i];
+ stats->ibytes += stats->q_ibytes[i];
+
+ stats->q_errors[i] = rxStats->pktsRxError;
+ stats->ierrors += rxStats->pktsRxError;
+ stats->imcasts += rxStats->mcastPktsRxOK;
+ stats->rx_nombuf += rxStats->pktsRxOutOfBuf;
+ }
+}
+
+static void
+vmxnet3_dev_info_get(__attribute__((unused)) struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
+ dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
+ dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
+ dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
+ dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
+
+ dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
+ dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = VMXNET3_RX_RING_MAX_SIZE,
+ .nb_min = VMXNET3_DEF_RX_RING_SIZE,
+ .nb_align = 1,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = VMXNET3_TX_RING_MAX_SIZE,
+ .nb_min = VMXNET3_DEF_TX_RING_SIZE,
+ .nb_align = 1,
+ };
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+}
+
+static const uint32_t *
+vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
+static void
+vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ vmxnet3_write_mac(hw, mac_addr->addr_bytes);
+}
+
+/* Returns 0 if the link status changed, -1 if it did not */
+static int
+vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct rte_eth_link old, link;
+ uint32_t ret;
+
+ if (dev->data->dev_started == 0)
+ return -1; /* Link status doesn't change for stopped dev */
+
+ memset(&link, 0, sizeof(link));
+ vmxnet3_dev_atomic_read_link_status(dev, &old);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ if (ret & 0x1) {
+ link.link_status = ETH_LINK_UP;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_SPEED_FIXED;
+ }
+
+ vmxnet3_dev_atomic_write_link_status(dev, &link);
+
+ return (old.link_status == link.link_status) ? -1 : 0;
+}
+
+/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
+static void
+vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
+{
+ struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
+
+ if (set)
+ rxConf->rxMode = rxConf->rxMode | feature;
+ else
+ rxConf->rxMode = rxConf->rxMode & (~feature);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
+}
+
+/* Promiscuous mode can be enabled only after Vmxnet3_DriverShared has been initialized in the adapter */
+static void
+vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+
+ memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+}
+
+/* Promiscuous mode can be enabled only after Vmxnet3_DriverShared has been initialized in the adapter */
+static void
+vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+
+ memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+}
+
+/* Allmulticast mode can be enabled only after Vmxnet3_DriverShared has been initialized in the adapter */
+static void
+vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
+}
+
+/* Allmulticast mode can be enabled only after Vmxnet3_DriverShared has been initialized in the adapter */
+static void
+vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
+}
+
+/* Enable/disable filtering for a VLAN ID */
+static int
+vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
+ uint32_t *vf_table = rxConf->vfTable;
+
+ /* save state for restore */
+ if (on)
+ VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
+ else
+ VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);
+
+ /* don't change active filter if in promiscuous mode */
+ if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
+ return 0;
+
+ /* set in hardware */
+ if (on)
+ VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
+ else
+ VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ return 0;
+}
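+
+/*
+ * Illustrative note, assuming the conventional bitmap layout of the
+ * VLAN filter table (one bit per VLAN ID in an array of 32-bit words):
+ * VMXNET3_SET_VFTABLE_ENTRY(table, vid) would set bit (vid & 31) of
+ * word (vid >> 5), so e.g. vid 100 lands in table[3], bit 4.
+ */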
+
+static void
+vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
+ uint32_t *vf_table = devRead->rxFilterConf.vfTable;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+ else
+ devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_FEATURE);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ else
+ memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ }
+}
+
+#if PROCESS_SYS_EVENTS == 1
+static void
+vmxnet3_process_events(struct vmxnet3_hw *hw)
+{
+ uint32_t events = hw->shared->ecr;
+
+ if (!events) {
+ PMD_INIT_LOG(ERR, "No events to process");
+ return;
+ }
+
+ /*
+ * ECR bits are cleared by writing 1 to them. Write the pending
+ * events back to ECR so the bits that were set are acknowledged.
+ */
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
+
+ /* Check if link state has changed */
+ if (events & VMXNET3_ECR_LINK)
+ PMD_INIT_LOG(ERR,
+ "Process events in %s(): VMXNET3_ECR_LINK event", __func__);
+
+ /* Check if there is an error on xmit/recv queues */
+ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS);
+
+ if (hw->tqd_start->status.stopped)
+ PMD_INIT_LOG(ERR, "tq error 0x%x",
+ hw->tqd_start->status.error);
+
+ if (hw->rqd_start->status.stopped)
+ PMD_INIT_LOG(ERR, "rq error 0x%x",
+ hw->rqd_start->status.error);
+
+ /* TODO: the device should be reset here */
+ }
+
+ if (events & VMXNET3_ECR_DIC)
+ PMD_INIT_LOG(ERR, "Device implementation change event.");
+
+ if (events & VMXNET3_ECR_DEBUG)
+ PMD_INIT_LOG(ERR, "Debug event generated by device.");
+}
+#endif
+
+static struct rte_driver rte_vmxnet3_driver = {
+ .type = PMD_PDEV,
+ .init = rte_vmxnet3_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_vmxnet3_driver);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
new file mode 100644
index 00000000..4f9d0bd2
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -0,0 +1,185 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VMXNET3_ETHDEV_H_
+#define _VMXNET3_ETHDEV_H_
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+#define VMXNET3_ASSERT(x) do { \
+ if (!(x)) rte_panic("VMXNET3: %s\n", #x); \
+} while (0)
+#else
+#define VMXNET3_ASSERT(x) do { (void)(x); } while (0)
+#endif
+
+#define VMXNET3_MAX_MAC_ADDRS 1
+
+/* UPT feature to negotiate */
+#define VMXNET3_F_RXCSUM 0x0001
+#define VMXNET3_F_RSS 0x0002
+#define VMXNET3_F_RXVLAN 0x0004
+#define VMXNET3_F_LRO 0x0008
+
+/* Hash Types supported by device */
+#define VMXNET3_RSS_HASH_TYPE_NONE 0x0
+#define VMXNET3_RSS_HASH_TYPE_IPV4 0x01
+#define VMXNET3_RSS_HASH_TYPE_TCP_IPV4 0x02
+#define VMXNET3_RSS_HASH_TYPE_IPV6 0x04
+#define VMXNET3_RSS_HASH_TYPE_TCP_IPV6 0x08
+
+#define VMXNET3_RSS_HASH_FUNC_NONE 0x0
+#define VMXNET3_RSS_HASH_FUNC_TOEPLITZ 0x01
+
+#define VMXNET3_RSS_MAX_KEY_SIZE 40
+#define VMXNET3_RSS_MAX_IND_TABLE_SIZE 128
+
+#define VMXNET3_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP)
+
+/* RSS configuration structure - shared with device through GPA */
+typedef
+struct VMXNET3_RSSConf {
+ uint16_t hashType;
+ uint16_t hashFunc;
+ uint16_t hashKeySize;
+ uint16_t indTableSize;
+ uint8_t hashKey[VMXNET3_RSS_MAX_KEY_SIZE];
+ /*
+ * indTable is the only element that can be changed without
+ * a device quiesce/reset/update/activate cycle.
+ */
+ uint8_t indTable[VMXNET3_RSS_MAX_IND_TABLE_SIZE];
+} VMXNET3_RSSConf;
+
+typedef
+struct vmxnet3_mf_table {
+ void *mfTableBase; /* Multicast addresses list */
+ uint64_t mfTablePA; /* Physical address of the list */
+ uint16_t num_addrs; /* number of multicast addrs */
+} vmxnet3_mf_table_t;
+
+struct vmxnet3_hw {
+ uint8_t *hw_addr0; /* BAR0: PT-Passthrough Regs */
+ uint8_t *hw_addr1; /* BAR1: VD-Virtual Device Regs */
+ /* BAR2: MSI-X Regs */
+ /* BAR3: Port IO */
+ void *back;
+
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t subsystem_device_id;
+ uint16_t subsystem_vendor_id;
+ bool adapter_stopped;
+
+ uint8_t perm_addr[ETHER_ADDR_LEN];
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t bufs_per_pkt;
+
+ Vmxnet3_TxQueueDesc *tqd_start; /* start address of all tx queue desc */
+ Vmxnet3_RxQueueDesc *rqd_start; /* start address of all rx queue desc */
+
+ Vmxnet3_DriverShared *shared;
+ uint64_t sharedPA;
+
+ uint64_t queueDescPA;
+ uint16_t queue_desc_len;
+
+ VMXNET3_RSSConf *rss_conf;
+ uint64_t rss_confPA;
+ vmxnet3_mf_table_t *mf_table;
+ uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
+#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
+};
+
+#define VMXNET3_GET_ADDR_LO(reg) ((uint32_t)(reg))
+#define VMXNET3_GET_ADDR_HI(reg) ((uint32_t)(((uint64_t)(reg)) >> 32))
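+
+/*
+ * Example for illustration (address value assumed): for a 64-bit
+ * physical address of 0x12345f000, VMXNET3_GET_ADDR_LO() yields
+ * 0x2345f000 and VMXNET3_GET_ADDR_HI() yields 0x1;
+ * vmxnet3_dev_start() writes the two halves to VMXNET3_REG_DSAL and
+ * VMXNET3_REG_DSAH when handing the shared memory area to the device.
+ */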
+
+/* Config space read/writes */
+
+#define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+
+static inline uint32_t vmxnet3_read_addr(volatile void *addr)
+{
+ return VMXNET3_PCI_REG(addr);
+}
+
+#define VMXNET3_PCI_REG_WRITE(reg, value) do { \
+ VMXNET3_PCI_REG((reg)) = (value); \
+} while (0)
+
+#define VMXNET3_PCI_BAR0_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr0 + (reg)))
+#define VMXNET3_READ_BAR0_REG(hw, reg) \
+ vmxnet3_read_addr(VMXNET3_PCI_BAR0_REG_ADDR((hw), (reg)))
+#define VMXNET3_WRITE_BAR0_REG(hw, reg, value) \
+ VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR0_REG_ADDR((hw), (reg)), (value))
+
+#define VMXNET3_PCI_BAR1_REG_ADDR(hw, reg) \
+ ((volatile uint32_t *)((char *)(hw)->hw_addr1 + (reg)))
+#define VMXNET3_READ_BAR1_REG(hw, reg) \
+ vmxnet3_read_addr(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)))
+#define VMXNET3_WRITE_BAR1_REG(hw, reg, value) \
+ VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)), (value))
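+
+/*
+ * A minimal usage sketch, mirroring vmxnet3_dev_link_update(): commands
+ * are issued by writing a VMXNET3_CMD_* value to the BAR1 command
+ * register and, for query-type commands, reading the result back from
+ * the same register:
+ *
+ *   VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
+ *   ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+ */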
+
+/*
+ * RX/TX function prototypes
+ */
+
+void vmxnet3_dev_clear_queues(struct rte_eth_dev *dev);
+
+void vmxnet3_dev_rx_queue_release(void *rxq);
+void vmxnet3_dev_tx_queue_release(void *txq);
+
+int vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+int vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+int vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev);
+
+int vmxnet3_rss_configure(struct rte_eth_dev *dev);
+
+uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _VMXNET3_ETHDEV_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_logs.h b/drivers/net/vmxnet3/vmxnet3_logs.h
new file mode 100644
index 00000000..82639a08
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_logs.h
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VMXNET3_LOGS_H_
+#define _VMXNET3_LOGS_H_
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_INIT
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_LOG(level, fmt, args...) do { } while (0)
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#endif /* _VMXNET3_LOGS_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
new file mode 100644
index 00000000..69ff2ded
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -0,0 +1,169 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VMXNET3_RING_H_
+#define _VMXNET3_RING_H_
+
+#define VMXNET3_RX_CMDRING_SIZE 2
+
+#define VMXNET3_DRIVER_VERSION_NUM 0x01012000
+
+/* Default ring size */
+#define VMXNET3_DEF_TX_RING_SIZE 512
+#define VMXNET3_DEF_RX_RING_SIZE 128
+
+#define VMXNET3_SUCCESS 0
+#define VMXNET3_FAIL -1
+
+#define TRUE 1
+#define FALSE 0
+
+
+ uint16_t len;
+ struct rte_mbuf *m;
+ uint64_t bufPA;
+} vmxnet3_buf_info_t;
+
+typedef struct vmxnet3_cmd_ring {
+ vmxnet3_buf_info_t *buf_info;
+ uint32_t size;
+ uint32_t next2fill;
+ uint32_t next2comp;
+ uint8_t gen;
+ uint8_t rid;
+ Vmxnet3_GenericDesc *base;
+ uint64_t basePA;
+} vmxnet3_cmd_ring_t;
+
+static inline void
+vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
+{
+ ring->next2fill++;
+ if (unlikely(ring->next2fill == ring->size)) {
+ ring->next2fill = 0;
+ ring->gen = (uint8_t)(ring->gen ^ 1);
+ }
+}
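+
+/*
+ * Worked example: with size = 4 and gen starting at VMXNET3_INIT_GEN,
+ * the first three advances only bump next2fill; the fourth wraps
+ * next2fill back to 0 and toggles gen, letting the device distinguish
+ * freshly written descriptors from stale ones of the previous pass.
+ */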
+
+static inline void
+vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
+{
+ VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
+}
+
+static inline uint32_t
+vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
+{
+ return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
+ ring->next2comp - ring->next2fill - 1;
+}
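+
+/*
+ * Worked example (values assumed): with size = 8, next2comp = 2 and
+ * next2fill = 6 this evaluates to (2 > 6 ? 0 : 8) + 2 - 6 - 1 = 3 free
+ * descriptors; one slot is always left unused so that a full ring can
+ * be told apart from an empty one.
+ */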
+
+static inline bool
+vmxnet3_cmd_ring_desc_empty(struct vmxnet3_cmd_ring *ring)
+{
+ return ring->next2comp == ring->next2fill;
+}
+
+typedef struct vmxnet3_comp_ring {
+ uint32_t size;
+ uint32_t next2proc;
+ uint8_t gen;
+ uint8_t intr_idx;
+ Vmxnet3_GenericDesc *base;
+ uint64_t basePA;
+} vmxnet3_comp_ring_t;
+
+struct vmxnet3_data_ring {
+ struct Vmxnet3_TxDataDesc *base;
+ uint32_t size;
+ uint64_t basePA;
+};
+
+static inline void
+vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
+{
+ ring->next2proc++;
+ if (unlikely(ring->next2proc == ring->size)) {
+ ring->next2proc = 0;
+ ring->gen = (uint8_t)(ring->gen ^ 1);
+ }
+}
+
+struct vmxnet3_txq_stats {
+ uint64_t drop_total; /* # of pkts dropped by the driver;
+ * the counters below break the drops down
+ * by reason
+ */
+ uint64_t drop_too_many_segs;
+ uint64_t drop_tso;
+ uint64_t tx_ring_full;
+};
+
+typedef struct vmxnet3_tx_queue {
+ struct vmxnet3_hw *hw;
+ struct vmxnet3_cmd_ring cmd_ring;
+ struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_data_ring data_ring;
+ uint32_t qid;
+ struct Vmxnet3_TxQueueDesc *shared;
+ struct vmxnet3_txq_stats stats;
+ bool stopped;
+ uint16_t queue_id; /**< Device TX queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+} vmxnet3_tx_queue_t;
+
+struct vmxnet3_rxq_stats {
+ uint64_t drop_total;
+ uint64_t drop_err;
+ uint64_t drop_fcs;
+ uint64_t rx_buf_alloc_failure;
+};
+
+typedef struct vmxnet3_rx_queue {
+ struct rte_mempool *mp;
+ struct vmxnet3_hw *hw;
+ struct vmxnet3_cmd_ring cmd_ring[VMXNET3_RX_CMDRING_SIZE];
+ struct vmxnet3_comp_ring comp_ring;
+ uint32_t qid1;
+ uint32_t qid2;
+ Vmxnet3_RxQueueDesc *shared;
+ struct rte_mbuf *start_seg;
+ struct rte_mbuf *last_seg;
+ struct vmxnet3_rxq_stats stats;
+ bool stopped;
+ uint16_t queue_id; /**< Device RX queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+} vmxnet3_rx_queue_t;
+
+#endif /* _VMXNET3_RING_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
new file mode 100644
index 00000000..4ac0456c
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -0,0 +1,1105 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+
+#include "base/vmxnet3_defs.h"
+#include "vmxnet3_ring.h"
+
+#include "vmxnet3_logs.h"
+#include "vmxnet3_ethdev.h"
+
+static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
+
+static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
+static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
+static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
+static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
+#endif
+
+static struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+ return m;
+}
+
+#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
+static void
+vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
+{
+ uint32_t avail = 0;
+
+ if (rxq == NULL)
+ return;
+
+ PMD_RX_LOG(DEBUG,
+ "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.",
+ rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
+ PMD_RX_LOG(DEBUG,
+ "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
+ (unsigned long)rxq->cmd_ring[0].basePA,
+ (unsigned long)rxq->cmd_ring[1].basePA,
+ (unsigned long)rxq->comp_ring.basePA);
+
+ avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
+ PMD_RX_LOG(DEBUG,
+ "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
+ (uint32_t)rxq->cmd_ring[0].size, avail,
+ rxq->comp_ring.next2proc,
+ rxq->cmd_ring[0].size - avail);
+
+ avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
+ PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
+ (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
+ rxq->cmd_ring[1].size - avail);
+}
+
+static void
+vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
+{
+ uint32_t avail = 0;
+
+ if (txq == NULL)
+ return;
+
+ PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p data ring base : 0x%p.",
+ txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
+ PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
+ (unsigned long)txq->cmd_ring.basePA,
+ (unsigned long)txq->comp_ring.basePA,
+ (unsigned long)txq->data_ring.basePA);
+
+ avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
+ PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
+ (uint32_t)txq->cmd_ring.size, avail,
+ txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
+}
+#endif
+
+static void
+vmxnet3_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
+{
+ while (ring->next2comp != ring->next2fill) {
+ /* No need to worry about tx desc ownership, device is quiesced by now. */
+ vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
+
+ if (buf_info->m) {
+ rte_pktmbuf_free(buf_info->m);
+ buf_info->m = NULL;
+ buf_info->bufPA = 0;
+ buf_info->len = 0;
+ }
+ vmxnet3_cmd_ring_adv_next2comp(ring);
+ }
+}
+
+static void
+vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
+{
+ vmxnet3_cmd_ring_release_mbufs(ring);
+ rte_free(ring->buf_info);
+ ring->buf_info = NULL;
+}
+
+
+void
+vmxnet3_dev_tx_queue_release(void *txq)
+{
+ vmxnet3_tx_queue_t *tq = txq;
+
+ if (tq != NULL) {
+ /* Release the cmd_ring */
+ vmxnet3_cmd_ring_release(&tq->cmd_ring);
+ }
+}
+
+void
+vmxnet3_dev_rx_queue_release(void *rxq)
+{
+ int i;
+ vmxnet3_rx_queue_t *rq = rxq;
+
+ if (rq != NULL) {
+ /* Release both the cmd_rings */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+ vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
+ }
+}
+
+static void
+vmxnet3_dev_tx_queue_reset(void *txq)
+{
+ vmxnet3_tx_queue_t *tq = txq;
+ struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
+ struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
+ struct vmxnet3_data_ring *data_ring = &tq->data_ring;
+ int size;
+
+ if (tq == NULL)
+ return;
+
+ /* Release the cmd_ring mbufs */
+ vmxnet3_cmd_ring_release_mbufs(&tq->cmd_ring);
+
+ /* Tx vmxnet rings structure initialization*/
+ ring->next2fill = 0;
+ ring->next2comp = 0;
+ ring->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
+ size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
+ size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+
+ memset(ring->base, 0, size);
+}
+
+static void
+vmxnet3_dev_rx_queue_reset(void *rxq)
+{
+ int i;
+ vmxnet3_rx_queue_t *rq = rxq;
+ struct vmxnet3_cmd_ring *ring0, *ring1;
+ struct vmxnet3_comp_ring *comp_ring;
+ int size;
+
+ if (rq == NULL)
+ return;
+
+ /* Release both the cmd_rings mbufs */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+ vmxnet3_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
+
+ ring0 = &rq->cmd_ring[0];
+ ring1 = &rq->cmd_ring[1];
+ comp_ring = &rq->comp_ring;
+
+ /* Rx vmxnet rings structure initialization */
+ ring0->next2fill = 0;
+ ring1->next2fill = 0;
+ ring0->next2comp = 0;
+ ring1->next2comp = 0;
+ ring0->gen = VMXNET3_INIT_GEN;
+ ring1->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
+ size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+
+ memset(ring0->base, 0, size);
+}
+
+void
+vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ unsigned i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (txq != NULL) {
+ txq->stopped = TRUE;
+ vmxnet3_dev_tx_queue_reset(txq);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq != NULL) {
+ rxq->stopped = TRUE;
+ vmxnet3_dev_rx_queue_reset(rxq);
+ }
+ }
+}
+
+static int
+vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
+{
+ int completed = 0;
+ struct rte_mbuf *mbuf;
+
+ /* Release cmd_ring descriptor and free mbuf */
+ VMXNET3_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
+
+ mbuf = txq->cmd_ring.buf_info[eop_idx].m;
+ if (mbuf == NULL)
+ rte_panic("EOP desc does not point to a valid mbuf");
+ rte_pktmbuf_free(mbuf);
+
+ txq->cmd_ring.buf_info[eop_idx].m = NULL;
+
+ while (txq->cmd_ring.next2comp != eop_idx) {
+ /* no out-of-order completion */
+ VMXNET3_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
+ vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+ completed++;
+ }
+
+ /* Mark the txd for which tcd was generated as completed */
+ vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+
+ return completed + 1;
+}
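+
+/*
+ * Worked example (indices assumed): a 3-segment packet occupying
+ * descriptors 5, 6 and 7 completes with a single tcd carrying
+ * txdIdx = 7; vmxnet3_unmap_pkt(7, txq) advances next2comp over 5
+ * and 6, then over the EOP descriptor itself, and returns 3.
+ */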
+
+static void
+vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
+{
+ int completed = 0;
+ vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
+ struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
+ (comp_ring->base + comp_ring->next2proc);
+
+ while (tcd->gen == comp_ring->gen) {
+ completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);
+
+ vmxnet3_comp_ring_adv_next2proc(comp_ring);
+ tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
+ comp_ring->next2proc);
+ }
+
+ PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
+}
+
+uint16_t
+vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+ vmxnet3_tx_queue_t *txq = tx_queue;
+ struct vmxnet3_hw *hw = txq->hw;
+ Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl;
+ uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred);
+
+ if (unlikely(txq->stopped)) {
+ PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
+ return 0;
+ }
+
+ /* Free up the comp_descriptors aggressively */
+ vmxnet3_tq_tx_complete(txq);
+
+ nb_tx = 0;
+ while (nb_tx < nb_pkts) {
+ Vmxnet3_GenericDesc *gdesc;
+ vmxnet3_buf_info_t *tbi;
+ uint32_t first2fill, avail, dw2;
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ struct rte_mbuf *m_seg = txm;
+ int copy_size = 0;
+ bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
+ /* # of descriptors needed for a packet. */
+ unsigned count = txm->nb_segs;
+
+ avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
+ if (count > avail) {
+ /* Is command ring full? */
+ if (unlikely(avail == 0)) {
+ PMD_TX_LOG(DEBUG, "No free ring descriptors");
+ txq->stats.tx_ring_full++;
+ txq->stats.drop_total += (nb_pkts - nb_tx);
+ break;
+ }
+
+ /* Command ring is not full but cannot handle the
+ * multi-segmented packet. Let's try the next packet
+ * in this case.
+ */
+ PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
+ "(avail %d needed %d)", avail, count);
+ txq->stats.drop_total++;
+ if (tso)
+ txq->stats.drop_tso++;
+ rte_pktmbuf_free(txm);
+ nb_tx++;
+ continue;
+ }
+
+ /* Drop non-TSO packet that is excessively fragmented */
+ if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) {
+ PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx "
+ "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT);
+ txq->stats.drop_too_many_segs++;
+ txq->stats.drop_total++;
+ rte_pktmbuf_free(txm);
+ nb_tx++;
+ continue;
+ }
+
+ if (txm->nb_segs == 1 && rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+ struct Vmxnet3_TxDataDesc *tdd;
+
+ tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
+ copy_size = rte_pktmbuf_pkt_len(txm);
+ rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
+ }
+
+ /* use the previous gen bit for the SOP desc */
+ dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+ first2fill = txq->cmd_ring.next2fill;
+ do {
+ /* Remember the transmit buffer for cleanup */
+ tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
+
+ /* NB: the following assumes that the VMXNET3 maximum
+ * transmit buffer size (16K) is no smaller than the
+ * maximum mbuf segment size.
+ */
+ gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
+ if (copy_size)
+ gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
+ txq->cmd_ring.next2fill *
+ sizeof(struct Vmxnet3_TxDataDesc));
+ else
+ gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+
+ gdesc->dword[2] = dw2 | m_seg->data_len;
+ gdesc->dword[3] = 0;
+
+ /* move to the next2fill descriptor */
+ vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
+
+ /* use the right gen for non-SOP desc */
+ dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+ } while ((m_seg = m_seg->next) != NULL);
+
+ /* set the last buf_info for the pkt */
+ tbi->m = txm;
+ /* Update the EOP descriptor */
+ gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;
+
+ /* Add VLAN tag if present */
+ gdesc = txq->cmd_ring.base + first2fill;
+ if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+ gdesc->txd.ti = 1;
+ gdesc->txd.tci = txm->vlan_tci;
+ }
+
+ if (tso) {
+ uint16_t mss = txm->tso_segsz;
+
+ VMXNET3_ASSERT(mss > 0);
+
+ gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
+ gdesc->txd.om = VMXNET3_OM_TSO;
+ gdesc->txd.msscof = mss;
+
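+ /*
+ * deferred counts the packets the device will actually emit.
+ * Illustrative numbers (assumed): pkt_len 9000, hlen 54 and
+ * mss 1460 give (9000 - 54 + 1460 - 1) / 1460 = 7 TSO segments.
+ */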
+ deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
+ } else if (txm->ol_flags & PKT_TX_L4_MASK) {
+ gdesc->txd.om = VMXNET3_OM_CSUM;
+ gdesc->txd.hlen = txm->l2_len + txm->l3_len;
+
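+ /*
+ * msscof doubles as the offset of the L4 checksum field from the
+ * start of the frame. Illustrative numbers (assumed): with
+ * l2_len 14 and l3_len 20, hlen is 34, so TCP gets msscof
+ * 34 + 16 = 50 and UDP gets 34 + 6 = 40 via the offsetof() below.
+ */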
+ switch (txm->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
+ break;
+ case PKT_TX_UDP_CKSUM:
+ gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
+ break;
+ default:
+ PMD_TX_LOG(WARNING, "requested cksum offload not supported %#" PRIx64,
+ txm->ol_flags & PKT_TX_L4_MASK);
+ abort();
+ }
+ deferred++;
+ } else {
+ gdesc->txd.hlen = 0;
+ gdesc->txd.om = VMXNET3_OM_NONE;
+ gdesc->txd.msscof = 0;
+ deferred++;
+ }
+
+ /* flip the GEN bit on the SOP */
+ rte_compiler_barrier();
+ gdesc->dword[2] ^= VMXNET3_TXD_GEN;
+
+ txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred);
+ nb_tx++;
+ }
+
+ PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold));
+
+ if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) {
+ txq_ctrl->txNumDeferred = 0;
+ /* Notify vSwitch that packets are available. */
+ VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
+ txq->cmd_ring.next2fill);
+ }
+
+ return nb_tx;
+}
+
+/*
+ * Allocate mbufs and post rx descriptors carrying the buffer details,
+ * so that the device can receive packets into those buffers.
+ * Ring layout:
+ * Of the two rings, the 1st contains buffers of both type 0 (HEAD) and
+ * type 1 (BODY). bufs_per_pkt is set so that in the non-LRO case all
+ * buffers required by a frame fit in the 1st ring (the first buffer of
+ * type 0, the rest of type 1). The 2nd ring contains type 1 buffers
+ * only and is mostly used for LRO.
+ */
+static int
+vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
+{
+ int err = 0;
+ uint32_t i = 0, val = 0;
+ struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+
+ if (ring_id == 0) {
+ /* Usually: One HEAD type buf per packet
+ * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
+ * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
+ */
+
+ /* We use one buffer per packet, so all descriptors here are HEAD type */
+ val = VMXNET3_RXD_BTYPE_HEAD;
+ } else {
+ /* All BODY type buffers for 2nd ring */
+ val = VMXNET3_RXD_BTYPE_BODY;
+ }
+
+ while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
+ struct Vmxnet3_RxDesc *rxd;
+ struct rte_mbuf *mbuf;
+ vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
+ rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
+
+ /* Allocate blank mbuf for the current Rx Descriptor */
+ mbuf = rte_rxmbuf_alloc(rxq->mp);
+ if (unlikely(mbuf == NULL)) {
+ PMD_RX_LOG(ERR, "Error allocating mbuf");
+ rxq->stats.rx_buf_alloc_failure++;
+ err = ENOMEM;
+ break;
+ }
+
+ /*
+ * Record the mbuf in buf_info[next2fill]; the buf_info
+ * entry is the equivalent of a virtio-virtqueue cookie.
+ */
+ buf_info->m = mbuf;
+ buf_info->len = (uint16_t)(mbuf->buf_len -
+ RTE_PKTMBUF_HEADROOM);
+ buf_info->bufPA =
+ rte_mbuf_data_dma_addr_default(mbuf);
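+ /*
+ * Example with typical values (assumed): a pool created with
+ * RTE_MBUF_DEFAULT_BUF_SIZE gives buf_len 2176; minus the default
+ * 128-byte RTE_PKTMBUF_HEADROOM this leaves 2048 bytes of receive
+ * space in each posted buffer.
+ */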
+
+ /* Load Rx Descriptor with the buffer's GPA */
+ rxd->addr = buf_info->bufPA;
+
+ /* After this point rxd->addr MUST not be NULL */
+ rxd->btype = val;
+ rxd->len = buf_info->len;
+ /* Flip gen bit at the end to change ownership */
+ rxd->gen = ring->gen;
+
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ i++;
+ }
+
+ /* Return error only if no buffers are posted at present */
+ if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
+ return -err;
+ else
+ return i;
+}
+
+
+/* Receive side checksum and other offloads */
+static void
+vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
+{
+ /* Check for hardware stripped VLAN tag */
+ if (rcd->ts) {
+ rxm->ol_flags |= PKT_RX_VLAN_PKT;
+ rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+ }
+
+ /* Check for RSS */
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
+ rxm->ol_flags |= PKT_RX_RSS_HASH;
+ rxm->hash.rss = rcd->rssHash;
+ }
+
+ /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
+ if (rcd->v4) {
+ struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
+ struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
+
+ if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
+ rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
+ else
+ rxm->packet_type = RTE_PTYPE_L3_IPV4;
+
+ if (!rcd->cnc) {
+ if (!rcd->ipc)
+ rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if ((rcd->tcp || rcd->udp) && !rcd->tuc)
+ rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+}
+
+/*
+ * Process the Rx Completion Ring of given vmxnet3_rx_queue
+ * for nb_pkts burst and return the number of packets received
+ */
+uint16_t
+vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ uint16_t nb_rx;
+ uint32_t nb_rxd, idx;
+ uint8_t ring_idx;
+ vmxnet3_rx_queue_t *rxq;
+ Vmxnet3_RxCompDesc *rcd;
+ vmxnet3_buf_info_t *rbi;
+ Vmxnet3_RxDesc *rxd;
+ struct rte_mbuf *rxm = NULL;
+ struct vmxnet3_hw *hw;
+
+ nb_rx = 0;
+ ring_idx = 0;
+ nb_rxd = 0;
+ idx = 0;
+
+ rxq = rx_queue;
+ hw = rxq->hw;
+
+ rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
+
+ if (unlikely(rxq->stopped)) {
+ PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
+ return 0;
+ }
+
+ while (rcd->gen == rxq->comp_ring.gen) {
+ if (nb_rx >= nb_pkts)
+ break;
+
+ idx = rcd->rxdIdx;
+ ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
+ rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
+ rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
+
+ PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
+
+ VMXNET3_ASSERT(rcd->len <= rxd->len);
+ VMXNET3_ASSERT(rbi->m);
+
+ /* Get the packet buffer pointer from buf_info */
+ rxm = rbi->m;
+
+ /* Clear descriptor associated buf_info to be reused */
+ rbi->m = NULL;
+ rbi->bufPA = 0;
+
+ /* Update the index that we received a packet */
+ rxq->cmd_ring[ring_idx].next2comp = idx;
+
+ /* For RCD with EOP set, check if there is frame error */
+ if (unlikely(rcd->eop && rcd->err)) {
+ rxq->stats.drop_total++;
+ rxq->stats.drop_err++;
+
+ if (!rcd->fcs) {
+ rxq->stats.drop_fcs++;
+ PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
+ }
+ PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
+ (int)(rcd - (struct Vmxnet3_RxCompDesc *)
+ rxq->comp_ring.base), rcd->rxdIdx);
+ rte_pktmbuf_free_seg(rxm);
+ goto rcd_done;
+ }
+
+ /* Initialize newly received packet buffer */
+ rxm->port = rxq->port_id;
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = (uint16_t)rcd->len;
+ rxm->data_len = (uint16_t)rcd->len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (rcd->sop) {
+ VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
+
+ if (unlikely(rcd->len == 0)) {
+ VMXNET3_ASSERT(rcd->eop);
+
+ PMD_RX_LOG(DEBUG,
+ "Rx buf was skipped. rxring[%d][%d])",
+ ring_idx, idx);
+ rte_pktmbuf_free_seg(rxm);
+ goto rcd_done;
+ }
+
+ rxq->start_seg = rxm;
+ vmxnet3_rx_offload(rcd, rxm);
+ } else {
+ struct rte_mbuf *start = rxq->start_seg;
+
+ VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
+
+ start->pkt_len += rxm->data_len;
+ start->nb_segs++;
+
+ rxq->last_seg->next = rxm;
+ }
+ rxq->last_seg = rxm;
+
+ if (rcd->eop) {
+ rx_pkts[nb_rx++] = rxq->start_seg;
+ rxq->start_seg = NULL;
+ }
+
+rcd_done:
+ rxq->cmd_ring[ring_idx].next2comp = idx;
+ VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);
+
+ /* It's time to allocate some new buf and renew descriptors */
+ vmxnet3_post_rx_bufs(rxq, ring_idx);
+ if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+ VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
+ rxq->cmd_ring[ring_idx].next2fill);
+ }
+
+ /* Advance to the next descriptor in comp_ring */
+ vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);
+
+ rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
+ nb_rxd++;
+ if (nb_rxd > rxq->cmd_ring[0].size) {
+ PMD_RX_LOG(ERR,
+ "Used up quota of receiving packets,"
+ " relinquish control.");
+ break;
+ }
+ }
+
+ return nb_rx;
+}
+
+/*
+ * Create a memzone for the device rings. malloc cannot be used here since
+ * the physical address is needed. If the memzone already exists, this
+ * function returns a pointer to the existing one.
+ */
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+ uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ dev->driver->pci_drv.name, ring_name,
+ dev->data->port_id, queue_id);
+
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+ return rte_memzone_reserve_aligned(z_name, ring_size,
+ socket_id, 0, VMXNET3_RING_BA_ALIGN);
+}
+
+int
+vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ const struct rte_memzone *mz;
+ struct vmxnet3_tx_queue *txq;
+ struct vmxnet3_cmd_ring *ring;
+ struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_data_ring *data_ring;
+ int size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
+ ETH_TXQ_FLAGS_NOXSUMSCTP) {
+ PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
+ return -EINVAL;
+ }
+
+ txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE);
+ if (txq == NULL) {
+ PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->shared = &hw->tqd_start[queue_idx];
+ txq->hw = hw;
+ txq->qid = queue_idx;
+ txq->stopped = TRUE;
+
+ ring = &txq->cmd_ring;
+ comp_ring = &txq->comp_ring;
+ data_ring = &txq->data_ring;
+
+ /* Tx vmxnet ring length must lie between 512 and 4096 */
+ if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
+ VMXNET3_DEF_TX_RING_SIZE);
+ return -EINVAL;
+ } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
+ VMXNET3_TX_RING_MAX_SIZE);
+ return -EINVAL;
+ } else {
+ ring->size = nb_desc;
+ ring->size &= ~VMXNET3_RING_SIZE_MASK;
+ }
+ comp_ring->size = data_ring->size = ring->size;
+
+ /* Tx vmxnet rings structure initialization*/
+ ring->next2fill = 0;
+ ring->next2comp = 0;
+ ring->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
+ size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
+ size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+
+ mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ /* cmd_ring initialization */
+ ring->base = mz->addr;
+ ring->basePA = mz->phys_addr;
+
+ /* comp_ring initialization */
+ comp_ring->base = ring->base + ring->size;
+ comp_ring->basePA = ring->basePA +
+ (sizeof(struct Vmxnet3_TxDesc) * ring->size);
+
+ /* data_ring initialization */
+ data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
+ data_ring->basePA = comp_ring->basePA +
+ (sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);
+
+ /* cmd_ring0 buf_info allocation */
+ ring->buf_info = rte_zmalloc("tx_ring_buf_info",
+ ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
+ if (ring->buf_info == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
+ return -ENOMEM;
+ }
+
+ /* Update the data portion with txq */
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
+
+int
+vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ const struct rte_memzone *mz;
+ struct vmxnet3_rx_queue *rxq;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
+ struct vmxnet3_comp_ring *comp_ring;
+ int size;
+ uint8_t i;
+ char mem_name[32];
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE);
+ if (rxq == NULL) {
+ PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
+ return -ENOMEM;
+ }
+
+ rxq->mp = mp;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->shared = &hw->rqd_start[queue_idx];
+ rxq->hw = hw;
+ rxq->qid1 = queue_idx;
+ rxq->qid2 = queue_idx + hw->num_rx_queues;
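+ /*
+ * Illustrative mapping: with num_rx_queues = 2, queue 0 gets
+ * qid1 = 0 and qid2 = 2 while queue 1 gets qid1 = 1 and qid2 = 3;
+ * completions carry rqID, which vmxnet3_recv_pkts() compares with
+ * qid1 to select cmd_ring[0] or cmd_ring[1].
+ */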
+ rxq->stopped = TRUE;
+
+ ring0 = &rxq->cmd_ring[0];
+ ring1 = &rxq->cmd_ring[1];
+ comp_ring = &rxq->comp_ring;
+
+ /* Rx vmxnet ring length must lie between VMXNET3_DEF_RX_RING_SIZE
+ * and VMXNET3_RX_RING_MAX_SIZE
+ */
+ if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: %u",
+ VMXNET3_DEF_RX_RING_SIZE);
+ return -EINVAL;
+ } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
+ PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: %u",
+ VMXNET3_RX_RING_MAX_SIZE);
+ return -EINVAL;
+ } else {
+ ring0->size = nb_desc;
+ ring0->size &= ~VMXNET3_RING_SIZE_MASK;
+ ring1->size = ring0->size;
+ }
+
+ comp_ring->size = ring0->size + ring1->size;
+
+ /* Rx vmxnet rings structure initialization */
+ ring0->next2fill = 0;
+ ring1->next2fill = 0;
+ ring0->next2comp = 0;
+ ring1->next2comp = 0;
+ ring0->gen = VMXNET3_INIT_GEN;
+ ring1->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
+
+ size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
+ size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+
+ mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ /* cmd_ring0 initialization */
+ ring0->base = mz->addr;
+ ring0->basePA = mz->phys_addr;
+
+ /* cmd_ring1 initialization */
+ ring1->base = ring0->base + ring0->size;
+ ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;
+
+ /* comp_ring initialization */
+ comp_ring->base = ring1->base + ring1->size;
+ comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
+ ring1->size;
+
+ /* cmd_ring0-cmd_ring1 buf_info allocation */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
+
+ ring = &rxq->cmd_ring[i];
+ ring->rid = i;
+ snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
+
+ ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
+ if (ring->buf_info == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
+ return -ENOMEM;
+ }
+ }
+
+ /* Update the data portion with rxq */
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ return 0;
+}
+
+/*
+ * Initializes Receive Unit
+ * Load mbufs in rx queue in advance
+ */
+int
+vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+
+ int i, ret;
+ uint8_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < hw->num_rx_queues; i++) {
+ vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+
+ for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
+ /* Post enough blank mbufs to fill the whole ring */
+ ret = vmxnet3_post_rx_bufs(rxq, j);
+ if (ret <= 0) {
+ PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j);
+ return -ret;
+ }
+ /* Hand next2fill to the device so the posted mbufs can receive upcoming packets */
+ if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+ VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
+ rxq->cmd_ring[j].next2fill);
+ }
+ }
+ rxq->stopped = FALSE;
+ rxq->start_seg = NULL;
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
+ txq->stopped = FALSE;
+ }
+
+ return 0;
+}
+
+static uint8_t rss_intel_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+/*
+ * Configure RSS feature
+ */
+int
+vmxnet3_rss_configure(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct VMXNET3_RSSConf *dev_rss_conf;
+ struct rte_eth_rss_conf *port_rss_conf;
+ uint64_t rss_hf;
+ uint8_t i, j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_rss_conf = hw->rss_conf;
+ port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+ /* loading hashFunc */
+ dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
+ /* loading hashKeySize */
+ dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
+ /* loading indTableSize: must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128) */
+ dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
+
+ if (port_rss_conf->rss_key == NULL) {
+ /* Default hash key */
+ port_rss_conf->rss_key = rss_intel_key;
+ }
+
+ /* loading hashKey */
+ memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize);
+
+ /* loading indTable */
+ for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
+ if (j == dev->data->nb_rx_queues)
+ j = 0;
+ dev_rss_conf->indTable[i] = j;
+ }
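+ /*
+ * Worked example (queue count assumed): with 2 RX queues,
+ * indTableSize is 8 and the loop above produces the round-robin
+ * table {0, 1, 0, 1, 0, 1, 0, 1}, spreading RSS hash buckets
+ * evenly across the queues.
+ */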
+
+ /* loading hashType */
+ dev_rss_conf->hashType = 0;
+ rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
+ if (rss_hf & ETH_RSS_IPV4)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
+ if (rss_hf & ETH_RSS_IPV6)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
+
+ return VMXNET3_SUCCESS;
+}