Diffstat (limited to 'drivers/net/vmxnet3')
-rw-r--r--  drivers/net/vmxnet3/Makefile               5
-rw-r--r--  drivers/net/vmxnet3/base/vmxnet3_defs.h   85
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c     203
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.h      38
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ring.h        16
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c       148
6 files changed, 426 insertions(+), 69 deletions(-)
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 23ff1da2..84356ae2 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -76,9 +76,4 @@ LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_ethdev.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h
index 68ae8b6d..bfa9622d 100644
--- a/drivers/net/vmxnet3/base/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h
@@ -109,6 +109,9 @@ typedef enum {
VMXNET3_CMD_STOP_EMULATION,
VMXNET3_CMD_LOAD_PLUGIN,
VMXNET3_CMD_ACTIVATE_VF,
+ VMXNET3_CMD_RESERVED3,
+ VMXNET3_CMD_RESERVED4,
+ VMXNET3_CMD_REGISTER_MEMREGS,
VMXNET3_CMD_FIRST_GET = 0xF00D0000,
VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
@@ -120,7 +123,9 @@ typedef enum {
VMXNET3_CMD_GET_DID_HI,
VMXNET3_CMD_GET_DEV_EXTRA_INFO,
VMXNET3_CMD_GET_CONF_INTR,
- VMXNET3_CMD_GET_ADAPTIVE_RING_INFO
+ VMXNET3_CMD_GET_ADAPTIVE_RING_INFO,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
+ VMXNET3_CMD_RESERVED5,
} Vmxnet3_Cmd;
/* Adaptive Ring Info Flags */
@@ -402,12 +407,25 @@ typedef union Vmxnet3_GenericDesc {
#define VMXNET3_RING_SIZE_ALIGN 32
#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
+/* Tx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXDATA_DESC_SIZE_MASK (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
+
+/* Rx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
+
/* Max ring size */
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
#define VMXNET3_RX_RING_MAX_SIZE 4096
#define VMXNET3_RC_RING_MAX_SIZE 8192
+#define VMXNET3_TXDATA_DESC_MIN_SIZE 128
+#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048
+
+#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
+
/* a list of reasons for queue stop */
#define VMXNET3_ERR_NOEOP 0x80000000 /* cannot find the EOP desc of a pkt */
@@ -507,7 +525,9 @@ struct Vmxnet3_TxQueueConf {
__le32 compRingSize; /* # of comp desc */
__le32 ddLen; /* size of driver data */
uint8 intrIdx;
- uint8 _pad[7];
+ uint8 _pad[1];
+ __le16 txDataRingDescSize;
+ uint8 _pad2[4];
}
#include "vmware_pack_end.h"
Vmxnet3_TxQueueConf;
@@ -518,12 +538,14 @@ struct Vmxnet3_RxQueueConf {
__le64 rxRingBasePA[2];
__le64 compRingBasePA;
__le64 ddPA; /* driver data */
- __le64 reserved;
+ __le64 rxDataRingBasePA;
__le32 rxRingSize[2]; /* # of rx desc */
__le32 compRingSize; /* # of rx comp desc */
__le32 ddLen; /* size of driver data */
uint8 intrIdx;
- uint8 _pad[7];
+ uint8 _pad1[1];
+ __le16 rxDataRingDescSize; /* size of rx data ring buffer */
+ uint8 _pad2[4];
}
#include "vmware_pack_end.h"
Vmxnet3_RxQueueConf;
@@ -695,12 +717,65 @@ Vmxnet3_RxQueueDesc;
typedef
#include "vmware_pack_begin.h"
+struct Vmxnet3_SetPolling {
+ uint8 enablePolling;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_SetPolling;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MemoryRegion {
+ __le64 startPA;
+ __le32 length;
+ __le16 txQueueBits; /* bit n corresponding to tx queue n */
+ __le16 rxQueueBits; /* bit n corresponding to rx queue n */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MemoryRegion;
+
+#define MAX_MEMORY_REGION_PER_QUEUE 16
+#define MAX_MEMORY_REGION_PER_DEVICE 256
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MemRegs {
+ __le16 numRegs;
+ __le16 pad[3];
+ Vmxnet3_MemoryRegion memRegs[1];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MemRegs;
+
+/*
+ * If the command data is <= 16 bytes, use the shared memory directly.
+ * Otherwise, use the variable length configuration descriptor.
+ */
+typedef
+#include "vmware_pack_begin.h"
+union Vmxnet3_CmdInfo {
+ Vmxnet3_VariableLenConfDesc varConf;
+ Vmxnet3_SetPolling setPolling;
+ __le64 data[2];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_CmdInfo;
+
+typedef
+#include "vmware_pack_begin.h"
struct Vmxnet3_DriverShared {
__le32 magic;
__le32 pad; /* make devRead start at 64-bit boundaries */
Vmxnet3_DSDevRead devRead;
__le32 ecr;
- __le32 reserved[5];
+ __le32 reserved;
+
+ union {
+ __le32 reserved1[4];
+ Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of executing the
+ * relevant command
+ */
+ } cu;
}
#include "vmware_pack_end.h"
Vmxnet3_DriverShared;
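A minimal sketch of the arithmetic these new definitions imply (not part of the patch; helper names are hypothetical, standard C integer types assumed): the tx data ring descriptor size must fall in the allowed range and be 64-byte aligned, and Vmxnet3_MemRegs uses a trailing-array idiom for its regions.

static int
txdata_desc_size_valid(uint16_t sz)
{
	/* mirrors the range/alignment check done in the PMD */
	return sz >= VMXNET3_TXDATA_DESC_MIN_SIZE &&
	       sz <= VMXNET3_TXDATA_DESC_MAX_SIZE &&
	       (sz & VMXNET3_TXDATA_DESC_SIZE_MASK) == 0;
}

static size_t
memregs_bytes(uint16_t nregs)
{
	/* memRegs[1] already accounts for one region in
	 * sizeof(Vmxnet3_MemRegs), so add nregs - 1 more.
	 */
	return sizeof(Vmxnet3_MemRegs) +
	       (nregs - 1) * sizeof(Vmxnet3_MemoryRegion);
}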
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 8bb13e52..98252bb6 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -56,6 +56,7 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
@@ -69,6 +70,8 @@
#define PROCESS_SYS_EVENTS 0
+#define VMXNET3_TX_MAX_SEG UINT8_MAX
+
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
@@ -138,7 +141,7 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%d_%s",
- dev->driver->pci_drv.driver.name, dev->data->port_id, post_string);
+ dev->data->drv_name, dev->data->port_id, post_string);
mz = rte_memzone_lookup(z_name);
if (!reuse) {
@@ -223,6 +226,24 @@ vmxnet3_disable_intr(struct vmxnet3_hw *hw)
}
/*
+ * Gets tx data ring descriptor size.
+ */
+static uint16_t
+eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
+{
+ uint16 txdata_desc_size;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
+ txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
+ txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
+ txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
+ sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
+}
+
+/*
* It returns 0 on success.
*/
static int
@@ -237,7 +258,8 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
- pci_dev = eth_dev->pci_dev;
+ eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
/*
* for secondary processes, we don't initialize any further as primary
@@ -247,6 +269,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
@@ -261,13 +284,26 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
/* Check h/w version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
- if (ver & 0x1)
- VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
- else {
- PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1");
+
+ if (ver & (1 << VMXNET3_REV_3)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_3);
+ hw->version = VMXNET3_REV_3 + 1;
+ } else if (ver & (1 << VMXNET3_REV_2)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_2);
+ hw->version = VMXNET3_REV_2 + 1;
+ } else if (ver & (1 << VMXNET3_REV_1)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_1);
+ hw->version = VMXNET3_REV_1 + 1;
+ } else {
+ PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
return -EIO;
}
+ PMD_INIT_LOG(DEBUG, "Using device version %d\n", hw->version);
+
/* Check UPT version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
@@ -307,6 +343,14 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
/* allow untagged pkts */
VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
+ hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
+ eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);
+
+ hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+ RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
+ hw->rxdata_desc_size);
+
return 0;
}
@@ -326,6 +370,7 @@ eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
+ eth_dev->tx_pkt_prepare = NULL;
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -333,16 +378,23 @@ eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static struct eth_driver rte_vmxnet3_pmd = {
- .pci_drv = {
- .id_table = pci_id_vmxnet3_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_vmxnet3_dev_init,
- .eth_dev_uninit = eth_vmxnet3_dev_uninit,
- .dev_private_size = sizeof(struct vmxnet3_hw),
+static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
+}
+
+static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
+}
+
+static struct rte_pci_driver rte_vmxnet3_pmd = {
+ .id_table = pci_id_vmxnet3_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_vmxnet3_pci_probe,
+ .remove = eth_vmxnet3_pci_remove,
};
static int
@@ -449,6 +501,92 @@ vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
}
static int
+vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ Vmxnet3_DriverShared *shared = hw->shared;
+ Vmxnet3_CmdInfo *cmdInfo;
+ struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
+ uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
+ uint32_t num, i, j, size;
+
+ if (hw->memRegsPA == 0) {
+ const struct rte_memzone *mz;
+
+ size = sizeof(Vmxnet3_MemRegs) +
+ (VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
+ sizeof(Vmxnet3_MemoryRegion);
+
+ mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
+ 1);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ hw->memRegs = mz->addr;
+ hw->memRegsPA = mz->phys_addr;
+ }
+
+ num = hw->num_rx_queues;
+
+ for (i = 0; i < num; i++) {
+ vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+
+ mp[i] = rxq->mp;
+ index[i] = 1 << i;
+ }
+
+ /*
+ * The same mempool can be used by multiple queues. In that case,
+ * duplicate mempool entries are removed: only one entry is kept, with
+ * a bitmask indicating which queues use that mempool.
+ */
+ for (i = 1; i < num; i++) {
+ for (j = 0; j < i; j++) {
+ if (mp[i] == mp[j]) {
+ mp[i] = NULL;
+ index[j] |= 1 << i;
+ break;
+ }
+ }
+ }
+
+ j = 0;
+ for (i = 0; i < num; i++) {
+ if (mp[i] == NULL)
+ continue;
+
+ Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
+
+ mr->startPA =
+ (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->phys_addr;
+ mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
+ STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
+ mr->txQueueBits = index[i];
+ mr->rxQueueBits = index[i];
+
+ PMD_INIT_LOG(INFO,
+ "index: %u startPA: %" PRIu64 " length: %u, "
+ "rxBits: %x",
+ j, mr->startPA, mr->length, mr->rxQueueBits);
+ j++;
+ }
+ hw->memRegs->numRegs = j;
+ PMD_INIT_LOG(INFO, "numRegs: %u", j);
+
+ size = sizeof(Vmxnet3_MemRegs) +
+ (j - 1) * sizeof(Vmxnet3_MemoryRegion);
+
+ cmdInfo = &shared->cu.cmdInfo;
+ cmdInfo->varConf.confVer = 1;
+ cmdInfo->varConf.confLen = size;
+ cmdInfo->varConf.confPA = hw->memRegsPA;
+
+ return 0;
+}
+
+static int
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
{
struct rte_eth_conf port_conf = dev->data->dev_conf;
@@ -497,6 +635,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
tqd->conf.txRingSize = txq->cmd_ring.size;
tqd->conf.compRingSize = txq->comp_ring.size;
tqd->conf.dataRingSize = txq->data_ring.size;
+ tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
tqd->conf.intrIdx = txq->comp_ring.intr_idx;
tqd->status.stopped = TRUE;
tqd->status.error = 0;
@@ -515,6 +654,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
rqd->conf.compRingSize = rxq->comp_ring.size;
rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
+ if (VMXNET3_VERSION_GE_3(hw)) {
+ rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
+ rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
+ }
rqd->status.stopped = TRUE;
rqd->status.error = 0;
memset(&rqd->stats, 0, sizeof(rqd->stats));
@@ -583,6 +726,20 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
return -EINVAL;
}
+ /* Setup memory region for rx buffers */
+ ret = vmxnet3_dev_setup_memreg(dev);
+ if (ret == 0) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_REGISTER_MEMREGS);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+ if (ret != 0)
+ PMD_INIT_LOG(DEBUG,
+ "Failed in setup memory region cmd\n");
+ ret = 0;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
+ }
+
/* Disable interrupts */
vmxnet3_disable_intr(hw);
@@ -596,6 +753,8 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
return ret;
}
+ hw->adapter_stopped = FALSE;
+
/* Setting proper Rx Mode and issue Rx Mode Update command */
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
@@ -706,13 +865,16 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
}
static void
-vmxnet3_dev_info_get(__rte_unused struct rte_eth_dev *dev,
+vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
@@ -728,6 +890,8 @@ vmxnet3_dev_info_get(__rte_unused struct rte_eth_dev *dev,
.nb_max = VMXNET3_TX_RING_MAX_SIZE,
.nb_min = VMXNET3_DEF_TX_RING_SIZE,
.nb_align = 1,
+ .nb_seg_max = VMXNET3_TX_MAX_SEG,
+ .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
};
dev_info->rx_offload_capa =
@@ -771,7 +935,7 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- struct rte_eth_link old, link;
+ struct rte_eth_link old = { 0 }, link;
uint32_t ret;
/* Link status doesn't change for stopped dev */
@@ -960,5 +1124,6 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
}
#endif
-RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio");
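A minimal sketch of the revision negotiation eth_vmxnet3_dev_init() now performs (not part of the patch; the function name is hypothetical and the VMXNET3_REV_* constants come from vmxnet3_ethdev.h): VMXNET3_REG_VRRS is a bit mask of revisions the device supports, the driver picks the highest one it understands, writes that single bit back, and stores the revision number plus one in hw->version for the VMXNET3_VERSION_GE_* macros.

static uint8_t
vmxnet3_pick_version(uint32_t vrrs)
{
	if (vrrs & (1 << VMXNET3_REV_3))
		return VMXNET3_REV_3 + 1;	/* device supports rev. 3 */
	if (vrrs & (1 << VMXNET3_REV_2))
		return VMXNET3_REV_2 + 1;	/* rev. 2 */
	if (vrrs & (1 << VMXNET3_REV_1))
		return VMXNET3_REV_1 + 1;	/* rev. 1 */
	return 0;				/* no compatible revision */
}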
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 7d3b11ee..7a032629 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -34,6 +34,8 @@
#ifndef _VMXNET3_ETHDEV_H_
#define _VMXNET3_ETHDEV_H_
+#include <rte_io.h>
+
#define VMXNET3_MAX_MAC_ADDRS 1
/* UPT feature to negotiate */
@@ -99,6 +101,11 @@ struct vmxnet3_hw {
uint8_t num_rx_queues;
uint8_t bufs_per_pkt;
+ uint8_t version;
+
+ uint16_t txdata_desc_size; /* tx data ring buffer size */
+ uint16_t rxdata_desc_size; /* rx data ring buffer size */
+
Vmxnet3_TxQueueDesc *tqd_start; /* start address of all tx queue desc */
Vmxnet3_RxQueueDesc *rqd_start; /* start address of all rx queue desc */
@@ -112,15 +119,24 @@ struct vmxnet3_hw {
uint64_t rss_confPA;
vmxnet3_mf_table_t *mf_table;
uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
+ Vmxnet3_MemRegs *memRegs;
+ uint64_t memRegsPA;
#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
};
+#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
+#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
+#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */
+
+#define VMXNET3_VERSION_GE_3(hw) ((hw)->version >= VMXNET3_REV_3 + 1)
+#define VMXNET3_VERSION_GE_2(hw) ((hw)->version >= VMXNET3_REV_2 + 1)
+
#define VMXNET3_GET_ADDR_LO(reg) ((uint32_t)(reg))
#define VMXNET3_GET_ADDR_HI(reg) ((uint32_t)(((uint64_t)(reg)) >> 32))
/* Config space read/writes */
-#define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define VMXNET3_PCI_REG(reg) rte_read32(reg)
static inline uint32_t
vmxnet3_read_addr(volatile void *addr)
@@ -128,9 +144,7 @@ vmxnet3_read_addr(volatile void *addr)
return VMXNET3_PCI_REG(addr);
}
-#define VMXNET3_PCI_REG_WRITE(reg, value) do { \
- VMXNET3_PCI_REG((reg)) = (value); \
-} while(0)
+#define VMXNET3_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
#define VMXNET3_PCI_BAR0_REG_ADDR(hw, reg) \
((volatile uint32_t *)((char *)(hw)->hw_addr0 + (reg)))
@@ -146,6 +160,20 @@ vmxnet3_read_addr(volatile void *addr)
#define VMXNET3_WRITE_BAR1_REG(hw, reg, value) \
VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)), (value))
+static inline uint8_t
+vmxnet3_get_ring_idx(struct vmxnet3_hw *hw, uint32 rqID)
+{
+ return (rqID >= hw->num_rx_queues &&
+ rqID < 2 * hw->num_rx_queues) ? 1 : 0;
+}
+
+static inline bool
+vmxnet3_rx_data_ring(struct vmxnet3_hw *hw, uint32 rqID)
+{
+ return (rqID >= 2 * hw->num_rx_queues &&
+ rqID < 3 * hw->num_rx_queues);
+}
+
/*
* RX/TX function prototypes
*/
@@ -171,5 +199,7 @@ uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t vmxnet3_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
#endif /* _VMXNET3_ETHDEV_H_ */
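The two inline helpers added above encode how completion descriptors name their source ring: rqID 0..N-1 is command ring 0, N..2N-1 is command ring 1, and 2N..3N-1 is the rx data ring, where N is num_rx_queues. A small illustrative classifier, not part of the patch (the function name and printf usage are assumptions; <stdio.h> and the driver headers are required):

static void
vmxnet3_describe_rqid(struct vmxnet3_hw *hw, uint32 rqID)
{
	if (vmxnet3_rx_data_ring(hw, rqID))
		printf("rqID %u: rx data ring of queue %u\n",
		       rqID, rqID - 2 * hw->num_rx_queues);
	else
		printf("rqID %u: cmd ring %u of queue %u\n",
		       rqID, vmxnet3_get_ring_idx(hw, rqID),
		       rqID % hw->num_rx_queues);
}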
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
index b50d2b00..d2e8323b 100644
--- a/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -42,6 +42,9 @@
#define VMXNET3_DEF_TX_RING_SIZE 512
#define VMXNET3_DEF_RX_RING_SIZE 128
+/* Default rx data ring desc size */
+#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
+
#define VMXNET3_SUCCESS 0
#define VMXNET3_FAIL -1
@@ -138,9 +141,11 @@ typedef struct vmxnet3_tx_queue {
uint32_t qid;
struct Vmxnet3_TxQueueDesc *shared;
struct vmxnet3_txq_stats stats;
+ const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device TX queue index. */
uint8_t port_id; /**< Device port identifier. */
+ uint16_t txdata_desc_size;
} vmxnet3_tx_queue_t;
struct vmxnet3_rxq_stats {
@@ -150,17 +155,28 @@ struct vmxnet3_rxq_stats {
uint64_t rx_buf_alloc_failure;
};
+struct vmxnet3_rx_data_ring {
+ uint8_t *base;
+ uint64_t basePA;
+ uint32_t size;
+};
+
typedef struct vmxnet3_rx_queue {
struct rte_mempool *mp;
struct vmxnet3_hw *hw;
struct vmxnet3_cmd_ring cmd_ring[VMXNET3_RX_CMDRING_SIZE];
struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_rx_data_ring data_ring;
+ uint16_t data_desc_size;
uint32_t qid1;
uint32_t qid2;
+ /* rqID in RCD for buffer from data ring */
+ uint32_t data_ring_qid;
Vmxnet3_RxQueueDesc *shared;
struct rte_mbuf *start_seg;
struct rte_mbuf *last_seg;
struct vmxnet3_rxq_stats stats;
+ const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device RX queue index. */
uint8_t port_id; /**< Device port identifier. */
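All of an rx queue's descriptor memory, including the new data ring, lives in a single memzone; a minimal sketch of the size computation vmxnet3_dev_rx_queue_setup() performs over these fields (not part of the patch; the helper name is hypothetical):

static size_t
vmxnet3_rxq_desc_mem_size(const vmxnet3_rx_queue_t *rxq, int has_data_ring)
{
	size_t size;

	/* both command rings, then the completion ring ... */
	size  = sizeof(struct Vmxnet3_RxDesc) *
		(rxq->cmd_ring[0].size + rxq->cmd_ring[1].size);
	size += sizeof(struct Vmxnet3_RxCompDesc) * rxq->comp_ring.size;
	/* ... and, on VMXNET3 v3 devices, the rx data ring at the end */
	if (has_data_ring)
		size += rxq->data_desc_size * rxq->data_ring.size;
	return size;
}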
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 93db10fb..e865c675 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -69,6 +69,7 @@
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
+#include <rte_net.h>
#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"
@@ -76,6 +77,14 @@
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
+#define VMXNET3_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
+
static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
@@ -192,6 +201,8 @@ vmxnet3_dev_tx_queue_release(void *txq)
vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
/* Release the cmd_ring */
vmxnet3_cmd_ring_release(&tq->cmd_ring);
+ /* Release the memzone */
+ rte_memzone_free(tq->mz);
}
}
@@ -209,6 +220,9 @@ vmxnet3_dev_rx_queue_release(void *rxq)
/* Release both the cmd_rings */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
+
+ /* Release the memzone */
+ rte_memzone_free(rq->mz);
}
}
@@ -235,7 +249,7 @@ vmxnet3_dev_tx_queue_reset(void *txq)
size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
- size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+ size += tq->txdata_desc_size * data_ring->size;
memset(ring->base, 0, size);
}
@@ -245,8 +259,10 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
{
int i;
vmxnet3_rx_queue_t *rq = rxq;
+ struct vmxnet3_hw *hw = rq->hw;
struct vmxnet3_cmd_ring *ring0, *ring1;
struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
int size;
if (rq != NULL) {
@@ -271,6 +287,8 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ if (VMXNET3_VERSION_GE_3(hw) && rq->data_desc_size)
+ size += rq->data_desc_size * data_ring->size;
memset(ring0->base, 0, size);
}
@@ -350,6 +368,53 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
}
uint16_t
+vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Non-TSO packet cannot occupy more than
+ * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
+ */
+ if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
+ m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ /* check that only supported TX offloads are requested. */
+ if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -415,10 +480,13 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
if (txm->nb_segs == 1 &&
- rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+ rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
struct Vmxnet3_TxDataDesc *tdd;
- tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
+ tdd = (struct Vmxnet3_TxDataDesc *)
+ ((uint8 *)txq->data_ring.base +
+ txq->cmd_ring.next2fill *
+ txq->txdata_desc_size);
copy_size = rte_pktmbuf_pkt_len(txm);
rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
}
@@ -435,12 +503,15 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* maximum size of mbuf segment size.
*/
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
- if (copy_size)
- gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
- txq->cmd_ring.next2fill *
- sizeof(struct Vmxnet3_TxDataDesc));
- else
+ if (copy_size) {
+ uint64 offset = txq->cmd_ring.next2fill *
+ txq->txdata_desc_size;
+ gdesc->txd.addr =
+ rte_cpu_to_le_64(txq->data_ring.basePA +
+ offset);
+ } else {
gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+ }
gdesc->dword[2] = dw2 | m_seg->data_len;
gdesc->dword[3] = 0;
@@ -696,7 +767,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
idx = rcd->rxdIdx;
- ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
+ ring_idx = vmxnet3_get_ring_idx(hw, rcd->rqID);
rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
RTE_SET_USED(rxd); /* used only for assert when enabled */
rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
@@ -762,6 +833,15 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
goto rcd_done;
}
+ if (vmxnet3_rx_data_ring(hw, rcd->rqID)) {
+ uint8_t *rdd = rxq->data_ring.base +
+ idx * rxq->data_desc_size;
+
+ RTE_ASSERT(VMXNET3_VERSION_GE_3(hw));
+ rte_memcpy(rte_pktmbuf_mtod(rxm, char *),
+ rdd, rcd->len);
+ }
+
rxq->start_seg = rxm;
vmxnet3_rx_offload(rcd, rxm);
} else {
@@ -816,30 +896,6 @@ rcd_done:
return nb_rx;
}
-/*
- * Create memzone for device rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
- dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, VMXNET3_RING_BA_ALIGN);
-}
-
int
vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -876,6 +932,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->hw = hw;
txq->qid = queue_idx;
txq->stopped = TRUE;
+ txq->txdata_desc_size = hw->txdata_desc_size;
ring = &txq->cmd_ring;
comp_ring = &txq->comp_ring;
@@ -905,13 +962,15 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
- size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+ size += txq->txdata_desc_size * data_ring->size;
- mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+ mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
}
+ txq->mz = mz;
memset(mz->addr, 0, mz->len);
/* cmd_ring initialization */
@@ -955,6 +1014,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct vmxnet3_hw *hw = dev->data->dev_private;
struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_rx_data_ring *data_ring;
int size;
uint8_t i;
char mem_name[32];
@@ -975,11 +1035,14 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->hw = hw;
rxq->qid1 = queue_idx;
rxq->qid2 = queue_idx + hw->num_rx_queues;
+ rxq->data_ring_qid = queue_idx + 2 * hw->num_rx_queues;
+ rxq->data_desc_size = hw->rxdata_desc_size;
rxq->stopped = TRUE;
ring0 = &rxq->cmd_ring[0];
ring1 = &rxq->cmd_ring[1];
comp_ring = &rxq->comp_ring;
+ data_ring = &rxq->data_ring;
/* Rx vmxnet rings length should be between 256-4096 */
if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
@@ -995,6 +1058,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
comp_ring->size = ring0->size + ring1->size;
+ data_ring->size = ring0->size;
/* Rx vmxnet rings structure initialization */
ring0->next2fill = 0;
@@ -1008,12 +1072,16 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
+ size += rxq->data_desc_size * data_ring->size;
- mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
+ mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
}
+ rxq->mz = mz;
memset(mz->addr, 0, mz->len);
/* cmd_ring0 initialization */
@@ -1029,6 +1097,14 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
ring1->size;
+ /* data_ring initialization */
+ if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size) {
+ data_ring->base =
+ (uint8_t *)(comp_ring->base + comp_ring->size);
+ data_ring->basePA = comp_ring->basePA +
+ sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ }
+
/* cmd_ring0-cmd_ring1 buf_info allocation */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
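With tx_pkt_prepare now wired up to vmxnet3_prep_pkts(), an application can have the PMD reject unsupported requests before transmitting. A minimal usage sketch, not part of the patch; handle_bad_pkt() is a placeholder and the exact ethdev parameter types are glossed over:

static void
xmit_with_prepare(uint16_t port_id, uint16_t queue_id,
		  struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t nb_prep, nb_tx;

	/* vmxnet3_prep_pkts() screens offload flags and segment counts */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, n);
	if (nb_prep < n) {
		/* pkts[nb_prep] was rejected; rte_errno reports why, e.g.
		 * SCTP checksum offload or too many segments for a
		 * non-TSO packet.
		 */
		handle_bad_pkt(pkts[nb_prep]);
	}

	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
	RTE_SET_USED(nb_tx);
}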