author	Ido Barnea <ibarnea@cisco.com>	2017-02-05 15:21:19 +0200
committer	Ido Barnea <ibarnea@cisco.com>	2017-02-13 12:32:01 +0200
commit	9ca4a157305e4e23a892ba9bafc9eee0f66954ce (patch)
tree	1a8afcf815fd33e7623e3c16246abe86c01bc8fd /src/dpdk/drivers/net/vmxnet3
parent	2dab8f65015e9fa90df395be6ee1a07e9ac71044 (diff)
dpdk1702-rc2 upstream files unchanged + mlx5 driver rc3
Signed-off-by: Ido Barnea <ibarnea@cisco.com>
Diffstat (limited to 'src/dpdk/drivers/net/vmxnet3')
-rw-r--r--	src/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c	| 166
-rw-r--r--	src/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h	|  42
-rw-r--r--	src/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h	|  22
-rw-r--r--	src/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c	| 189
4 files changed, 278 insertions, 141 deletions
diff --git a/src/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/src/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 47fdc3ec..ff63a536 100644
--- a/src/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/src/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -69,6 +69,8 @@
#define PROCESS_SYS_EVENTS 0
+#define VMXNET3_TX_MAX_SEG UINT8_MAX
+
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
@@ -81,11 +83,11 @@ static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete);
+ int wait_to_complete);
static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
- struct rte_eth_stats *stats);
+ struct rte_eth_stats *stats);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+ struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
@@ -118,7 +120,7 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
.link_update = vmxnet3_dev_link_update,
.stats_get = vmxnet3_dev_stats_get,
- .mac_addr_set = vmxnet3_mac_addr_set,
+ .mac_addr_set = vmxnet3_mac_addr_set,
.dev_infos_get = vmxnet3_dev_info_get,
.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
@@ -131,20 +133,27 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
- const char *post_string, int socket_id, uint16_t align)
+ const char *post_string, int socket_id,
+ uint16_t align, bool reuse)
{
char z_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%d_%s",
- dev->driver->pci_drv.name, dev->data->port_id, post_string);
+ dev->data->drv_name, dev->data->port_id, post_string);
mz = rte_memzone_lookup(z_name);
+ if (!reuse) {
+ if (mz)
+ rte_memzone_free(mz);
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ 0, align);
+ }
+
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, size,
- socket_id, 0, align);
+ return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
}
/**
@@ -194,7 +203,7 @@ vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
struct rte_eth_link *src = link;
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
+ *(uint64_t *)src) == 0)
return -1;
return 0;
@@ -212,7 +221,7 @@ vmxnet3_disable_intr(struct vmxnet3_hw *hw)
hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
for (i = 0; i < VMXNET3_MAX_INTRS; i++)
- VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
+ VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}
/*
@@ -230,7 +239,8 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
- pci_dev = eth_dev->pci_dev;
+ eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
/*
* for secondary processes, we don't initialize any further as primary
@@ -240,6 +250,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
@@ -274,8 +285,8 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
/* Getting MAC Address */
mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
- memcpy(hw->perm_addr , &mac_lo, 4);
- memcpy(hw->perm_addr+4, &mac_hi, 2);
+ memcpy(hw->perm_addr, &mac_lo, 4);
+ memcpy(hw->perm_addr + 4, &mac_hi, 2);
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
@@ -319,6 +330,7 @@ eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
+ eth_dev->tx_pkt_prepare = NULL;
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -328,29 +340,16 @@ eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
static struct eth_driver rte_vmxnet3_pmd = {
.pci_drv = {
- .name = "rte_vmxnet3_pmd",
.id_table = pci_id_vmxnet3_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_vmxnet3_dev_init,
.eth_dev_uninit = eth_vmxnet3_dev_uninit,
.dev_private_size = sizeof(struct vmxnet3_hw),
};
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of Virtual PCI VMXNET3 devices.
- */
-static int
-rte_vmxnet3_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_vmxnet3_pmd);
- return 0;
-}
-
static int
vmxnet3_dev_configure(struct rte_eth_dev *dev)
{
@@ -360,9 +359,16 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (dev->data->nb_rx_queues > UINT8_MAX ||
- dev->data->nb_tx_queues > UINT8_MAX)
+ if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
+ dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
+ PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
return -EINVAL;
+ }
+
+ if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
+ PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
+ return -EINVAL;
+ }
size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
@@ -378,7 +384,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
* on current socket
*/
mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
- "shared", rte_socket_id(), 8);
+ "shared", rte_socket_id(), 8, 1);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
@@ -391,10 +397,14 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
/*
* Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
- * on current socket
+ * on current socket.
+ *
+ * We cannot reuse this memzone from previous allocation as its size
+ * depends on the number of tx and rx queues, which could be different
+ * from one config to another.
*/
- mz = gpa_zone_reserve(dev, size, "queuedesc",
- rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
+ mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
+ VMXNET3_QUEUE_DESC_ALIGN, 0);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
@@ -408,10 +418,10 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
hw->queue_desc_len = (uint16_t)size;
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
-
/* Allocate memory structure for UPT1_RSSConf and configure */
- mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
- rte_socket_id(), RTE_CACHE_LINE_SIZE);
+ mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
+ "rss_conf", rte_socket_id(),
+ RTE_CACHE_LINE_SIZE, 1);
if (mz == NULL) {
PMD_INIT_LOG(ERR,
"ERROR: Creating rss_conf structure zone");
@@ -459,8 +469,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
/* Setting up Guest OS information */
devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
- VMXNET3_GOS_BITS_32 :
- VMXNET3_GOS_BITS_64;
+ VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
devRead->misc.driverInfo.vmxnet3RevSpt = 1;
devRead->misc.driverInfo.uptVerSpt = 1;
@@ -523,6 +532,11 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.hw_ip_checksum)
devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
+ if (dev->data->dev_conf.rxmode.enable_lro) {
+ devRead->misc.uptFeatures |= VMXNET3_F_LRO;
+ devRead->misc.maxNumRxSG = 0;
+ }
+
if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
ret = vmxnet3_rss_configure(dev);
if (ret != VMXNET3_SUCCESS)
@@ -535,7 +549,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
}
vmxnet3_dev_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
vmxnet3_write_mac(hw, hw->perm_addr);
@@ -550,7 +564,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
static int
vmxnet3_dev_start(struct rte_eth_dev *dev)
{
- int status, ret;
+ int ret;
struct vmxnet3_hw *hw = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
@@ -567,11 +581,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
/* Activate device by register write */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
- status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
- if (status != 0) {
+ if (ret != 0) {
PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
- return -1;
+ return -EINVAL;
}
/* Disable interrupts */
@@ -583,7 +597,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
*/
ret = vmxnet3_dev_rxtx_init(dev);
if (ret != VMXNET3_SUCCESS) {
- PMD_INIT_LOG(ERR, "Device receive init: UNSUCCESSFUL");
+ PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
return ret;
}
@@ -598,7 +612,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
vmxnet3_process_events(hw);
#endif
- return status;
+ return VMXNET3_SUCCESS;
}
/*
@@ -664,16 +678,15 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;
stats->q_opackets[i] = txStats->ucastPktsTxOK +
- txStats->mcastPktsTxOK +
- txStats->bcastPktsTxOK;
+ txStats->mcastPktsTxOK +
+ txStats->bcastPktsTxOK;
stats->q_obytes[i] = txStats->ucastBytesTxOK +
- txStats->mcastBytesTxOK +
- txStats->bcastBytesTxOK;
+ txStats->mcastBytesTxOK +
+ txStats->bcastBytesTxOK;
stats->opackets += stats->q_opackets[i];
stats->obytes += stats->q_obytes[i];
- stats->oerrors += txStats->pktsTxError +
- txStats->pktsTxDiscard;
+ stats->oerrors += txStats->pktsTxError + txStats->pktsTxDiscard;
}
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
@@ -681,12 +694,12 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;
stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
- rxStats->mcastPktsRxOK +
- rxStats->bcastPktsRxOK;
+ rxStats->mcastPktsRxOK +
+ rxStats->bcastPktsRxOK;
stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
- rxStats->mcastBytesRxOK +
- rxStats->bcastBytesRxOK;
+ rxStats->mcastBytesRxOK +
+ rxStats->bcastBytesRxOK;
stats->ipackets += stats->q_ipackets[i];
stats->ibytes += stats->q_ibytes[i];
@@ -698,16 +711,17 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
}
static void
-vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev,
+vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
- dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
- /* TRex patch */
dev_info->speed_capa = ETH_LINK_SPEED_10G;
+ dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
@@ -722,12 +736,15 @@ vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev,
.nb_max = VMXNET3_TX_RING_MAX_SIZE,
.nb_min = VMXNET3_DEF_TX_RING_SIZE,
.nb_align = 1,
+ .nb_seg_max = VMXNET3_TX_MAX_SEG,
+ .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
};
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
@@ -760,14 +777,16 @@ vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
/* return 0 means link status changed, -1 means not changed */
static int
-vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
+vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
struct rte_eth_link old, link;
uint32_t ret;
+ /* Link status doesn't change for stopped dev */
if (dev->data->dev_started == 0)
- return -1; /* Link status doesn't change for stopped dev */
+ return -1;
memset(&link, 0, sizeof(link));
vmxnet3_dev_atomic_read_link_status(dev, &old);
@@ -789,8 +808,8 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wai
/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
static void
-vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) {
-
+vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
+{
struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
if (set)
@@ -923,11 +942,13 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
/* Check if link state has changed */
if (events & VMXNET3_ECR_LINK)
PMD_INIT_LOG(ERR,
- "Process events in %s(): VMXNET3_ECR_LINK event", __func__);
+ "Process events in %s(): VMXNET3_ECR_LINK event",
+ __func__);
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
- VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_QUEUE_STATUS);
if (hw->tqd_start->status.stopped)
PMD_INIT_LOG(ERR, "tq error 0x%x",
@@ -946,14 +967,9 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
if (events & VMXNET3_ECR_DEBUG)
PMD_INIT_LOG(ERR, "Debug event generated by device.");
-
}
#endif
-static struct rte_driver rte_vmxnet3_driver = {
- .type = PMD_PDEV,
- .init = rte_vmxnet3_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_vmxnet3_driver, vmxnet3);
-DRIVER_REGISTER_PCI_TABLE(vmxnet3, pci_id_vmxnet3_map);
+RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio");
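The hunks above wire vmxnet3_prep_pkts into the eth_dev->tx_pkt_prepare hook that DPDK 17.02 introduced. For context, a minimal application-side sketch of how that hook is exercised through the generic rte_eth_tx_prepare() API; the send_burst() wrapper and the port/queue ids are illustrative, not part of this patch:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_errno.h>

/* Run the PMD's tx_pkt_prepare checks (vmxnet3_prep_pkts for this
 * driver) before handing packets to tx_burst. */
static uint16_t
send_burst(uint8_t port, uint16_t queue, struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port, queue, pkts, n);

	if (nb_prep < n)
		/* vmxnet3_prep_pkts records the failure cause in rte_errno */
		printf("pkt %u rejected, rte_errno=%d\n", nb_prep, rte_errno);

	return rte_eth_tx_burst(port, queue, pkts, nb_prep);
}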
diff --git a/src/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h b/src/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 1be833ab..348c840b 100644
--- a/src/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/src/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -34,6 +34,8 @@
#ifndef _VMXNET3_ETHDEV_H_
#define _VMXNET3_ETHDEV_H_
+#include <rte_io.h>
+
#define VMXNET3_MAX_MAC_ADDRS 1
/* UPT feature to negotiate */
@@ -62,8 +64,7 @@
ETH_RSS_NONFRAG_IPV6_TCP)
/* RSS configuration structure - shared with device through GPA */
-typedef
-struct VMXNET3_RSSConf {
+typedef struct VMXNET3_RSSConf {
uint16_t hashType;
uint16_t hashFunc;
uint16_t hashKeySize;
@@ -76,15 +77,13 @@ struct VMXNET3_RSSConf {
uint8_t indTable[VMXNET3_RSS_MAX_IND_TABLE_SIZE];
} VMXNET3_RSSConf;
-typedef
-struct vmxnet3_mf_table {
+typedef struct vmxnet3_mf_table {
void *mfTableBase; /* Multicast addresses list */
uint64_t mfTablePA; /* Physical address of the list */
uint16_t num_addrs; /* number of multicast addrs */
} vmxnet3_mf_table_t;
struct vmxnet3_hw {
-
uint8_t *hw_addr0; /* BAR0: PT-Passthrough Regs */
uint8_t *hw_addr1; /* BAR1: VD-Virtual Device Regs */
/* BAR2: MSI-X Regs */
@@ -111,10 +110,10 @@ struct vmxnet3_hw {
uint64_t queueDescPA;
uint16_t queue_desc_len;
- VMXNET3_RSSConf *rss_conf;
- uint64_t rss_confPA;
- vmxnet3_mf_table_t *mf_table;
- uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
+ VMXNET3_RSSConf *rss_conf;
+ uint64_t rss_confPA;
+ vmxnet3_mf_table_t *mf_table;
+ uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
};
@@ -123,16 +122,15 @@ struct vmxnet3_hw {
/* Config space read/writes */
-#define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define VMXNET3_PCI_REG(reg) rte_read32(reg)
-static inline uint32_t vmxnet3_read_addr(volatile void *addr)
+static inline uint32_t
+vmxnet3_read_addr(volatile void *addr)
{
return VMXNET3_PCI_REG(addr);
}
-#define VMXNET3_PCI_REG_WRITE(reg, value) do { \
- VMXNET3_PCI_REG((reg)) = (value); \
-} while(0)
+#define VMXNET3_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
#define VMXNET3_PCI_BAR0_REG_ADDR(hw, reg) \
((volatile uint32_t *)((char *)(hw)->hw_addr0 + (reg)))
@@ -158,20 +156,22 @@ void vmxnet3_dev_rx_queue_release(void *rxq);
void vmxnet3_dev_tx_queue_release(void *txq);
int vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
- uint16_t nb_rx_desc, unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mb_pool);
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
int vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
- uint16_t nb_tx_desc, unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf);
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
int vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev);
int vmxnet3_rss_configure(struct rte_eth_dev *dev);
uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+ uint16_t nb_pkts);
uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+ uint16_t nb_pkts);
+uint16_t vmxnet3_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
#endif /* _VMXNET3_ETHDEV_H_ */
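The header diff above replaces the raw volatile dereference behind VMXNET3_PCI_REG with the rte_io.h accessors. A minimal sketch of the difference, assuming an already-mapped register address: both forms read 32 bits, but rte_read32()/rte_write32() also provide the I/O memory-ordering guarantees needed on weakly ordered architectures.

#include <stdint.h>
#include <rte_io.h>

/* Old style: plain volatile dereference, no ordering guarantees. */
static inline uint32_t
reg_read_old(volatile void *addr)
{
	return *(volatile uint32_t *)addr;
}

/* New style: what VMXNET3_PCI_REG now expands to. */
static inline uint32_t
reg_read_new(volatile void *addr)
{
	return rte_read32(addr);
}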
diff --git a/src/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h b/src/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h
index 69ff2ded..b50d2b00 100644
--- a/src/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/src/dpdk/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -96,12 +96,12 @@ vmxnet3_cmd_ring_desc_empty(struct vmxnet3_cmd_ring *ring)
}
typedef struct vmxnet3_comp_ring {
- uint32_t size;
- uint32_t next2proc;
- uint8_t gen;
- uint8_t intr_idx;
+ uint32_t size;
+ uint32_t next2proc;
+ uint8_t gen;
+ uint8_t intr_idx;
Vmxnet3_GenericDesc *base;
- uint64_t basePA;
+ uint64_t basePA;
} vmxnet3_comp_ring_t;
struct vmxnet3_data_ring {
@@ -121,13 +121,13 @@ vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
}
struct vmxnet3_txq_stats {
- uint64_t drop_total; /* # of pkts dropped by the driver,
+ uint64_t drop_total; /* # of pkts dropped by the driver,
* the counters below track droppings due to
* different reasons
*/
- uint64_t drop_too_many_segs;
- uint64_t drop_tso;
- uint64_t tx_ring_full;
+ uint64_t drop_too_many_segs;
+ uint64_t drop_tso;
+ uint64_t tx_ring_full;
};
typedef struct vmxnet3_tx_queue {
@@ -158,8 +158,8 @@ typedef struct vmxnet3_rx_queue {
uint32_t qid1;
uint32_t qid2;
Vmxnet3_RxQueueDesc *shared;
- struct rte_mbuf *start_seg;
- struct rte_mbuf *last_seg;
+ struct rte_mbuf *start_seg;
+ struct rte_mbuf *last_seg;
struct vmxnet3_rxq_stats stats;
bool stopped;
uint16_t queue_id; /**< Device RX queue index. */
diff --git a/src/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c b/src/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 9deeb3ff..b246884b 100644
--- a/src/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/src/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -57,7 +57,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
@@ -70,6 +69,7 @@
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
+#include <rte_net.h>
#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"
@@ -77,6 +77,14 @@
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
+#define VMXNET3_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
+
static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
@@ -141,10 +149,10 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
#endif
static void
-vmxnet3_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
+vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
{
while (ring->next2comp != ring->next2fill) {
- /* No need to worry about tx desc ownership, device is quiesced by now. */
+ /* No need to worry about desc ownership, device is quiesced by now. */
vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
if (buf_info->m) {
@@ -158,20 +166,39 @@ vmxnet3_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
}
static void
+vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
+{
+ uint32_t i;
+
+ for (i = 0; i < ring->size; i++) {
+ /* No need to worry about desc ownership, device is quiesced by now. */
+ vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];
+
+ if (buf_info->m) {
+ rte_pktmbuf_free_seg(buf_info->m);
+ buf_info->m = NULL;
+ buf_info->bufPA = 0;
+ buf_info->len = 0;
+ }
+ vmxnet3_cmd_ring_adv_next2comp(ring);
+ }
+}
+
+static void
vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
{
- vmxnet3_cmd_ring_release_mbufs(ring);
rte_free(ring->buf_info);
ring->buf_info = NULL;
}
-
void
vmxnet3_dev_tx_queue_release(void *txq)
{
vmxnet3_tx_queue_t *tq = txq;
if (tq != NULL) {
+ /* Release mbufs */
+ vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
/* Release the cmd_ring */
vmxnet3_cmd_ring_release(&tq->cmd_ring);
}
@@ -184,6 +211,10 @@ vmxnet3_dev_rx_queue_release(void *rxq)
vmxnet3_rx_queue_t *rq = rxq;
if (rq != NULL) {
+ /* Release mbufs */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+ vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
+
/* Release both the cmd_rings */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
@@ -201,7 +232,7 @@ vmxnet3_dev_tx_queue_reset(void *txq)
if (tq != NULL) {
/* Release the cmd_ring mbufs */
- vmxnet3_cmd_ring_release_mbufs(&tq->cmd_ring);
+ vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
}
/* Tx vmxnet rings structure initialization*/
@@ -230,7 +261,7 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
if (rq != NULL) {
/* Release both the cmd_rings mbufs */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
- vmxnet3_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
+ vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
}
ring0 = &rq->cmd_ring[0];
@@ -328,6 +359,53 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
}
uint16_t
+vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Non-TSO packet cannot occupy more than
+ * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
+ */
+ if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
+ m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ /* check that only supported TX offloads are requested. */
+ if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -392,7 +470,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
continue;
}
- if (txm->nb_segs == 1 && rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+ if (txm->nb_segs == 1 &&
+ rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
struct Vmxnet3_TxDataDesc *tdd;
tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
@@ -414,8 +493,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
if (copy_size)
gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
- txq->cmd_ring.next2fill *
- sizeof(struct Vmxnet3_TxDataDesc));
+ txq->cmd_ring.next2fill *
+ sizeof(struct Vmxnet3_TxDataDesc));
else
gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
@@ -495,16 +574,41 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+static inline void
+vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
+ struct rte_mbuf *mbuf)
+{
+ uint32_t val = 0;
+ struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+ struct Vmxnet3_RxDesc *rxd =
+ (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
+ vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
+ if (ring_id == 0)
+ val = VMXNET3_RXD_BTYPE_HEAD;
+ else
+ val = VMXNET3_RXD_BTYPE_BODY;
+
+ buf_info->m = mbuf;
+ buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
+ buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+
+ rxd->addr = buf_info->bufPA;
+ rxd->btype = val;
+ rxd->len = buf_info->len;
+ rxd->gen = ring->gen;
+
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+}
/*
* Allocates mbufs and clusters. Post rx descriptors with buffer details
* so that device can receive packets in those buffers.
- * Ring layout:
- * Among the two rings, 1st ring contains buffers of type 0 and type1.
+ * Ring layout:
+ * Among the two rings, 1st ring contains buffers of type 0 and type 1.
* bufs_per_pkt is set such that for non-LRO cases all the buffers required
* by a frame will fit in 1st ring (1st buf of type0 and rest of type1).
* 2nd ring contains buffers of type 1 alone. Second ring mostly be used
* only for LRO.
- *
*/
static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
@@ -549,8 +653,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len -
RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA =
- rte_mbuf_data_dma_addr_default(mbuf);
+ buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
/* Load Rx Descriptor with the buffer's GPA */
rxd->addr = buf_info->bufPA;
@@ -636,9 +739,18 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
while (rcd->gen == rxq->comp_ring.gen) {
+ struct rte_mbuf *newm;
+
if (nb_rx >= nb_pkts)
break;
+ newm = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(newm == NULL)) {
+ PMD_RX_LOG(ERR, "Error allocating mbuf");
+ rxq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+
idx = rcd->rxdIdx;
ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
@@ -676,7 +788,6 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
goto rcd_done;
}
-
/* Initialize newly received packet buffer */
rxm->port = rxq->port_id;
rxm->nb_segs = 1;
@@ -736,10 +847,11 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rcd_done:
rxq->cmd_ring[ring_idx].next2comp = idx;
- VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);
+ VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
+ rxq->cmd_ring[ring_idx].size);
- /* It's time to allocate some new buf and renew descriptors */
- vmxnet3_post_rx_bufs(rxq, ring_idx);
+ /* It's time to renew descriptors */
+ vmxnet3_renew_desc(rxq, ring_idx, newm);
if (unlikely(rxq->shared->ctrl.updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
rxq->cmd_ring[ring_idx].next2fill);
@@ -751,8 +863,7 @@ rcd_done:
rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
nb_rxd++;
if (nb_rxd > rxq->cmd_ring[0].size) {
- PMD_RX_LOG(ERR,
- "Used up quota of receiving packets,"
+ PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
" relinquish control.");
break;
}
@@ -774,15 +885,15 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
+ dev->driver->pci_drv.driver.name, ring_name,
+ dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
if (mz)
return mz;
return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, VMXNET3_RING_BA_ALIGN);
+ socket_id, 0, VMXNET3_RING_BA_ALIGN);
}
int
@@ -790,7 +901,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
+ __rte_unused const struct rte_eth_txconf *tx_conf)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
const struct rte_memzone *mz;
@@ -808,7 +919,8 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE);
+ txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
+ RTE_CACHE_LINE_SIZE);
if (txq == NULL) {
PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
return -ENOMEM;
@@ -891,12 +1003,12 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
const struct rte_memzone *mz;
struct vmxnet3_rx_queue *rxq;
- struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
struct vmxnet3_comp_ring *comp_ring;
int size;
@@ -905,7 +1017,8 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE);
+ rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
+ RTE_CACHE_LINE_SIZE);
if (rxq == NULL) {
PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
return -ENOMEM;
@@ -979,7 +1092,9 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
ring->rid = i;
snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
- ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
+ ring->buf_info = rte_zmalloc(mem_name,
+ ring->size * sizeof(vmxnet3_buf_info_t),
+ RTE_CACHE_LINE_SIZE);
if (ring->buf_info == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
return -ENOMEM;
@@ -1013,10 +1128,15 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
/* Passing 0 as alloc_num will allocate full ring */
ret = vmxnet3_post_rx_bufs(rxq, j);
if (ret <= 0) {
- PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j);
+ PMD_INIT_LOG(ERR,
+ "ERROR: Posting Rxq: %d buffers ring: %d",
+ i, j);
return -ret;
}
- /* Updating device with the index:next2fill to fill the mbufs for coming packets */
+ /*
+ * Updating device with the index:next2fill to fill the
+ * mbufs for coming packets.
+ */
if (unlikely(rxq->shared->ctrl.updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
rxq->cmd_ring[j].next2fill);
@@ -1064,7 +1184,7 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
/* loading hashKeySize */
dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
- /* loading indTableSize : Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
+ /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
if (port_rss_conf->rss_key == NULL) {
@@ -1073,7 +1193,8 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
}
/* loading hashKey */
- memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize);
+ memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
+ dev_rss_conf->hashKeySize);
/* loading indTable */
for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
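The receive-path hunks above swap the per-packet vmxnet3_post_rx_bufs() call for vmxnet3_renew_desc(), allocating the replacement mbuf before the completed descriptor is consumed. A condensed sketch of that pattern with an illustrative helper name; on allocation failure the caller simply stops, and the ring keeps its posted buffer instead of leaking the slot:

#include <rte_mbuf.h>

/* Hypothetical refill step: grab the new buffer first; only then may
 * the completed mbuf travel up the stack and free its ring slot. */
static int
refill_after_completion(struct rte_mempool *mp, struct rte_mbuf **slot)
{
	struct rte_mbuf *newm = rte_mbuf_raw_alloc(mp);

	if (newm == NULL)
		return -1;	/* ring still owns the old buffer; retry later */

	*slot = newm;		/* vmxnet3_renew_desc() handles the descriptor */
	return 0;
}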