author    Luca Boccassi <luca.boccassi@gmail.com>    2018-08-14 18:52:30 +0100
committer Luca Boccassi <luca.boccassi@gmail.com>    2018-08-14 18:53:17 +0100
commit    b63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree      83114aac64286fe616506c0b3dfaec2ab86ef835 /drivers/net/thunderx
parent    ca33590b6af032bff57d9cc70455660466a654b2 (diff)

New upstream version 18.08 (upstream/18.08)

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/thunderx')
 -rw-r--r--  drivers/net/thunderx/base/nicvf_hw.c        13
 -rw-r--r--  drivers/net/thunderx/base/nicvf_hw.h          1
 -rw-r--r--  drivers/net/thunderx/base/nicvf_hw_defs.h     5
 -rw-r--r--  drivers/net/thunderx/nicvf_ethdev.c         302
 -rw-r--r--  drivers/net/thunderx/nicvf_ethdev.h           2
 -rw-r--r--  drivers/net/thunderx/nicvf_rxtx.c           144
 -rw-r--r--  drivers/net/thunderx/nicvf_rxtx.h            24
 -rw-r--r--  drivers/net/thunderx/nicvf_struct.h          29
 8 files changed, 358 insertions, 162 deletions
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index ea8092ca..5b1abe20 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -699,6 +699,19 @@ nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
else
val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
+ nic->vlan_strip = enable;
+ nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
+void
+nicvf_first_skip_config(struct nicvf *nic, uint8_t num_dwords)
+{
+ uint64_t val;
+
+ val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+ val &= ~(0xfULL);
+ val |= (num_dwords & 0xf);
+
nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}
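
The helper above programs the RQ first-skip field: the low four bits of NIC_VNIC_RQ_GEN_CFG hold the number of 64-bit words the hardware skips at the start of every receive buffer, so callers convert a byte count into dwords before writing it. A minimal illustrative sketch of that conversion, assuming the same limits the ethdev layer enforces (multiple of 8, below 128 bytes); the helper name here is hypothetical:

    #include <errno.h>

    /* Hypothetical helper: turn a skip-bytes value into the dword count
     * passed to nicvf_first_skip_config(). */
    static int skip_bytes_to_dwords(int bytes)
    {
            if (bytes < 0 || bytes >= 128 || (bytes % 8) != 0)
                    return -EINVAL;
            return bytes / 8;
    }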
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
index 284d0bdf..fd13ea84 100644
--- a/drivers/net/thunderx/base/nicvf_hw.h
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -193,6 +193,7 @@ uint32_t nicvf_qsize_sq_roundup(uint32_t val);
void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable);
void nicvf_apad_config(struct nicvf *nic, bool enable);
+void nicvf_first_skip_config(struct nicvf *nic, uint8_t dwords);
int nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg);
int nicvf_rss_term(struct nicvf *nic);
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index b13c21ff..b12c8ec5 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -171,7 +171,10 @@
/* Min/Max packet size */
#define NIC_HW_MIN_FRS (64)
-#define NIC_HW_MAX_FRS (9200) /* 9216 max pkt including FCS */
+/* ETH_HLEN+ETH_FCS_LEN+2*VLAN_HLEN */
+#define NIC_HW_L2_OVERHEAD (26)
+#define NIC_HW_MAX_MTU (9190)
+#define NIC_HW_MAX_FRS (NIC_HW_MAX_MTU + NIC_HW_L2_OVERHEAD)
#define NIC_HW_MAX_SEGS (12)
/* Descriptor alignments */
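
For reference, the overhead constant adds up to the same hardware limit as before: ETH_HLEN (14) + ETH_FCS_LEN (4) + 2 * VLAN_HLEN (2 * 4) = 26 bytes, so NIC_HW_MAX_FRS = 9190 + 26 = 9216, the "9216 max pkt including FCS" figure from the removed comment. An illustrative compile-time check, not part of the patch:

    /* Illustrative only: MTU plus L2 overhead must still equal 9216 bytes. */
    _Static_assert(9190 + 26 == 9216, "NIC_HW_MAX_FRS must stay at 9216");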
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index a65361fb..a55c3ca6 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -15,7 +15,6 @@
#include <sys/queue.h>
#include <rte_alarm.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
@@ -35,6 +34,8 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_tailq.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
#include "base/nicvf_plat.h"
@@ -51,10 +52,10 @@ static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
bool cleanup);
+static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-RTE_INIT(nicvf_init_log);
-static void
-nicvf_init_log(void)
+RTE_INIT(nicvf_init_log)
{
nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
if (nicvf_logtype_mbox >= 0)
@@ -69,25 +70,14 @@ nicvf_init_log(void)
rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
}
-static inline int
-nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
+static void
+nicvf_link_status_update(struct nicvf *nic,
+ struct rte_eth_link *link)
{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
+ memset(link, 0, sizeof(*link));
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
+ link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
-static inline void
-nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
-{
- link->link_status = nic->link_up;
- link->link_duplex = ETH_LINK_AUTONEG;
if (nic->duplex == NICVF_HALF_DUPLEX)
link->link_duplex = ETH_LINK_HALF_DUPLEX;
else if (nic->duplex == NICVF_FULL_DUPLEX)
@@ -101,12 +91,17 @@ nicvf_interrupt(void *arg)
{
struct rte_eth_dev *dev = arg;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_eth_link link;
if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
- if (dev->data->dev_conf.intr_conf.lsc)
- nicvf_set_eth_link_status(nic, &dev->data->dev_link);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL);
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ nicvf_link_status_update(nic, &link);
+ rte_eth_linkstatus_set(dev, &link);
+
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
}
rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
@@ -153,24 +148,23 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
if (wait_to_complete) {
/* rte_eth_link_get() might need to wait up to 9 seconds */
for (i = 0; i < MAX_CHECK_TIME; i++) {
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
- if (link.link_status)
+ nicvf_link_status_update(nic, &link);
+ if (link.link_status == ETH_LINK_UP)
break;
rte_delay_ms(CHECK_INTERVAL);
}
} else {
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
+ nicvf_link_status_update(nic, &link);
}
- return nicvf_atomic_write_link_status(dev, &link);
+
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
size_t i;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
@@ -188,7 +182,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* Refuse mtu that requires the support of scattered packets
* when this feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (dev->data->dev_started && !dev->data->scattered_rx &&
(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
return -EINVAL;
@@ -202,11 +196,11 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
else
rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
+ if (nicvf_mbox_update_hw_max_frs(nic, mtu))
return -EINVAL;
- /* Update max frame size */
- rxmode->max_rx_pkt_len = (uint32_t)frame_size;
+ /* Update max_rx_pkt_len */
+ rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;
nic->mtu = mtu;
for (i = 0; i < nic->sqs_count; i++)
@@ -363,11 +357,9 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
}
memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
- if (dev->rx_pkt_burst == nicvf_recv_pkts ||
- dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
- return ptypes;
- return NULL;
+ /* All Ptypes are supported in all Rx functions. */
+ return ptypes;
}
static void
@@ -891,7 +883,7 @@ nicvf_dev_tx_queue_release(void *sq)
static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
- struct nicvf_txq *txq;
+ struct nicvf_txq *txq = NULL;
size_t i;
bool multiseg = false;
@@ -912,6 +904,9 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
dev->tx_pkt_burst = nicvf_xmit_pkts;
}
+ if (!txq)
+ return;
+
if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
else
@@ -921,13 +916,23 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
- if (dev->data->scattered_rx) {
- PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
- dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
- } else {
- PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
- dev->rx_pkt_burst = nicvf_recv_pkts;
- }
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ const eth_rx_burst_t rx_burst_func[2][2][2] = {
+ /* [NORMAL/SCATTER] [CKSUM/NO_CKSUM] [VLAN_STRIP/NO_VLAN_STRIP] */
+ [0][0][0] = nicvf_recv_pkts_no_offload,
+ [0][0][1] = nicvf_recv_pkts_vlan_strip,
+ [0][1][0] = nicvf_recv_pkts_cksum,
+ [0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
+ [1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
+ [1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
+ [1][1][0] = nicvf_recv_pkts_multiseg_cksum,
+ [1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
+ };
+
+ dev->rx_pkt_burst =
+ rx_burst_func[dev->data->scattered_rx]
+ [nic->offload_cksum][nic->vlan_strip];
}
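
With this table the Rx burst function is chosen once, at start time, from eight variants indexed by scattered Rx, checksum offload and VLAN strip, so each variant only does the work its offload set requires. A hedged usage sketch (standard ethdev API; the port id and queue counts are placeholders) of how an application ends up on the checksum-aware variants:

    #include <rte_ethdev.h>

    /* Sketch: requesting Rx checksum offload makes the driver set
     * nic->offload_cksum, which selects a *_cksum* entry in
     * rx_burst_func[][][]; port_id is a placeholder. */
    static int configure_with_rx_cksum(uint16_t port_id)
    {
            struct rte_eth_conf conf = { 0 };

            conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
            return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }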
static int
@@ -939,7 +944,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
bool is_single_pool;
struct nicvf_txq *txq;
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint64_t conf_offloads, offload_capa, unsupported_offloads;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
@@ -953,17 +958,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
- conf_offloads = tx_conf->offloads;
- offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
- unsupported_offloads = conf_offloads & ~offload_capa;
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- unsupported_offloads, conf_offloads, offload_capa);
- return -ENOTSUP;
- }
-
/* Tx deferred start is not supported */
if (tx_conf->tx_deferred_start) {
PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1015,9 +1009,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
txq->tx_free_thresh = tx_free_thresh;
txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
- txq->offloads = conf_offloads;
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->offloads = offloads;
- is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+ is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
/* Choose optimum free threshold value for multipool case */
if (!is_single_pool) {
@@ -1248,6 +1243,7 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
{
uintptr_t p;
struct rte_mbuf mb_def;
+ struct nicvf *nic = rxq->nic;
RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
@@ -1257,8 +1253,11 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
offsetof(struct rte_mbuf, data_off) != 4);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
offsetof(struct rte_mbuf, data_off) != 6);
+ RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) -
+ offsetof(struct nicvf_rxq,
+ rxq_fastpath_data_start) > 128);
mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes);
mb_def.port = rxq->port_id;
rte_mbuf_refcnt_set(&mb_def, 1);
@@ -1277,10 +1276,20 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t rx_free_thresh;
struct nicvf_rxq *rxq;
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint64_t conf_offloads, offload_capa, unsupported_offloads;
+ uint64_t offloads;
+ uint32_t buffsz;
+ struct rte_pktmbuf_pool_private *mbp_priv;
PMD_INIT_FUNC_TRACE();
+ /* First skip check */
+ mbp_priv = rte_mempool_get_priv(mp);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ if (buffsz < (uint32_t)(nic->skip_bytes)) {
+ PMD_INIT_LOG(ERR, "First skip is more than configured buffer size");
+ return -EINVAL;
+ }
+
if (qidx >= MAX_RCV_QUEUES_PER_QS)
nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
@@ -1291,24 +1300,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
-
- conf_offloads = rx_conf->offloads;
-
- if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
- PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
- conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
- }
-
- offload_capa = NICVF_RX_OFFLOAD_CAPA;
- unsupported_offloads = conf_offloads & ~offload_capa;
-
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- unsupported_offloads, conf_offloads, offload_capa);
- return -ENOTSUP;
- }
-
/* Mempool memory must be contiguous, so must be one memory segment*/
if (mp->nb_mem_chunks != 1) {
PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1316,7 +1307,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
}
/* Mempool memory must be physically contiguous */
- if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
+ if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
return -EINVAL;
}
@@ -1334,6 +1325,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
return -EINVAL;
}
+
/* Check rx_free_thresh upper bound */
rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
rx_conf->rx_free_thresh :
@@ -1389,10 +1381,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
nicvf_rx_queue_reset(rxq);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
- rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
+ rte_mempool_avail_count(mp), rxq->phys, offloads);
dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1408,8 +1401,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
/* Autonegotiation may be disabled */
dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
@@ -1418,7 +1409,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa |= ETH_LINK_SPEED_40G;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
- dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
+ dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;
dev_info->max_rx_queues =
(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_tx_queues =
@@ -1445,12 +1436,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
- .txq_flags =
- ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOREFCOUNT |
- ETH_TXQ_FLAGS_NOMULTMEMP |
- ETH_TXQ_FLAGS_NOVLANOFFL |
- ETH_TXQ_FLAGS_NOXSUMSCTP,
.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -1495,7 +1480,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
struct rte_mbuf *mbuf;
uint16_t rx_start, rx_end;
uint16_t tx_start, tx_end;
- bool vlan_strip;
+ int mask;
PMD_INIT_FUNC_TRACE();
@@ -1541,6 +1526,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
return -EINVAL;
}
rxq->mbuf_phys_off -= data_off;
+ rxq->mbuf_phys_off -= nic->skip_bytes;
if (mbuf_phys_off == 0)
mbuf_phys_off = rxq->mbuf_phys_off;
@@ -1615,9 +1601,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
/* Configure VLAN Strip */
- vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP);
- nicvf_vlan_hw_strip(nic, vlan_strip);
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = nicvf_vlan_offload_config(dev, mask);
/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
* to the 64bit memory address.
@@ -1751,8 +1737,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
/* Setup MTU based on max_rx_pkt_len or default */
mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
dev->data->dev_conf.rxmode.max_rx_pkt_len
- - ETHER_HDR_LEN - ETHER_CRC_LEN
- : ETHER_MTU;
+ - ETHER_HDR_LEN : ETHER_MTU;
if (nicvf_dev_set_mtu(dev, mtu)) {
PMD_INIT_LOG(ERR, "Failed to set default mtu size");
@@ -1771,7 +1756,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
return ret;
}
- /* Configure callbacks based on scatter mode */
+ /* Configure callbacks based on offloads */
nicvf_set_tx_function(dev);
nicvf_set_rx_function(dev);
@@ -1923,8 +1908,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_txmode *txmode = &conf->txmode;
struct nicvf *nic = nicvf_pmd_priv(dev);
uint8_t cqcount;
- uint64_t conf_rx_offloads, rx_offload_capa;
- uint64_t conf_tx_offloads, tx_offload_capa;
PMD_INIT_FUNC_TRACE();
@@ -1933,32 +1916,10 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
- tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
- if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
- PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- conf_tx_offloads, tx_offload_capa);
- return -ENOTSUP;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
- PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
- rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
- }
-
- conf_rx_offloads = rxmode->offloads;
- rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
-
- if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
- PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- conf_rx_offloads, rx_offload_capa);
- return -ENOTSUP;
- }
-
- if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
@@ -2014,6 +1975,9 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
}
}
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ nic->offload_cksum = 1;
+
PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
dev->data->port_id, nicvf_hw_cap(nic));
@@ -2033,6 +1997,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
.dev_infos_get = nicvf_dev_info_get,
.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
.mtu_set = nicvf_dev_set_mtu,
+ .vlan_offload_set = nicvf_vlan_offload_set,
.reta_update = nicvf_dev_reta_update,
.reta_query = nicvf_dev_reta_query,
.rss_hash_update = nicvf_dev_rss_hash_update,
@@ -2050,6 +2015,83 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
};
static int
+nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ nicvf_vlan_hw_strip(nic, true);
+ else
+ nicvf_vlan_hw_strip(nic, false);
+ }
+
+ return 0;
+}
+
+static int
+nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ nicvf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
+static inline int
+nicvf_set_first_skip(struct rte_eth_dev *dev)
+{
+ int bytes_to_skip = 0;
+ int ret = 0;
+ unsigned int i;
+ struct rte_kvargs *kvlist;
+ static const char *const skip[] = {
+ SKIP_DATA_BYTES,
+ NULL};
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (!dev->device->devargs) {
+ nicvf_first_skip_config(nic, 0);
+ return ret;
+ }
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, skip);
+ if (!kvlist)
+ return -EINVAL;
+
+ if (kvlist->count == 0)
+ goto exit;
+
+ for (i = 0; i != kvlist->count; ++i) {
+ const struct rte_kvargs_pair *pair = &kvlist->pairs[i];
+
+ if (!strcmp(pair->key, SKIP_DATA_BYTES))
+ bytes_to_skip = atoi(pair->value);
+ }
+
+ /*128 bytes amounts to one cache line*/
+ if (bytes_to_skip >= 0 && bytes_to_skip < 128) {
+ if (!(bytes_to_skip % 8)) {
+ nicvf_first_skip_config(nic, (bytes_to_skip / 8));
+ nic->skip_bytes = bytes_to_skip;
+ goto kvlist_free;
+ } else {
+ PMD_INIT_LOG(ERR, "skip_data_bytes should be multiple of 8");
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "skip_data_bytes should be less than 128");
+ ret = -EINVAL;
+ goto exit;
+ }
+exit:
+ nicvf_first_skip_config(nic, 0);
+kvlist_free:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
int ret;
@@ -2158,6 +2200,11 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
goto malloc_fail;
}
+ ret = nicvf_set_first_skip(eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure first skip");
+ goto malloc_fail;
+ }
PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
eth_dev->data->port_id, nic->vendor_id, nic->device_id,
nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
@@ -2230,3 +2277,4 @@ static struct rte_pci_driver rte_nicvf_pmd = {
RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_thunderx, SKIP_DATA_BYTES "=<int>");
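
The skip_data_bytes device argument registered here is parsed in nicvf_set_first_skip() during probe. A hedged command-line sketch of passing it via EAL device arguments (the PCI address is a placeholder):

    testpmd -l 0-3 -w 0002:01:00.2,skip_data_bytes=8 -- -i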
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index ea8dccd1..ae440fef 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -38,6 +38,7 @@
DEV_TX_OFFLOAD_MULTI_SEGS)
#define NICVF_RX_OFFLOAD_CAPA ( \
+ DEV_RX_OFFLOAD_CHECKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_CRC_STRIP | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
@@ -51,6 +52,7 @@
#define VLAN_TAG_SIZE 4 /* 802.3ac tag */
+#define SKIP_DATA_BYTES "skip_data_bytes"
static inline struct nicvf *
nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
{
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 72305d9d..247c3568 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -162,12 +162,14 @@ nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
free_desc -= TX_DESC_PER_PKT;
}
- sq->tail = tail;
- sq->xmit_bufs += i;
- rte_wmb();
+ if (likely(i)) {
+ sq->tail = tail;
+ sq->xmit_bufs += i;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ }
return i;
}
@@ -218,12 +220,14 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
}
}
- sq->tail = tail;
- sq->xmit_bufs += used_bufs;
- rte_wmb();
+ if (likely(used_desc)) {
+ sq->tail = tail;
+ sq->xmit_bufs += used_bufs;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, used_desc);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, used_desc);
+ }
return i;
}
@@ -327,6 +331,20 @@ nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
}
+static inline uint64_t __hot
+nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
+{
+ static const uint64_t flag_table[3] __rte_cache_aligned = {
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ };
+
+ const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
+ (cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);
+ return flag_table[idx];
+}
+
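
A worked example of the index formed above: a packet failing its L4 checksum reports err_opcode == CQE_RX_ERR_L4_CHK, so idx = (1 << 1) | 0 = 2 and the mbuf gets PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD; an error-free packet yields idx = 0 and both GOOD flags, while an IP header checksum error yields idx = 1 and PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN.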
static inline int __hot
nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
{
@@ -385,11 +403,13 @@ nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
if (likely(cqe_rx_w0.rss_alg)) {
pkt->hash.rss = cqe_rx_w2.rss_tag;
pkt->ol_flags |= PKT_RX_RSS_HASH;
+
}
}
-uint16_t __hot
-nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ const uint32_t flag)
{
uint32_t i, to_process;
struct cqe_rx_t *cqe_rx;
@@ -420,7 +440,19 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
- pkt->ol_flags = 0;
+
+ if (flag & NICVF_RX_OFFLOAD_NONE)
+ pkt->ol_flags = 0;
+ if (flag & NICVF_RX_OFFLOAD_CKSUM)
+ pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+ if (unlikely(cqe_rx_w0.vlan_stripped)) {
+ pkt->ol_flags |= PKT_RX_VLAN
+ | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci =
+ rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+ }
+ }
pkt->data_len = cqe_rx_w3.rb0_sz;
pkt->pkt_len = cqe_rx_w3.rb0_sz;
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -445,11 +477,43 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return to_process;
}
-static inline uint16_t __hot
+uint16_t __hot
+nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+static __rte_always_inline uint16_t __hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
uint64_t mbuf_phys_off,
struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
- uint64_t mbuf_init)
+ uint64_t mbuf_init, const uint32_t flag)
{
struct rte_mbuf *pkt, *seg, *prev;
cqe_rx_word0_t cqe_rx_w0;
@@ -467,12 +531,22 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
- pkt->ol_flags = 0;
pkt->pkt_len = cqe_rx_w1.pkt_len;
pkt->data_len = rb_sz[nicvf_frag_num(0)];
nicvf_mbuff_init_mseg_update(
pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_NONE)
+ pkt->ol_flags = 0;
+ if (flag & NICVF_RX_OFFLOAD_CKSUM)
+ pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+ if (unlikely(cqe_rx_w0.vlan_stripped)) {
+ pkt->ol_flags |= PKT_RX_VLAN
+ | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+ }
+ }
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
*rx_pkt = pkt;
@@ -491,9 +565,9 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
return nb_segs;
}
-uint16_t __hot
+static __rte_always_inline uint16_t __hot
nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+ uint16_t nb_pkts, const uint32_t flag)
{
union cq_entry_t *cq_entry;
struct cqe_rx_t *cqe_rx;
@@ -515,7 +589,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
cq_entry = &desc[cqe_head];
cqe_rx = (struct cqe_rx_t *)cq_entry;
nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
- rx_pkts + i, rbptr_offset, mbuf_init);
+ rx_pkts + i, rbptr_offset, mbuf_init, flag);
buffers_consumed += nb_segs;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -535,6 +609,38 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
return to_process;
}
+uint16_t __hot
+nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
uint32_t
nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 8bdd582e..a39808cb 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -8,6 +8,10 @@
#include <rte_byteorder.h>
#include <rte_ethdev_driver.h>
+#define NICVF_RX_OFFLOAD_NONE 0x1
+#define NICVF_RX_OFFLOAD_CKSUM 0x2
+#define NICVF_RX_OFFLOAD_VLAN_STRIP 0x4
+
#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
#ifndef __hot
@@ -86,9 +90,23 @@ nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
-uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
-uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t pkts);
+uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t pkts);
+uint16_t nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index d4a83c37..dd52f38e 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -55,25 +55,27 @@ union mbuf_initializer {
};
struct nicvf_rxq {
+ MARKER rxq_fastpath_data_start;
+ uint8_t rbptr_offset;
+ uint16_t rx_free_thresh;
+ uint32_t head;
+ uint32_t qlen_mask;
+ int32_t recv_buffers;
+ int32_t available_space;
uint64_t mbuf_phys_off;
uintptr_t cq_status;
uintptr_t cq_door;
- union mbuf_initializer mbuf_initializer;
- nicvf_iova_addr_t phys;
- union cq_entry_t *desc;
struct nicvf_rbdr *shared_rbdr;
- struct nicvf *nic;
struct rte_mempool *pool;
- uint32_t head;
- uint32_t qlen_mask;
- int32_t available_space;
- int32_t recv_buffers;
- uint16_t rx_free_thresh;
- uint16_t queue_id;
- uint16_t precharge_cnt;
+ union cq_entry_t *desc;
+ union mbuf_initializer mbuf_initializer;
+ MARKER rxq_fastpath_data_end;
uint8_t rx_drop_en;
+ uint16_t precharge_cnt;
uint16_t port_id;
- uint8_t rbptr_offset;
+ uint16_t queue_id;
+ struct nicvf *nic;
+ nicvf_iova_addr_t phys;
} __rte_cache_aligned;
struct nicvf {
@@ -85,6 +87,8 @@ struct nicvf {
bool loopback_supported;
bool pf_acked:1;
bool pf_nacked:1;
+ bool offload_cksum:1;
+ bool vlan_strip:1;
uint64_t hwcap;
uint8_t link_up;
uint8_t duplex;
@@ -99,6 +103,7 @@ struct nicvf {
struct rte_intr_handle intr_handle;
uint8_t cpi_alg;
uint16_t mtu;
+ int skip_bytes;
bool vlan_filter_en;
uint8_t mac_addr[ETHER_ADDR_LEN];
/* secondary queue set support */