author     Luca Boccassi <luca.boccassi@gmail.com>    2017-11-08 14:15:11 +0000
committer  Luca Boccassi <luca.boccassi@gmail.com>    2017-11-08 14:45:54 +0000
commit     055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree       8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /drivers/net/virtio
parent     f239aed5e674965691846e8ce3f187dd47523689 (diff)
New upstream version 17.11-rc3
Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
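
[Editor's note] With this version, Rx offload negotiation moves into
virtio_dev_configure(): GUEST_CSUM is only requested when the application sets
rxmode.hw_ip_checksum, and GUEST_TSO4/TSO6 only when it sets rxmode.enable_lro.
A minimal, hypothetical application-side sketch against the 17.11 ethdev API
(port id, descriptor counts and the mempool are illustrative, not part of this
patch):

    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mempool.h>

    /* Sketch: request the Rx offloads this driver now negotiates at configure time. */
    static int
    configure_virtio_port(uint16_t port_id, struct rte_mempool *mb_pool)
    {
            struct rte_eth_conf conf;

            memset(&conf, 0, sizeof(conf));
            conf.rxmode.hw_ip_checksum = 1; /* asks for VIRTIO_NET_F_GUEST_CSUM (L4 only) */
            conf.rxmode.enable_lro = 1;     /* asks for VIRTIO_NET_F_GUEST_TSO4/TSO6 */

            /* Fails with -ENOTSUP if the host does not offer the features. */
            if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
                    return -1;
            if (rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL, mb_pool) < 0)
                    return -1;
            if (rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL) < 0)
                    return -1;
            return rte_eth_dev_start(port_id);
    }
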
Diffstat (limited to 'drivers/net/virtio')
-rw-r--r--  drivers/net/virtio/Makefile                        |   6
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c                 | 169
-rw-r--r--  drivers/net/virtio/virtio_ethdev.h                 |   6
-rw-r--r--  drivers/net/virtio/virtio_pci.c                    |   2
-rw-r--r--  drivers/net/virtio/virtio_pci.h                    |   6
-rw-r--r--  drivers/net/virtio/virtio_rxtx.c                   |  83
-rw-r--r--  drivers/net/virtio/virtio_rxtx.h                   |  12
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.c            |   3
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple_neon.c       |   1
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple_sse.c        |   1
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.c  |   4
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_user.c        |   4
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c   |   2
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c            |  12
-rw-r--r--  drivers/net/virtio/virtqueue.c                     |  25
-rw-r--r--  drivers/net/virtio/virtqueue.h                     |  16
16 files changed, 256 insertions(+), 96 deletions(-)
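
[Editor's note] The diff also registers .vlan_offload_set, so VLAN stripping and
filtering can be toggled at runtime through the generic ethdev call instead of
only at configure time. A hedged sketch (the mask macros are the standard
rte_ethdev.h ones; the helper name is illustrative):

    #include <rte_ethdev.h>

    /* Sketch: turn VLAN stripping on (filter/extend bits left cleared, i.e. off). */
    static int
    enable_vlan_strip(uint16_t port_id)
    {
            return rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_STRIP_OFFLOAD);
    }

The current setting can be read back with rte_eth_dev_get_vlan_offload(port_id).
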
diff --git a/drivers/net/virtio/Makefile b/drivers/net/virtio/Makefile
index b21b8781..f2b5d1c3 100644
--- a/drivers/net/virtio/Makefile
+++ b/drivers/net/virtio/Makefile
@@ -38,6 +38,12 @@ LIB = librte_pmd_virtio.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+ifeq ($(CONFIG_RTE_VIRTIO_USER),y)
+LDLIBS += -lrte_bus_vdev
+endif
EXPORT_MAP := rte_pmd_virtio_version.map
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index e320811e..d2576d5e 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -46,9 +46,11 @@
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>
+#include <rte_cpuflags.h>
#include <rte_memory.h>
#include <rte_eal.h>
@@ -72,11 +74,12 @@ static void virtio_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
+static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);
-static void virtio_dev_stats_get(struct rte_eth_dev *dev,
+static int virtio_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
@@ -162,7 +165,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
uint32_t head, i;
int k, sum = 0;
virtio_net_ctrl_ack status = ~0;
- struct virtio_pmd_ctrl result;
+ struct virtio_pmd_ctrl *result;
struct virtqueue *vq;
ctrl->status = status;
@@ -253,10 +256,9 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
vq->vq_free_cnt, vq->vq_desc_head_idx);
- memcpy(&result, cvq->virtio_net_hdr_mz->addr,
- sizeof(struct virtio_pmd_ctrl));
+ result = cvq->virtio_net_hdr_mz->addr;
- return result.status;
+ return result->status;
}
static int
@@ -426,10 +428,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
memset(mz->addr, 0, mz->len);
- vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_mem = mz->iova;
vq->vq_ring_virt_mem = mz->addr;
PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
- (uint64_t)mz->phys_addr);
+ (uint64_t)mz->iova);
PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
(uint64_t)(uintptr_t)mz->addr);
@@ -474,13 +476,13 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
- txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+ txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
cvq->vq = vq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
- cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+ cvq->virtio_net_hdr_mem = hdr_mz->iova;
memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
hw->cvq = cvq;
@@ -491,7 +493,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
*/
if (!hw->virtio_user_dev)
- vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
+ vq->offset = offsetof(struct rte_mbuf, buf_iova);
else {
vq->vq_ring_mem = (uintptr_t)mz->addr;
vq->offset = offsetof(struct rte_mbuf, buf_addr);
@@ -779,6 +781,7 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.stats_reset = virtio_dev_stats_reset,
.xstats_reset = virtio_dev_stats_reset,
.link_update = virtio_dev_link_update,
+ .vlan_offload_set = virtio_dev_vlan_offload_set,
.rx_queue_setup = virtio_dev_rx_queue_setup,
.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
@@ -964,10 +967,12 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return count;
}
-static void
+static int
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
virtio_update_stats(dev, stats);
+
+ return 0;
}
static void
@@ -1235,14 +1240,36 @@ virtio_interrupt_handler(void *param)
}
+/* set rx and tx handlers according to what is supported */
static void
-rx_func_get(struct rte_eth_dev *eth_dev)
+set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
+
+ if (hw->use_simple_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
- else
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+ eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ }
+
+ if (hw->use_simple_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ }
}
/* Only support 1:1 queue/interrupt mapping so far.
@@ -1360,15 +1387,12 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
rte_eth_copy_pci_info(eth_dev, pci_dev);
}
- eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
/* If host does not support both status and MSI-X then disable LSC */
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
else
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
- rx_func_get(eth_dev);
-
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
@@ -1534,7 +1558,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
eth_dev->dev_ops = &virtio_eth_dev_ops;
- eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
@@ -1544,12 +1567,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
}
virtio_set_vtpci_ops(hw);
- if (hw->use_simple_rxtx) {
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
- eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
- } else {
- rx_func_get(eth_dev);
- }
+ set_rxtx_funcs(eth_dev);
+
return 0;
}
@@ -1659,9 +1678,11 @@ virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t req_features;
int ret;
PMD_INIT_LOG(DEBUG, "configure");
+ req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
if (dev->data->dev_conf.intr_conf.rxq) {
ret = virtio_init_device(dev, hw->req_guest_features);
@@ -1669,16 +1690,37 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
- /* Virtio does L4 checksum but not L3! */
- if (rxmode->hw_ip_checksum) {
- PMD_DRV_LOG(NOTICE,
- "virtio does not support IP checksum");
+ /* The name hw_ip_checksum is a bit confusing since it can be
+ * set by the application to request L3 and/or L4 checksums. In
+ * case of virtio, only L4 checksum is supported.
+ */
+ if (rxmode->hw_ip_checksum)
+ req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
+
+ if (rxmode->enable_lro)
+ req_features |=
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6);
+
+ /* if request features changed, reinit the device */
+ if (req_features != hw->req_guest_features) {
+ ret = virtio_init_device(dev, req_features);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (rxmode->hw_ip_checksum &&
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
+ PMD_DRV_LOG(ERR,
+ "rx checksum not available on this host");
return -ENOTSUP;
}
- if (rxmode->enable_lro) {
- PMD_DRV_LOG(NOTICE,
- "virtio does not support Large Receive Offload");
+ if (rxmode->enable_lro &&
+ (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
+ PMD_DRV_LOG(ERR,
+ "Large Receive Offload not available on this host");
return -ENOTSUP;
}
@@ -1690,7 +1732,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
if (rxmode->hw_vlan_filter
&& !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(ERR,
"vlan filtering not available on this host");
return -ENOTSUP;
}
@@ -1703,6 +1745,23 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return -EBUSY;
}
+ hw->use_simple_rx = 1;
+ hw->use_simple_tx = 1;
+
+#if defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+ hw->use_simple_rx = 0;
+ hw->use_simple_tx = 0;
+ }
+#endif
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ hw->use_simple_rx = 0;
+ hw->use_simple_tx = 0;
+ }
+
+ if (rxmode->hw_ip_checksum)
+ hw->use_simple_rx = 0;
+
return 0;
}
@@ -1714,6 +1773,19 @@ virtio_dev_start(struct rte_eth_dev *dev)
struct virtnet_rx *rxvq;
struct virtnet_tx *txvq __rte_unused;
struct virtio_hw *hw = dev->data->dev_private;
+ int ret;
+
+ /* Finish the initialization of the queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ret = virtio_dev_rx_queue_setup_finish(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ ret = virtio_dev_tx_queue_setup_finish(dev, i);
+ if (ret < 0)
+ return ret;
+ }
/* check if lsc interrupt feature is enabled */
if (dev->data->dev_conf.intr_conf.lsc) {
@@ -1751,9 +1823,16 @@ virtio_dev_start(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxvq = dev->data->rx_queues[i];
+ /* Flush the old packets */
+ virtqueue_flush(rxvq->vq);
virtqueue_notify(rxvq->vq);
}
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txvq = dev->data->tx_queues[i];
+ virtqueue_notify(txvq->vq);
+ }
+
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -1766,6 +1845,7 @@ virtio_dev_start(struct rte_eth_dev *dev)
VIRTQUEUE_DUMP(txvq->vq);
}
+ set_rxtx_funcs(dev);
hw->started = 1;
/* Initialize Link state */
@@ -1875,6 +1955,29 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
return (old.link_status == link.link_status) ? -1 : 0;
}
+static int
+virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->hw_vlan_filter &&
+ !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
+
+ PMD_DRV_LOG(NOTICE,
+ "vlan filtering not available on this host");
+
+ return -ENOTSUP;
+ }
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK)
+ hw->vlan_strip = rxmode->hw_vlan_strip;
+
+ return 0;
+}
+
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -1904,6 +2007,8 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ if ((host_features & tso_mask) == tso_mask)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
dev_info->tx_offload_capa = 0;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index c3413c6d..2039bc54 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -92,10 +92,16 @@ int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
+int virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+
int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
+int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id);
+
uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index e6da6802..55b717c0 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -553,7 +553,7 @@ get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
uint32_t offset = cap->offset;
uint8_t *base;
- if (bar > 5) {
+ if (bar >= PCI_MAX_RESOURCE) {
PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
return NULL;
}
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 18caebdd..36d452c0 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -37,6 +37,7 @@
#include <stdint.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ethdev.h>
struct virtqueue;
@@ -259,8 +260,9 @@ struct virtio_hw {
uint8_t vlan_strip;
uint8_t use_msix;
uint8_t modern;
- uint8_t use_simple_rxtx;
- uint8_t port_id;
+ uint8_t use_simple_rx;
+ uint8_t use_simple_tx;
+ uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
uint32_t notify_off_multiplier;
uint8_t *isr;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index e30377c5..390c137c 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -39,7 +39,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
@@ -50,7 +49,6 @@
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
-#include <rte_cpuflags.h>
#include <rte_net.h>
#include <rte_ip.h>
#include <rte_udp.h>
@@ -81,7 +79,7 @@ virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
return VIRTQUEUE_NUSED(vq) >= offset;
}
-static void
+void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp, *dp_tail;
@@ -300,6 +298,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
/* prepend cannot fail, checked by caller */
hdr = (struct virtio_net_hdr *)
rte_pktmbuf_prepend(cookie, head_size);
+ /* rte_pktmbuf_prepend() adds the hdr size to the pkt length, which
+ * is wrong here; the subtraction below restores the correct pkt size.
+ */
+ cookie->pkt_len -= head_size;
/* if offload disabled, it is not zeroed below, do it now */
if (offload == 0) {
ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
@@ -421,9 +423,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct virtio_hw *hw = dev->data->dev_private;
struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
struct virtnet_rx *rxvq;
- int error, nbufs;
- struct rte_mbuf *m;
- uint16_t desc_idx;
PMD_INIT_FUNC_TRACE();
@@ -440,12 +439,26 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
dev->data->rx_queues[queue_idx] = rxvq;
+ return 0;
+}
+
+int
+virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+ struct virtnet_rx *rxvq = &vq->rxq;
+ struct rte_mbuf *m;
+ uint16_t desc_idx;
+ int error, nbufs;
+
+ PMD_INIT_FUNC_TRACE();
/* Allocate blank mbufs for the each rx descriptor */
nbufs = 0;
- error = ENOSPC;
- if (hw->use_simple_rxtx) {
+ if (hw->use_simple_rx) {
for (desc_idx = 0; desc_idx < vq->vq_nentries;
desc_idx++) {
vq->vq_ring.avail->ring[desc_idx] = desc_idx;
@@ -467,7 +480,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
break;
/* Enqueue allocated buffers */
- if (hw->use_simple_rxtx)
+ if (hw->use_simple_rx)
error = virtqueue_enqueue_recv_refill_simple(vq, m);
else
error = virtqueue_enqueue_recv_refill(vq, m);
@@ -490,31 +503,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-static void
-virtio_update_rxtx_handler(struct rte_eth_dev *dev,
- const struct rte_eth_txconf *tx_conf)
-{
- uint8_t use_simple_rxtx = 0;
- struct virtio_hw *hw = dev->data->dev_private;
-
-#if defined RTE_ARCH_X86
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3))
- use_simple_rxtx = 1;
-#elif defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
- use_simple_rxtx = 1;
-#endif
- /* Use simple rx/tx func if single segment and no offloads */
- if (use_simple_rxtx &&
- (tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
- !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- PMD_INIT_LOG(INFO, "Using simple rx/tx path");
- dev->tx_pkt_burst = virtio_xmit_pkts_simple;
- dev->rx_pkt_burst = virtio_recv_pkts_vec;
- hw->use_simple_rxtx = use_simple_rxtx;
- }
-}
-
/*
* struct rte_eth_dev *dev: Used to update dev
* uint16_t nb_desc: Defaults to values read from config space
@@ -534,11 +522,12 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
struct virtnet_tx *txvq;
uint16_t tx_free_thresh;
- uint16_t desc_idx;
PMD_INIT_FUNC_TRACE();
- virtio_update_rxtx_handler(dev, tx_conf);
+ /* cannot use simple rxtx funcs with multisegs or offloads */
+ if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) != VIRTIO_SIMPLE_FLAGS)
+ hw->use_simple_tx = 0;
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
@@ -563,9 +552,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
vq->vq_free_thresh = tx_free_thresh;
- if (hw->use_simple_rxtx) {
- uint16_t mid_idx = vq->vq_nentries >> 1;
+ dev->data->tx_queues[queue_idx] = txvq;
+ return 0;
+}
+int
+virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+ uint16_t queue_idx)
+{
+ uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+ uint16_t mid_idx = vq->vq_nentries >> 1;
+ struct virtnet_tx *txvq = &vq->txq;
+ uint16_t desc_idx;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->use_simple_tx) {
for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
vq->vq_ring.avail->ring[desc_idx] =
desc_idx + mid_idx;
@@ -587,7 +591,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
VIRTQUEUE_DUMP(vq);
- dev->data->tx_queues[queue_idx] = txvq;
return 0;
}
@@ -670,7 +673,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
* In case of SCTP, this will be wrong since it's a CRC
* but there's nothing we can do.
*/
- uint16_t csum, off;
+ uint16_t csum = 0, off;
rte_raw_cksum_mbuf(m, hdr->csum_start,
rte_pktmbuf_pkt_len(m) - hdr->csum_start,
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 28f82d6a..54f1e849 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -54,7 +54,7 @@ struct virtnet_rx {
struct rte_mempool *mpool; /**< mempool for mbuf allocation */
uint16_t queue_id; /**< DPDK queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
/* Statistics */
struct virtnet_stats stats;
@@ -66,10 +66,10 @@ struct virtnet_tx {
struct virtqueue *vq;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
- phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
uint16_t queue_id; /**< DPDK queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
/* Statistics */
struct virtnet_stats stats;
@@ -81,9 +81,9 @@ struct virtnet_ctl {
struct virtqueue *vq;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
- phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
- uint8_t port_id; /**< Device port identifier. */
- const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
+ rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ uint16_t port_id; /**< Device port identifier. */
+ const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
};
int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 542cf805..b5bc1c49 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -39,7 +39,6 @@
#include <rte_cycles.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
@@ -65,6 +64,8 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
struct vring_desc *start_dp;
uint16_t desc_idx;
+ cookie->port = vq->rxq.port_id;
+
desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
dxp = &vq->vq_descx[desc_idx];
dxp->cookie = (void *)cookie;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index 6b40c7f7..b8b93551 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -43,7 +43,6 @@
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index 7cf0f8b8..94f65143 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -46,7 +46,6 @@
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
index f585de8c..689a5cff 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -95,9 +95,9 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
ifr.ifr_flags |= IFF_MULTI_QUEUE;
if (*p_ifname)
- strncpy(ifr.ifr_name, *p_ifname, IFNAMSIZ);
+ strncpy(ifr.ifr_name, *p_ifname, IFNAMSIZ - 1);
else
- strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ);
+ strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ - 1);
if (ioctl(tapfd, TUNSETIFF, (void *)&ifr) == -1) {
PMD_DRV_LOG(ERR, "TUNSETIFF failed: %s", strerror(errno));
goto error;
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 4ad7b21b..97bd8326 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -130,6 +130,10 @@ vhost_user_read(int fd, struct vhost_user_msg *msg)
}
sz_payload = msg->size;
+
+ if ((size_t)sz_payload > sizeof(msg->payload))
+ goto fail;
+
if (sz_payload) {
ret = recv(fd, (void *)((char *)msg + sz_hdr), sz_payload, 0);
if (ret < sz_payload) {
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 79412714..906d7a2b 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -270,6 +270,8 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
+ /* For virtio vdev, no need to read counter for clean */
+ eth_dev->intr_handle->efd_counter_size = 0;
if (dev->vhostfd >= 0)
eth_dev->intr_handle->fd = dev->vhostfd;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index c9614443..7be57ce6 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -40,7 +40,7 @@
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include "virtio_ethdev.h"
@@ -86,7 +86,11 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
int flags;
flags = fcntl(dev->vhostfd, F_GETFL);
- fcntl(dev->vhostfd, F_SETFL, flags | O_NONBLOCK);
+ if (fcntl(dev->vhostfd, F_SETFL,
+ flags | O_NONBLOCK) == -1) {
+ PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
+ return;
+ }
r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
if (r == 0 || (r < 0 && errno != EAGAIN)) {
dev->status &= (~VIRTIO_NET_S_LINK_UP);
@@ -369,9 +373,9 @@ virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
*/
hw->use_msix = 1;
hw->modern = 0;
- hw->use_simple_rxtx = 0;
+ hw->use_simple_rx = 0;
+ hw->use_simple_tx = 0;
hw->virtio_user_dev = dev;
- data->dev_flags = RTE_ETH_DEV_DETACHABLE;
return eth_dev;
}
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 9ad77b8a..c3a536f8 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -59,3 +59,28 @@ virtqueue_detatch_unused(struct virtqueue *vq)
}
return NULL;
}
+
+/* Flush the elements in the used ring. */
+void
+virtqueue_flush(struct virtqueue *vq)
+{
+ struct vring_used_elem *uep;
+ struct vq_desc_extra *dxp;
+ uint16_t used_idx, desc_idx;
+ uint16_t nb_used, i;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ for (i = 0; i < nb_used; i++) {
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ }
+}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 2e120861..2305d91a 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -38,7 +38,6 @@
#include <rte_atomic.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_mempool.h>
#include "virtio_pci.h"
@@ -80,7 +79,7 @@ struct rte_mbuf;
#define VIRTIO_MBUF_ADDR(mb, vq) \
((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
#else
-#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_physaddr)
+#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
#endif
/**
@@ -143,8 +142,8 @@ struct virtio_net_ctrl_mac {
} __attribute__((__packed__));
#define VIRTIO_NET_CTRL_MAC 1
- #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
- #define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
+#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
+#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
/**
* Control VLAN filtering
@@ -204,8 +203,8 @@ struct virtqueue {
struct virtnet_ctl cq;
};
- phys_addr_t vq_ring_mem; /**< physical address of vring,
- * or virtual address for virtio_user. */
+ rte_iova_t vq_ring_mem; /**< physical address of vring,
+ * or virtual address for virtio_user. */
/**
* Head of the free chain in the descriptor table. If
@@ -304,6 +303,9 @@ void virtqueue_dump(struct virtqueue *vq);
*/
struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
+/* Flush the elements in the used ring. */
+void virtqueue_flush(struct virtqueue *vq);
+
static inline int
virtqueue_full(const struct virtqueue *vq)
{
@@ -312,6 +314,8 @@ virtqueue_full(const struct virtqueue *vq)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+
static inline void
vq_update_avail_idx(struct virtqueue *vq)
{