Diffstat (limited to 'drivers/net/virtio/virtio_ethdev.c')
drivers/net/virtio/virtio_ethdev.c | 364
1 file changed, 288 insertions(+), 76 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index f5961ab7..983b95f1 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -38,6 +38,7 @@
#include <unistd.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
@@ -86,7 +87,7 @@ static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
-static void virtio_mac_addr_add(struct rte_eth_dev *dev,
+static int virtio_mac_addr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq __rte_unused);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
@@ -485,11 +486,11 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
hw->cvq = cvq;
}
- /* For virtio_user case (that is when dev->pci_dev is NULL), we use
+ /* For virtio_user case (that is when hw->dev is NULL), we use
* virtual address. And we need to properly set _offset_; please see
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
*/
- if (dev->pci_dev)
+ if (!hw->virtio_user_dev)
vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
else {
vq->vq_ring_mem = (uintptr_t)mz->addr;
@@ -545,6 +546,9 @@ virtio_free_queues(struct virtio_hw *hw)
int queue_type;
uint16_t i;
+ if (hw->vqs == NULL)
+ return;
+
for (i = 0; i < nr_vq; i++) {
vq = hw->vqs[i];
if (!vq)
@@ -563,9 +567,11 @@ virtio_free_queues(struct virtio_hw *hw)
}
rte_free(vq);
+ hw->vqs[i] = NULL;
}
rte_free(hw->vqs);
+ hw->vqs = NULL;
}
static int
@@ -593,16 +599,29 @@ virtio_alloc_queues(struct rte_eth_dev *dev)
return 0;
}
+static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
+
static void
virtio_dev_close(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
+ struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "virtio_dev_close");
/* reset the NIC */
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
+ VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
+ if (intr_conf->rxq)
+ virtio_queues_unbind_intr(dev);
+
+ if (intr_conf->lsc || intr_conf->rxq) {
+ rte_intr_disable(dev->intr_handle);
+ rte_intr_efd_disable(dev->intr_handle);
+ rte_free(dev->intr_handle->intr_vec);
+ dev->intr_handle->intr_vec = NULL;
+ }
+
vtpci_reset(hw);
virtio_dev_free_mbufs(dev);
virtio_free_queues(hw);
@@ -617,7 +636,7 @@ virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -640,7 +659,7 @@ virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -663,7 +682,7 @@ virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -686,7 +705,7 @@ virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -708,15 +727,38 @@ virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
hw->vtnet_hdr_size;
uint32_t frame_size = mtu + ether_hdr_len;
+ uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
+
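+ /* The device may advertise an MTU larger than this PMD can receive;
+ * cap the limit at VIRTIO_MAX_RX_PKTLEN.
+ */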
+ max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
- if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
- PMD_INIT_LOG(ERR, "MTU should be between %d and %d\n",
- ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
+ if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
+ PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
+ ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
return -EINVAL;
}
return 0;
}
+static int
+virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+ struct virtqueue *vq = rxvq->vq;
+
+ virtqueue_enable_intr(vq);
+ return 0;
+}
+
+static int
+virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+ struct virtqueue *vq = rxvq->vq;
+
+ virtqueue_disable_intr(vq);
+ return 0;
+}
+
/*
* dev_ops for virtio, bare necessities for basic operation
*/
@@ -738,7 +780,10 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.xstats_reset = virtio_dev_stats_reset,
.link_update = virtio_dev_link_update,
.rx_queue_setup = virtio_dev_rx_queue_setup,
+ .rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
.rx_queue_release = virtio_dev_queue_release,
+ .rx_descriptor_done = virtio_dev_rx_queue_done,
.tx_queue_setup = virtio_dev_tx_queue_setup,
.tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
@@ -980,7 +1025,7 @@ virtio_get_hwaddr(struct virtio_hw *hw)
}
}
-static void
+static int
virtio_mac_table_set(struct virtio_hw *hw,
const struct virtio_net_ctrl_mac *uc,
const struct virtio_net_ctrl_mac *mc)
@@ -990,7 +1035,7 @@ virtio_mac_table_set(struct virtio_hw *hw,
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
PMD_DRV_LOG(INFO, "host does not support mac table");
- return;
+ return -1;
}
ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
@@ -1005,9 +1050,10 @@ virtio_mac_table_set(struct virtio_hw *hw,
err = virtio_send_command(hw->cvq, &ctrl, len, 2);
if (err != 0)
PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
+ return err;
}
-static void
+static int
virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq __rte_unused)
{
@@ -1018,7 +1064,7 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
if (index >= VIRTIO_MAX_MAC_ADDRS) {
PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
- return;
+ return -EINVAL;
}
uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
@@ -1035,7 +1081,7 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
}
- virtio_mac_table_set(hw, uc, mc);
+ return virtio_mac_table_set(hw, uc, mc);
}
static void
@@ -1122,6 +1168,18 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
host_features);
+ /* If supported, ensure MTU value is valid before acknowledging it. */
+ if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
+ struct virtio_net_config config;
+
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mtu),
+ &config.mtu, sizeof(config.mtu));
+
+ if (config.mtu < ETHER_MIN_MTU)
+ req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+ }
+
/*
* Negotiate features: Subset of device feature bits are written back
* guest feature bits.
@@ -1154,9 +1212,8 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
* Process Virtio Config changed interrupt and call the callback
* if link state changed.
*/
-static void
-virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+void
+virtio_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = param;
struct virtio_hw *hw = dev->data->dev_private;
@@ -1166,7 +1223,7 @@ virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
isr = vtpci_isr(hw);
PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
- if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0)
+ if (rte_intr_enable(dev->intr_handle) < 0)
PMD_DRV_LOG(ERR, "interrupt enable failed");
if (isr & VIRTIO_PCI_ISR_CONFIG) {
@@ -1187,6 +1244,95 @@ rx_func_get(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
+/* Only support 1:1 queue/interrupt mapping so far.
+ * TODO: support n:1 queue/interrupt mapping when there is a limited
+ * number of interrupt vectors (< N + 1).
+ */
+static int
+virtio_queues_bind_intr(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_LOG(INFO, "queue/interrupt binding");
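+ /* Vector 0 is reserved for the config-change/LSC interrupt, so Rx
+ * queue i takes vector i + 1. The Rx vq of queue pair i is
+ * hw->vqs[i * 2], as each pair's Rx/Tx vqs sit back to back.
+ */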
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ dev->intr_handle->intr_vec[i] = i + 1;
+ if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
+ VIRTIO_MSI_NO_VECTOR) {
+ PMD_DRV_LOG(ERR, "failed to set queue vector");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void
+virtio_queues_unbind_intr(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
+ for (i = 0; i < dev->data->nb_rx_queues; ++i)
+ VTPCI_OPS(hw)->set_queue_irq(hw,
+ hw->vqs[i * VTNET_CQ],
+ VIRTIO_MSI_NO_VECTOR);
+}
+
+static int
+virtio_configure_intr(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (!rte_intr_cap_multiple(dev->intr_handle)) {
+ PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
+ return -ENOTSUP;
+ }
+
+ if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
+ PMD_INIT_LOG(ERR, "Fail to create eventfd");
+ return -1;
+ }
+
+ if (!dev->intr_handle->intr_vec) {
+ dev->intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ hw->max_queue_pairs * sizeof(int), 0);
+ if (!dev->intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
+ hw->max_queue_pairs);
+ return -ENOMEM;
+ }
+ }
+
+ /* Re-register callback to update max_intr */
+ rte_intr_callback_unregister(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+ rte_intr_callback_register(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+
+ /* DO NOT try to remove this! This function enables MSI-X; without it,
+ * QEMU will encounter SIGSEGV when DRIVER_OK is sent.
+ * For legacy devices, this must also be done before queue/vector
+ * binding so the config size grows from 20 to 24; otherwise the
+ * VIRTIO_MSI_QUEUE_VECTOR register (offset 22) will be ignored.
+ */
+ */
+ if (rte_intr_enable(dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return -1;
+ }
+
+ if (virtio_queues_bind_intr(dev) < 0) {
+ PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
+ return -1;
+ }
+
+ return 0;
+}
+
/* reset device and renegotiate features if needed */
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
@@ -1194,7 +1340,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
struct virtio_hw *hw = eth_dev->data->dev_private;
struct virtio_net_config *config;
struct virtio_net_config local_config;
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct rte_pci_device *pci_dev = NULL;
int ret;
/* Reset the device although not necessary at startup */
@@ -1208,13 +1354,17 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
- rte_eth_copy_pci_info(eth_dev, pci_dev);
+ if (!hw->virtio_user_dev) {
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ }
- /* If host does not support status then disable LSC */
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
- eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
- else
+ eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+ /* If host does not support both status and MSI-X then disable LSC */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ else
+ eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
rx_func_get(eth_dev);
@@ -1264,6 +1414,32 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
hw->max_queue_pairs = config->max_virtqueue_pairs;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mtu),
+ &config->mtu,
+ sizeof(config->mtu));
+
+ /*
+ * MTU value has already been checked at negotiation
+ * time, but check again in case it has changed since
+ * then, which should not happen.
+ */
+ if (config->mtu < ETHER_MIN_MTU) {
+ PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
+ config->mtu);
+ return -1;
+ }
+
+ hw->max_mtu = config->mtu;
+ /* Set initial MTU to the maximum one supported by vhost */
+ eth_dev->data->mtu = config->mtu;
+
+ } else {
+ hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+ VLAN_TAG_LEN - hw->vtnet_hdr_size;
+ }
+
PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
config->max_virtqueue_pairs);
PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
@@ -1280,6 +1456,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
ret = virtio_alloc_queues(eth_dev);
if (ret < 0)
return ret;
+
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ if (virtio_configure_intr(eth_dev) < 0) {
+ PMD_INIT_LOG(ERR, "failed to configure interrupt");
+ return -1;
+ }
+ }
+
vtpci_reinit_complete(hw);
if (pci_dev)
@@ -1301,7 +1485,7 @@ virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
if (hw->modern) {
/*
* We don't have to re-parse the PCI config space, since
- * rte_eal_pci_map_device() makes sure the mapped address
+ * rte_pci_map_device() makes sure the mapped address
* in secondary process would equal to the one mapped in
* the primary process: error will be returned if that
* requirement is not met.
@@ -1310,12 +1494,12 @@ virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
* (such as dev_cfg, common_cfg, etc.) parsed from the
* primary process, which is stored in shared memory.
*/
- if (rte_eal_pci_map_device(pci_dev)) {
+ if (rte_pci_map_device(pci_dev)) {
PMD_INIT_LOG(DEBUG, "failed to map pci device!");
return -1;
}
} else {
- if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
+ if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
return -1;
}
@@ -1344,8 +1528,6 @@ int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- struct rte_pci_device *pci_dev;
- uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
int ret;
RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
@@ -1355,7 +1537,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
- ret = virtio_remap_pci(eth_dev->pci_dev, hw);
+ ret = virtio_remap_pci(RTE_DEV_TO_PCI(eth_dev->device),
+ hw);
if (ret)
return ret;
}
@@ -1379,17 +1562,16 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
return -ENOMEM;
}
- pci_dev = eth_dev->pci_dev;
hw->port_id = eth_dev->data->port_id;
-
- if (pci_dev) {
- ret = vtpci_init(pci_dev, hw, &dev_flags);
+ /* For the virtio_user case, hw->virtio_user_dev is populated by
+ * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
+ */
+ if (!hw->virtio_user_dev) {
+ ret = vtpci_init(RTE_DEV_TO_PCI(eth_dev->device), hw);
if (ret)
return ret;
}
- eth_dev->data->dev_flags = dev_flags;
-
/* reset device and negotiate default features */
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
if (ret < 0)
@@ -1397,7 +1579,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
/* Setup interrupt callback */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- rte_intr_callback_register(&pci_dev->intr_handle,
+ rte_intr_callback_register(eth_dev->intr_handle,
virtio_interrupt_handler, eth_dev);
return 0;
@@ -1406,8 +1588,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
-
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
@@ -1415,7 +1595,6 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
virtio_dev_stop(eth_dev);
virtio_dev_close(eth_dev);
- pci_dev = eth_dev->pci_dev;
eth_dev->dev_ops = NULL;
eth_dev->tx_pkt_burst = NULL;
@@ -1426,29 +1605,37 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
/* reset interrupt callback */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- rte_intr_callback_unregister(&pci_dev->intr_handle,
+ rte_intr_callback_unregister(eth_dev->intr_handle,
virtio_interrupt_handler,
eth_dev);
- rte_eal_pci_unmap_device(pci_dev);
+ if (eth_dev->device)
+ rte_pci_unmap_device(RTE_DEV_TO_PCI(eth_dev->device));
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
return 0;
}
-static struct eth_driver rte_virtio_pmd = {
- .pci_drv = {
- .driver = {
- .name = "net_virtio",
- },
- .id_table = pci_id_virtio_map,
- .drv_flags = RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
+static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
+ eth_virtio_dev_init);
+}
+
+static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
+}
+
+static struct rte_pci_driver rte_virtio_pmd = {
+ .driver = {
+ .name = "net_virtio",
},
- .eth_dev_init = eth_virtio_dev_init,
- .eth_dev_uninit = eth_virtio_dev_uninit,
- .dev_private_size = sizeof(struct virtio_hw),
+ .id_table = pci_id_virtio_map,
+ .drv_flags = 0,
+ .probe = eth_virtio_pci_probe,
+ .remove = eth_virtio_pci_remove,
};
RTE_INIT(rte_virtio_pmd_init);
@@ -1460,7 +1647,7 @@ rte_virtio_pmd_init(void)
return;
}
- rte_eal_pci_register(&rte_virtio_pmd.pci_drv);
+ rte_pci_register(&rte_virtio_pmd);
}
/*
@@ -1520,7 +1707,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
}
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
+ /* Enable vector (0) for Link State Interrupt */
+ if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
+ VIRTIO_MSI_NO_VECTOR) {
PMD_DRV_LOG(ERR, "failed to set config vector");
return -EBUSY;
}
@@ -1543,16 +1732,22 @@ virtio_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, "link status not supported by host");
return -ENOTSUP;
}
+ }
- if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) {
+ /* Enable uio/vfio intr/eventfd mapping: although we already did that
+ * in device configure, it could have been unmapped while the device
+ * was stopped.
+ */
+ if (dev->data->dev_conf.intr_conf.lsc ||
+ dev->data->dev_conf.intr_conf.rxq) {
+ rte_intr_disable(dev->intr_handle);
+
+ if (rte_intr_enable(dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -EIO;
}
}
- /* Initialize Link state */
- virtio_dev_link_update(dev, 0);
-
/* Notify the backend.
 * Otherwise the tap backend might already stop its queue due to fullness.
 * The vhost backend will have no chance to be woken up.
@@ -1582,6 +1777,11 @@ virtio_dev_start(struct rte_eth_dev *dev)
VIRTQUEUE_DUMP(txvq->vq);
}
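+ /* Mark the port started before the link update below, so that
+ * virtio_dev_link_update() reports the real link status.
+ */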
+ hw->started = 1;
+
+ /* Initialize Link state */
+ virtio_dev_link_update(dev, 0);
+
return 0;
}
@@ -1636,13 +1836,16 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
+ struct virtio_hw *hw = dev->data->dev_private;
struct rte_eth_link link;
+ struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "stop");
- if (dev->data->dev_conf.intr_conf.lsc)
- rte_intr_disable(&dev->pci_dev->intr_handle);
+ if (intr_conf->lsc || intr_conf->rxq)
+ rte_intr_disable(dev->intr_handle);
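+
+ /* Once cleared, virtio_dev_link_update() reports the link as down. */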
+ hw->started = 0;
memset(&link, 0, sizeof(link));
virtio_dev_atomic_write_link_status(dev, &link);
}
@@ -1659,7 +1862,9 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = SPEED_10G;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ if (hw->started == 0) {
+ link.link_status = ETH_LINK_DOWN;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
@@ -1684,13 +1889,12 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- uint64_t tso_mask;
+ uint64_t tso_mask, host_features;
struct virtio_hw *hw = dev->data->dev_private;
- if (dev->pci_dev)
- dev_info->driver_name = dev->driver->pci_drv.driver.name;
- else
- dev_info->driver_name = "virtio_user PMD";
+ dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
+
+ dev_info->pci_dev = dev->device ? RTE_DEV_TO_PCI(dev->device) : NULL;
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
dev_info->max_tx_queues =
@@ -1701,18 +1905,25 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_txconf = (struct rte_eth_txconf) {
.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
};
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO;
- dev_info->tx_offload_capa = 0;
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ dev_info->rx_offload_capa = 0;
+ if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
+ dev_info->rx_offload_capa |=
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM;
+ }
+ tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ if ((host_features & tso_mask) == tso_mask)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ dev_info->tx_offload_capa = 0;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
dev_info->tx_offload_capa |=
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
}
-
tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6);
if ((hw->guest_features & tso_mask) == tso_mask)
@@ -1732,3 +1943,4 @@ __rte_unused uint8_t is_rx)
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio");