Diffstat (limited to 'drivers/net/virtio')
-rw-r--r--  drivers/net/virtio/meson.build                        27
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c                   166
-rw-r--r--  drivers/net/virtio/virtio_ethdev.h                     8
-rw-r--r--  drivers/net/virtio/virtio_rxtx.c                       9
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel.c         86
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.c     14
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.h      3
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_user.c           76
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c     175
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.h       8
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c              131
11 files changed, 516 insertions, 187 deletions
diff --git a/drivers/net/virtio/meson.build b/drivers/net/virtio/meson.build
new file mode 100644
index 00000000..e43ce6bb
--- /dev/null
+++ b/drivers/net/virtio/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources += files('virtio_ethdev.c',
+ 'virtio_pci.c',
+ 'virtio_rxtx.c',
+ 'virtio_rxtx_simple.c',
+ 'virtqueue.c')
+deps += ['kvargs', 'bus_pci']
+
+if arch_subdir == 'x86'
+ sources += files('virtio_rxtx_simple_sse.c')
+elif arch_subdir == 'arm' and host_machine.cpu_family().startswith('aarch64')
+ sources += files('virtio_rxtx_simple_neon.c')
+endif
+
+if host_machine.system() == 'linux'
+ dpdk_conf.set('RTE_VIRTIO_USER', 1)
+
+ sources += files('virtio_user_ethdev.c',
+ 'virtio_user/vhost_kernel.c',
+ 'virtio_user/vhost_kernel_tap.c',
+ 'virtio_user/vhost_user.c',
+ 'virtio_user/virtio_user_dev.c')
+ deps += ['bus_vdev']
+endif
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 884f74ad..df50a571 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -14,7 +14,6 @@
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -29,6 +28,7 @@
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_cycles.h>
+#include <rte_kvargs.h>
#include "virtio_ethdev.h"
#include "virtio_pci.h"
@@ -68,7 +68,7 @@ static int virtio_mac_addr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
-static void virtio_mac_addr_set(struct rte_eth_dev *dev,
+static int virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int virtio_intr_enable(struct rte_eth_dev *dev);
@@ -392,8 +392,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
size, vq->vq_ring_size);
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
- SOCKET_ID_ANY,
- 0, VIRTIO_PCI_VRING_ALIGN);
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
mz = rte_memzone_lookup(vq_name);
@@ -418,8 +418,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
dev->data->port_id, vtpci_queue_idx);
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
- SOCKET_ID_ANY, 0,
- RTE_CACHE_LINE_SIZE);
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ RTE_CACHE_LINE_SIZE);
if (hdr_mz == NULL) {
if (rte_errno == EEXIST)
hdr_mz = rte_memzone_lookup(vq_hdr_name);
@@ -774,46 +774,6 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.mac_addr_set = virtio_mac_addr_set,
};
-static inline int
-virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
@@ -1097,7 +1057,7 @@ virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
virtio_mac_table_set(hw, uc, mc);
}
-static void
+static int
virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct virtio_hw *hw = dev->data->dev_private;
@@ -1113,9 +1073,14 @@ virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
- virtio_send_command(hw->cvq, &ctrl, &len, 1);
- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
- virtio_set_hwaddr(hw);
+ return virtio_send_command(hw->cvq, &ctrl, &len, 1);
+ }
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
+ return -ENOTSUP;
+
+ virtio_set_hwaddr(hw);
+ return 0;
}
static int
@@ -1273,9 +1238,16 @@ static void
virtio_notify_peers(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
- struct virtnet_rx *rxvq = dev->data->rx_queues[0];
+ struct virtnet_rx *rxvq;
struct rte_mbuf *rarp_mbuf;
+ if (!dev->data->rx_queues)
+ return;
+
+ rxvq = dev->data->rx_queues[0];
+ if (!rxvq)
+ return;
+
rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
(struct ether_addr *)hw->mac_addr);
if (rarp_mbuf == NULL) {
@@ -1333,7 +1305,8 @@ virtio_interrupt_handler(void *param)
if (isr & VIRTIO_NET_S_ANNOUNCE) {
virtio_notify_peers(dev);
- virtio_ack_link_announce(dev);
+ if (hw->cvq)
+ virtio_ack_link_announce(dev);
}
}
@@ -1744,9 +1717,51 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+static int vdpa_check_handler(__rte_unused const char *key,
+ const char *value, __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+vdpa_mode_selected(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *key = "vdpa";
+ int ret = 0;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key))
+ goto exit;
+
+ /* vdpa mode selected when there's a key-value pair: vdpa=1 */
+ if (rte_kvargs_process(kvlist, key,
+ vdpa_check_handler, NULL) < 0) {
+ goto exit;
+ }
+ ret = 1;
+
+exit:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
+ /* virtio pmd skips probe if device needs to work in vdpa mode */
+ if (vdpa_mode_selected(pci_dev->device.devargs))
+ return 1;
+
return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
eth_virtio_dev_init);
}
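
vdpa_mode_selected() above follows the usual rte_kvargs idiom: parse the devargs string, check that the key is present, run a handler per value, then free the list. A minimal standalone sketch of the same idiom, using a hypothetical "enable" key instead of the driver's "vdpa" key:

#include <string.h>

#include <rte_common.h>
#include <rte_kvargs.h>

/* Handler invoked once per "enable=<value>" pair; reject anything but "1". */
static int
enable_check_handler(const char *key __rte_unused, const char *value,
		     void *opaque __rte_unused)
{
	return strcmp(value, "1") == 0 ? 0 : -1;
}

/* Return 1 when the devargs string contains "enable=1", 0 otherwise. */
static int
enable_flag_set(const char *args)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	kvlist = rte_kvargs_parse(args, NULL);
	if (kvlist == NULL)
		return 0;

	if (rte_kvargs_count(kvlist, "enable") &&
	    rte_kvargs_process(kvlist, "enable",
			       enable_check_handler, NULL) == 0)
		ret = 1;

	rte_kvargs_free(kvlist);
	return ret;
}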
@@ -1787,6 +1802,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t rx_offloads = rxmode->offloads;
uint64_t req_features;
int ret;
@@ -1799,14 +1815,11 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
- /* The name hw_ip_checksum is a bit confusing since it can be
- * set by the application to request L3 and/or L4 checksums. In
- * case of virtio, only L4 checksum is supported.
- */
- if (rxmode->hw_ip_checksum)
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
- if (rxmode->enable_lro)
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
req_features |=
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
@@ -1818,14 +1831,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
- if (rxmode->hw_ip_checksum &&
+ if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM)) &&
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
PMD_DRV_LOG(ERR,
"rx checksum not available on this host");
return -ENOTSUP;
}
- if (rxmode->enable_lro &&
+ if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
(!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
PMD_DRV_LOG(ERR,
@@ -1837,9 +1851,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
virtio_dev_cq_start(dev);
- hw->vlan_strip = rxmode->hw_vlan_strip;
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ hw->vlan_strip = 1;
- if (rxmode->hw_vlan_filter
+ if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
&& !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(ERR,
"vlan filtering not available on this host");
@@ -1870,7 +1885,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
hw->use_simple_tx = 0;
}
- if (rxmode->hw_ip_checksum)
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM))
hw->use_simple_rx = 0;
return 0;
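
These checks consume the per-port offload flags that an application now requests through rte_eth_conf before calling rte_eth_dev_configure(). A minimal hedged sketch of the caller side (port id and queue counts are placeholders, not taken from this patch):

#include <string.h>

#include <rte_ethdev.h>

/* Request Rx checksum, LRO and VLAN strip on port 0 (illustrative setup). */
static int
configure_example_port(void)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_CKSUM |
			       DEV_RX_OFFLOAD_UDP_CKSUM |
			       DEV_RX_OFFLOAD_TCP_LRO |
			       DEV_RX_OFFLOAD_VLAN_STRIP;

	/* 1 Rx queue, 1 Tx queue; returns 0 on success. */
	return rte_eth_dev_configure(0, 1, 1, &conf);
}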
@@ -2028,21 +2044,21 @@ virtio_dev_stop(struct rte_eth_dev *dev)
hw->started = 0;
memset(&link, 0, sizeof(link));
- virtio_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
rte_spinlock_unlock(&hw->state_lock);
}
static int
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
uint16_t status;
struct virtio_hw *hw = dev->data->dev_private;
+
memset(&link, 0, sizeof(link));
- virtio_dev_atomic_read_link_status(dev, &link);
- old = link;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
if (hw->started == 0) {
link.link_status = ETH_LINK_DOWN;
@@ -2063,9 +2079,8 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
} else {
link.link_status = ETH_LINK_UP;
}
- virtio_dev_atomic_write_link_status(dev, &link);
- return (old.link_status == link.link_status) ? -1 : 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
@@ -2073,9 +2088,10 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t offloads = rxmode->offloads;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->hw_vlan_filter &&
+ if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(NOTICE,
@@ -2086,7 +2102,7 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
if (mask & ETH_VLAN_STRIP_MASK)
- hw->vlan_strip = rxmode->hw_vlan_strip;
+ hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
return 0;
}
@@ -2099,7 +2115,6 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
- dev_info->pci_dev = dev->device ? RTE_ETH_DEV_TO_PCI(dev) : NULL;
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
dev_info->max_tx_queues =
@@ -2112,18 +2127,21 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
};
host_features = VTPCI_OPS(hw)->get_features(hw);
- dev_info->rx_offload_capa = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM;
}
+ if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
if ((host_features & tso_mask) == tso_mask)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
- dev_info->tx_offload_capa = 0;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
dev_info->tx_offload_capa |=
DEV_TX_OFFLOAD_UDP_CKSUM |
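
On the application side, the capability bits advertised here can be read back with rte_eth_dev_info_get() before an offload is requested; a small sketch (the helper name is illustrative only):

#include <stdbool.h>

#include <rte_ethdev.h>

/* Return true when the given port advertises LRO support. */
static bool
port_supports_lro(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	return (info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) != 0;
}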
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 4539d2e4..bb40064e 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -43,6 +43,14 @@
1u << VIRTIO_NET_F_GUEST_CSUM | \
1u << VIRTIO_NET_F_GUEST_TSO4 | \
1u << VIRTIO_NET_F_GUEST_TSO6)
+
+#define VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS \
+ (DEV_RX_OFFLOAD_TCP_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_LRO | \
+ DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_VLAN_STRIP)
+
/*
* CQ function prototype
*/
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 8dbf2a30..92fab217 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -38,10 +38,6 @@
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
-
-#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
int
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
{
@@ -389,7 +385,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id __rte_unused,
- __rte_unused const struct rte_eth_rxconf *rx_conf,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mp)
{
uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
@@ -410,6 +406,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
rte_exit(EXIT_FAILURE,
"Cannot allocate mbufs for rx virtqueue");
}
+
dev->data->rx_queues[queue_idx] = rxvq;
return 0;
@@ -502,7 +499,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
/* cannot use simple rxtx funcs with multisegs or offloads */
- if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) != VIRTIO_SIMPLE_FLAGS)
+ if (dev->data->dev_conf.txmode.offloads)
hw->use_simple_tx = 0;
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
index 8d0a1ab2..b2444096 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -70,6 +70,32 @@ static uint64_t vhost_req_user_to_kernel[] = {
[VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
};
+struct walk_arg {
+ struct vhost_memory_kernel *vm;
+ uint32_t region_nr;
+};
+static int
+add_memory_region(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct walk_arg *wa = arg;
+ struct vhost_memory_region *mr;
+ void *start_addr;
+
+ if (wa->region_nr >= max_regions)
+ return -1;
+
+ mr = &wa->vm->regions[wa->region_nr++];
+ start_addr = ms->addr;
+
+ mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
+ mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
+ mr->memory_size = len;
+ mr->mmap_offset = 0;
+
+ return 0;
+}
+
/* By default, vhost kernel module allows 64 regions, but DPDK allows
* 256 segments. As a relief, below function merges those virtually
* adjacent memsegs into one region.
@@ -77,63 +103,24 @@ static uint64_t vhost_req_user_to_kernel[] = {
static struct vhost_memory_kernel *
prepare_vhost_memory_kernel(void)
{
- uint32_t i, j, k = 0;
- struct rte_memseg *seg;
- struct vhost_memory_region *mr;
struct vhost_memory_kernel *vm;
+ struct walk_arg wa;
vm = malloc(sizeof(struct vhost_memory_kernel) +
- max_regions *
- sizeof(struct vhost_memory_region));
+ max_regions *
+ sizeof(struct vhost_memory_region));
if (!vm)
return NULL;
- for (i = 0; i < RTE_MAX_MEMSEG; ++i) {
- seg = &rte_eal_get_configuration()->mem_config->memseg[i];
- if (!seg->addr)
- break;
-
- int new_region = 1;
-
- for (j = 0; j < k; ++j) {
- mr = &vm->regions[j];
-
- if (mr->userspace_addr + mr->memory_size ==
- (uint64_t)(uintptr_t)seg->addr) {
- mr->memory_size += seg->len;
- new_region = 0;
- break;
- }
-
- if ((uint64_t)(uintptr_t)seg->addr + seg->len ==
- mr->userspace_addr) {
- mr->guest_phys_addr =
- (uint64_t)(uintptr_t)seg->addr;
- mr->userspace_addr =
- (uint64_t)(uintptr_t)seg->addr;
- mr->memory_size += seg->len;
- new_region = 0;
- break;
- }
- }
-
- if (new_region == 0)
- continue;
-
- mr = &vm->regions[k++];
- /* use vaddr here! */
- mr->guest_phys_addr = (uint64_t)(uintptr_t)seg->addr;
- mr->userspace_addr = (uint64_t)(uintptr_t)seg->addr;
- mr->memory_size = seg->len;
- mr->mmap_offset = 0;
+ wa.region_nr = 0;
+ wa.vm = vm;
- if (k >= max_regions) {
- free(vm);
- return NULL;
- }
+ if (rte_memseg_contig_walk(add_memory_region, &wa) < 0) {
+ free(vm);
+ return NULL;
}
- vm->nregions = k;
+ vm->nregions = wa.region_nr;
vm->padding = 0;
return vm;
}
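
rte_memseg_contig_walk() invokes the supplied callback once per virtually contiguous chunk of mapped memory, which is what lets add_memory_region() above stay so small. A minimal standalone sketch of the same walk API, here just summing the mapped length (names are illustrative):

#include <stddef.h>

#include <rte_common.h>
#include <rte_memory.h>

/* Accumulate the total length of the VA-contiguous areas walked. */
static int
sum_contig_len(const struct rte_memseg_list *msl __rte_unused,
	       const struct rte_memseg *ms __rte_unused,
	       size_t len, void *arg)
{
	*(size_t *)arg += len;
	return 0;	/* a negative return would abort the walk as an error */
}

static size_t
total_mapped_len(void)
{
	size_t total = 0;

	if (rte_memseg_contig_walk(sum_contig_len, &total) < 0)
		return 0;
	return total;
}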
@@ -351,7 +338,8 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
else
hdr_size = sizeof(struct virtio_net_hdr);
- tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq);
+ tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq,
+ (char *)dev->mac_addr);
if (tapfd < 0) {
PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
return -1;
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
index 1a47a348..9ea7ade7 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -7,15 +7,19 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <net/if.h>
+#include <net/if_arp.h>
#include <errno.h>
#include <string.h>
#include <limits.h>
+#include <rte_ether.h>
+
#include "vhost_kernel_tap.h"
#include "../virtio_logs.h"
int
-vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
+vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
+ const char *mac)
{
unsigned int tap_features;
int sndbuf = INT_MAX;
@@ -94,6 +98,14 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s",
strerror(errno));
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
+ memcpy(ifr.ifr_hwaddr.sa_data, mac, ETHER_ADDR_LEN);
+ if (ioctl(tapfd, SIOCSIFHWADDR, (void *)&ifr) == -1) {
+ PMD_DRV_LOG(ERR, "SIOCSIFHWADDR failed: %s", strerror(errno));
+ goto error;
+ }
+
if (!(*p_ifname))
*p_ifname = strdup(ifr.ifr_name);
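
For reference, the same struct ifreq layout can be used to read a MAC back from the tap fd; a small hedged sketch, assuming the tap driver accepts SIOCGIFHWADDR on the device fd just as it accepts the SIOCSIFHWADDR call above:

#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>

#include <rte_ether.h>

/* Read the current MAC of an already-configured tap fd into 'mac'. */
static int
tap_get_mac(int tapfd, uint8_t mac[ETHER_ADDR_LEN])
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	if (ioctl(tapfd, SIOCGIFHWADDR, (void *)&ifr) == -1)
		return -1;

	memcpy(mac, ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
	return 0;
}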
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
index 7d52e6b7..01a026f5 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
@@ -35,4 +35,5 @@
/* Constants */
#define PATH_NET_TUN "/dev/net/tun"
-int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq);
+int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
+ const char *mac);
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 91c6449b..ef6e43df 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -138,12 +138,13 @@ struct hugepage_file_info {
static int
get_hugepage_file_info(struct hugepage_file_info huges[], int max)
{
- int idx;
+ int idx, k, exist;
FILE *f;
char buf[BUFSIZ], *tmp, *tail;
char *str_underline, *str_start;
int huge_index;
uint64_t v_start, v_end;
+ struct stat stats;
f = fopen("/proc/self/maps", "r");
if (!f) {
@@ -183,16 +184,39 @@ get_hugepage_file_info(struct hugepage_file_info huges[], int max)
if (sscanf(str_start, "map_%d", &huge_index) != 1)
continue;
+ /* skip a duplicated file that is mapped to multiple regions */
+ for (k = 0, exist = -1; k < idx; ++k) {
+ if (!strcmp(huges[k].path, tmp)) {
+ exist = k;
+ break;
+ }
+ }
+ if (exist >= 0)
+ continue;
+
if (idx >= max) {
PMD_DRV_LOG(ERR, "Exceed maximum of %d", max);
goto error;
}
+
huges[idx].addr = v_start;
- huges[idx].size = v_end - v_start;
+ huges[idx].size = v_end - v_start; /* To be corrected later */
snprintf(huges[idx].path, PATH_MAX, "%s", tmp);
idx++;
}
+ /* correct the size for files that span multiple regions */
+ for (k = 0; k < idx; ++k) {
+ if (stat(huges[k].path, &stats) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to stat %s, %s\n",
+ huges[k].path, strerror(errno));
+ continue;
+ }
+ huges[k].size = stats.st_size;
+ PMD_DRV_LOG(INFO, "file %s, size %zx\n",
+ huges[k].path, huges[k].size);
+ }
+
fclose(f);
return idx;
@@ -263,6 +287,9 @@ vhost_user_sock(struct virtio_user_dev *dev,
PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
+ if (dev->is_server && vhostfd < 0)
+ return -1;
+
msg.request = req;
msg.flags = VHOST_USER_VERSION;
msg.size = 0;
@@ -378,6 +405,30 @@ vhost_user_sock(struct virtio_user_dev *dev,
return 0;
}
+#define MAX_VIRTIO_USER_BACKLOG 1
+static int
+virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
+{
+ int ret;
+ int flag;
+ int fd = dev->listenfd;
+
+ ret = bind(fd, (struct sockaddr *)un, sizeof(*un));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to bind to %s: %s; remove it and try again\n",
+ dev->path, strerror(errno));
+ return -1;
+ }
+ ret = listen(fd, MAX_VIRTIO_USER_BACKLOG);
+ if (ret < 0)
+ return -1;
+
+ flag = fcntl(fd, F_GETFL);
+ fcntl(fd, F_SETFL, flag | O_NONBLOCK);
+
+ return 0;
+}
+
/**
* Set up environment to talk with a vhost user backend.
*
@@ -405,13 +456,24 @@ vhost_user_setup(struct virtio_user_dev *dev)
memset(&un, 0, sizeof(un));
un.sun_family = AF_UNIX;
snprintf(un.sun_path, sizeof(un.sun_path), "%s", dev->path);
- if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
- PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
- close(fd);
- return -1;
+
+ if (dev->is_server) {
+ dev->listenfd = fd;
+ if (virtio_user_start_server(dev, &un) < 0) {
+ PMD_DRV_LOG(ERR, "virtio-user startup fails in server mode");
+ close(fd);
+ return -1;
+ }
+ dev->vhostfd = -1;
+ } else {
+ if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
+ PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
+ close(fd);
+ return -1;
+ }
+ dev->vhostfd = fd;
}
- dev->vhostfd = fd;
return 0;
}
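
The listen fd set up above is left non-blocking, so the accept() performed later (in virtio_user_ethdev.c, further down in this patch) simply fails with EAGAIN when no backend is waiting. A condensed, hedged sketch of that accept side, not the driver's exact code:

#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Accept one backend connection on a non-blocking listen fd and make the
 * new fd non-blocking too. Returns the connected fd, or -1 (with errno set
 * to EAGAIN when no client is pending yet).
 */
static int
try_accept_backend(int listenfd)
{
	int connfd, flags;

	connfd = accept(listenfd, NULL, NULL);
	if (connfd < 0)
		return -1;

	flags = fcntl(connfd, F_GETFL);
	if (flags < 0 || fcntl(connfd, F_SETFL, flags | O_NONBLOCK) < 0) {
		close(connfd);
		return -1;
	}
	return connfd;
}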
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index f90fee9e..4322527f 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -17,6 +17,8 @@
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"
+#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
+
static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
@@ -94,11 +96,27 @@ virtio_user_queue_setup(struct virtio_user_dev *dev,
}
int
+is_vhost_user_by_type(const char *path)
+{
+ struct stat sb;
+
+ if (stat(path, &sb) == -1)
+ return 0;
+
+ return S_ISSOCK(sb.st_mode);
+}
+
+int
virtio_user_start_device(struct virtio_user_dev *dev)
{
uint64_t features;
int ret;
+ pthread_mutex_lock(&dev->mutex);
+
+ if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
+ goto error;
+
/* Do not check return as already done in init, or reset in stop */
dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL);
@@ -132,8 +150,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
*/
dev->ops->enable_qp(dev, 0, 1);
+ dev->started = true;
+ pthread_mutex_unlock(&dev->mutex);
+
return 0;
error:
+ pthread_mutex_unlock(&dev->mutex);
/* TODO: free resource here or caller to check */
return -1;
}
@@ -142,13 +164,17 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
{
uint32_t i;
+ pthread_mutex_lock(&dev->mutex);
for (i = 0; i < dev->max_queue_pairs; ++i)
dev->ops->enable_qp(dev, i, 0);
if (dev->ops->send_request(dev, VHOST_USER_RESET_OWNER, NULL) < 0) {
PMD_DRV_LOG(INFO, "Failed to reset the device\n");
+ pthread_mutex_unlock(&dev->mutex);
return -1;
}
+ dev->started = false;
+ pthread_mutex_unlock(&dev->mutex);
return 0;
}
@@ -174,17 +200,6 @@ parse_mac(struct virtio_user_dev *dev, const char *mac)
}
}
-int
-is_vhost_user_by_type(const char *path)
-{
- struct stat sb;
-
- if (stat(path, &sb) == -1)
- return 0;
-
- return S_ISSOCK(sb.st_mode);
-}
-
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
@@ -254,10 +269,41 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
eth_dev->intr_handle->fd = -1;
if (dev->vhostfd >= 0)
eth_dev->intr_handle->fd = dev->vhostfd;
+ else if (dev->is_server)
+ eth_dev->intr_handle->fd = dev->listenfd;
return 0;
}
+static void
+virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
+ const void *addr __rte_unused,
+ size_t len __rte_unused,
+ void *arg)
+{
+ struct virtio_user_dev *dev = arg;
+ uint16_t i;
+
+ pthread_mutex_lock(&dev->mutex);
+
+ if (dev->started == false)
+ goto exit;
+
+ /* Step 1: pause the active queues */
+ for (i = 0; i < dev->queue_pairs; i++)
+ dev->ops->enable_qp(dev, i, 0);
+
+ /* Step 2: update memory regions */
+ dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+
+ /* Step 3: resume the active queues */
+ for (i = 0; i < dev->queue_pairs; i++)
+ dev->ops->enable_qp(dev, i, 1);
+
+exit:
+ pthread_mutex_unlock(&dev->mutex);
+}
+
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
@@ -267,21 +313,32 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
dev->vhostfds = NULL;
dev->tapfds = NULL;
- if (is_vhost_user_by_type(dev->path)) {
- dev->ops = &ops_user;
- } else {
- dev->ops = &ops_kernel;
-
- dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int));
- dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int));
- if (!dev->vhostfds || !dev->tapfds) {
- PMD_INIT_LOG(ERR, "Failed to malloc");
+ if (dev->is_server) {
+ if (access(dev->path, F_OK) == 0 &&
+ !is_vhost_user_by_type(dev->path)) {
+ PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
return -1;
}
-
- for (q = 0; q < dev->max_queue_pairs; ++q) {
- dev->vhostfds[q] = -1;
- dev->tapfds[q] = -1;
+ dev->ops = &ops_user;
+ } else {
+ if (is_vhost_user_by_type(dev->path)) {
+ dev->ops = &ops_user;
+ } else {
+ dev->ops = &ops_kernel;
+
+ dev->vhostfds = malloc(dev->max_queue_pairs *
+ sizeof(int));
+ dev->tapfds = malloc(dev->max_queue_pairs *
+ sizeof(int));
+ if (!dev->vhostfds || !dev->tapfds) {
+ PMD_INIT_LOG(ERR, "Failed to malloc");
+ return -1;
+ }
+
+ for (q = 0; q < dev->max_queue_pairs; ++q) {
+ dev->vhostfds[q] = -1;
+ dev->tapfds[q] = -1;
+ }
}
}
@@ -320,7 +377,9 @@ int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname)
{
+ pthread_mutex_init(&dev->mutex, NULL);
snprintf(dev->path, PATH_MAX, "%s", path);
+ dev->started = 0;
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
@@ -337,18 +396,33 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
return -1;
}
- if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
- PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
- return -1;
- }
+ if (!dev->is_server) {
+ if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
+ NULL) < 0) {
+ PMD_INIT_LOG(ERR, "set_owner fails: %s",
+ strerror(errno));
+ return -1;
+ }
- if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
- &dev->device_features) < 0) {
- PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
- return -1;
+ if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
+ &dev->device_features) < 0) {
+ PMD_INIT_LOG(ERR, "get_features failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ } else {
+ /* We just pretend vhost-user can support all these features.
+ * Note that this could be problematic if some feature is
+ * negotiated but not actually supported by the vhost-user
+ * backend that connects later.
+ */
+ dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
}
+
if (dev->mac_specified)
dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
+ else
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
if (cq) {
/* device does not really need to know anything about CQ,
@@ -371,6 +445,15 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
+ if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
+ virtio_user_mem_event_cb, dev)) {
+ if (rte_errno != ENOTSUP) {
+ PMD_INIT_LOG(ERR, "Failed to register mem event"
+ " callback\n");
+ return -1;
+ }
+ }
+
return 0;
}
@@ -381,6 +464,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
virtio_user_stop_device(dev);
+ rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);
+
for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
close(dev->callfds[i]);
close(dev->kickfds[i]);
@@ -388,6 +473,11 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
close(dev->vhostfd);
+ if (dev->is_server && dev->listenfd >= 0) {
+ close(dev->listenfd);
+ dev->listenfd = -1;
+ }
+
if (dev->vhostfds) {
for (i = 0; i < dev->max_queue_pairs; ++i)
close(dev->vhostfds[i]);
@@ -396,9 +486,12 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
}
free(dev->ifname);
+
+ if (dev->is_server)
+ unlink(dev->path);
}
-static uint8_t
+uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
uint16_t i;
@@ -410,11 +503,17 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
return -1;
}
- for (i = 0; i < q_pairs; ++i)
- ret |= dev->ops->enable_qp(dev, i, 1);
- for (i = q_pairs; i < dev->max_queue_pairs; ++i)
- ret |= dev->ops->enable_qp(dev, i, 0);
-
+ /* Server mode can't enable queue pairs if vhostfd is invalid,
+ * always return 0 in this case.
+ */
+ if (dev->vhostfd >= 0) {
+ for (i = 0; i < q_pairs; ++i)
+ ret |= dev->ops->enable_qp(dev, i, 1);
+ for (i = q_pairs; i < dev->max_queue_pairs; ++i)
+ ret |= dev->ops->enable_qp(dev, i, 0);
+ } else if (!dev->is_server) {
+ ret = ~0;
+ }
dev->queue_pairs = q_pairs;
return ret;
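
The rte_mem_event_callback_register() call used by virtio_user_dev_init() above installs a hook that fires on every hot-plugged allocation or free; a minimal log-only sketch of registering such a hook (name and helper are illustrative):

#include <stdio.h>

#include <rte_common.h>
#include <rte_memory.h>

/* Log-only memory event hook: called for each area that is allocated to or
 * freed from the DPDK heaps after startup.
 */
static void
mem_event_hook(enum rte_mem_event type, const void *addr, size_t len,
	       void *arg __rte_unused)
{
	printf("mem event %d: addr %p, len %zu\n", (int)type, addr, len);
}

static int
install_mem_event_hook(void)
{
	/* The name identifies this (callback, arg) pair for unregistering. */
	return rte_mem_event_callback_register("example_mem_event_hook",
					       mem_event_hook, NULL);
}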
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 64467b4f..d2d4cb82 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -6,6 +6,7 @@
#define _VIRTIO_USER_DEV_H
#include <limits.h>
+#include <stdbool.h>
#include "../virtio_pci.h"
#include "../virtio_ring.h"
#include "vhost.h"
@@ -13,6 +14,8 @@
struct virtio_user_dev {
/* for vhost_user backend */
int vhostfd;
+ int listenfd; /* listening fd */
+ bool is_server; /* server or client mode */
/* for vhost_kernel backend */
char *ifname;
@@ -31,11 +34,13 @@ struct virtio_user_dev {
*/
uint64_t device_features; /* supported features by device */
uint8_t status;
- uint8_t port_id;
+ uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
char path[PATH_MAX];
struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
struct virtio_user_backend_ops *ops;
+ pthread_mutex_t mutex;
+ bool started;
};
int is_vhost_user_by_type(const char *path);
@@ -45,4 +50,5 @@ int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
+uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
#endif
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 26364900..1c102ca7 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -24,15 +24,100 @@
#define virtio_user_get_dev(hw) \
((struct virtio_user_dev *)(hw)->virtio_user_dev)
+static int
+virtio_user_server_reconnect(struct virtio_user_dev *dev)
+{
+ int ret;
+ int flag;
+ int connectfd;
+ uint64_t features = dev->device_features;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+
+ connectfd = accept(dev->listenfd, NULL, NULL);
+ if (connectfd < 0)
+ return -1;
+
+ dev->vhostfd = connectfd;
+ if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
+ &dev->device_features) < 0) {
+ PMD_INIT_LOG(ERR, "get_features failed: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ features &= ~dev->device_features;
+ /* For following bits, vhost-user doesn't really need to know */
+ features &= ~(1ull << VIRTIO_NET_F_MAC);
+ features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
+ features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+ features &= ~(1ull << VIRTIO_NET_F_STATUS);
+ if (features)
+ PMD_INIT_LOG(ERR, "WARNING: Some features 0x%" PRIx64 " are not supported by vhost-user!",
+ features);
+
+ dev->features &= dev->device_features;
+
+ flag = fcntl(connectfd, F_GETFL);
+ fcntl(connectfd, F_SETFL, flag | O_NONBLOCK);
+
+ ret = virtio_user_start_device(dev);
+ if (ret < 0)
+ return -1;
+
+ if (dev->queue_pairs > 1) {
+ ret = virtio_user_handle_mq(dev, dev->queue_pairs);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
+ return -1;
+ }
+ }
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt disable failed");
+ return -1;
+ }
+ rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler,
+ eth_dev);
+ eth_dev->intr_handle->fd = connectfd;
+ rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+
+ if (rte_intr_enable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return -1;
+ }
+ }
+ PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
+ return 0;
+}
+
static void
virtio_user_delayed_handler(void *param)
{
struct virtio_hw *hw = (struct virtio_hw *)param;
- struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
- rte_intr_callback_unregister(dev->intr_handle,
- virtio_interrupt_handler,
- dev);
+ if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt disable failed");
+ return;
+ }
+ rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+ if (dev->is_server) {
+ if (dev->vhostfd >= 0) {
+ close(dev->vhostfd);
+ dev->vhostfd = -1;
+ }
+ eth_dev->intr_handle->fd = dev->listenfd;
+ rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+ if (rte_intr_enable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return;
+ }
+ }
}
static void
@@ -67,12 +152,10 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
dev->status &= (~VIRTIO_NET_S_LINK_UP);
PMD_DRV_LOG(ERR, "virtio-user port %u is down",
hw->port_id);
- /* Only client mode is available now. Once the
- * connection is broken, it can never be up
- * again. Besides, this function could be called
- * in the process of interrupt handling,
- * callback cannot be unregistered here, set an
- * alarm to do it.
+
+ /* This function could be called in the process
+ * of interrupt handling, callback cannot be
+ * unregistered here, set an alarm to do it.
*/
rte_eal_alarm_set(1,
virtio_user_delayed_handler,
@@ -85,7 +168,12 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
return;
}
+ } else if (dev->is_server) {
+ dev->status &= (~VIRTIO_NET_S_LINK_UP);
+ if (virtio_user_server_reconnect(dev) >= 0)
+ dev->status |= VIRTIO_NET_S_LINK_UP;
}
+
*(uint16_t *)dst = dev->status;
}
@@ -278,12 +366,15 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
VIRTIO_USER_ARG_INTERFACE_NAME,
+#define VIRTIO_USER_ARG_SERVER_MODE "server"
+ VIRTIO_USER_ARG_SERVER_MODE,
NULL
};
#define VIRTIO_USER_DEF_CQ_EN 0
#define VIRTIO_USER_DEF_Q_NUM 1
#define VIRTIO_USER_DEF_Q_SZ 256
+#define VIRTIO_USER_DEF_SERVER_MODE 0
static int
get_string_arg(const char *key __rte_unused,
@@ -378,6 +469,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
+ uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
char *path = NULL;
char *ifname = NULL;
char *mac_addr = NULL;
@@ -445,6 +537,15 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
}
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
+ &get_integer_arg, &server_mode) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_SERVER_MODE);
+ goto end;
+ }
+ }
+
if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
&get_integer_arg, &cq) < 0) {
@@ -469,6 +570,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ struct virtio_user_dev *vu_dev;
+
eth_dev = virtio_user_eth_dev_alloc(dev);
if (!eth_dev) {
PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
@@ -476,12 +579,18 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
hw = eth_dev->data->dev_private;
+ vu_dev = virtio_user_get_dev(hw);
+ if (server_mode == 1)
+ vu_dev->is_server = true;
+ else
+ vu_dev->is_server = false;
if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
queue_size, mac_addr, &ifname) < 0) {
PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
virtio_user_eth_dev_free(eth_dev);
goto end;
}
+
} else {
eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev));
if (!eth_dev)
@@ -494,6 +603,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
virtio_user_eth_dev_free(eth_dev);
goto end;
}
+
+ rte_eth_dev_probing_finish(eth_dev);
ret = 0;
end:
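
With the server knob in place, a virtio-user port can be created in server mode straight from the EAL command line. Assuming the usual net_virtio_user vdev naming and path argument (neither is shown in this diff excerpt), the devargs would look roughly like:

--vdev=net_virtio_user0,path=/tmp/vhost-user.sock,server=1,queue_size=1024

The port then binds and listens on the given socket path and waits for a vhost-user backend to connect; the reconnect handling added in virtio_user_ethdev.c re-establishes the session if the backend restarts.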