Diffstat (limited to 'drivers/net/virtio/virtio_user')
-rw-r--r-- | drivers/net/virtio/virtio_user/vhost.h | 51
-rw-r--r-- | drivers/net/virtio/virtio_user/vhost_kernel.c | 403
-rw-r--r-- | drivers/net/virtio/virtio_user/vhost_kernel_tap.c | 133
-rw-r--r-- | drivers/net/virtio/virtio_user/vhost_kernel_tap.h | 67
-rw-r--r-- | drivers/net/virtio/virtio_user/vhost_user.c | 98
-rw-r--r-- | drivers/net/virtio/virtio_user/virtio_user_dev.c | 246
-rw-r--r-- | drivers/net/virtio/virtio_user/virtio_user_dev.h | 20
7 files changed, 891 insertions, 127 deletions
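The organizing idea of the patch below is that virtio_user stops calling the vhost-user socket helpers directly and instead drives its control path through a per-backend ops table: ops_user for a vhost-user unix socket, ops_kernel for the /dev/vhost-net character device, chosen by checking whether the supplied path is a unix socket. What follows is a minimal, self-contained sketch of that dispatch pattern, not code from the patch: the vtable shape and the S_ISSOCK() probe mirror the diff, while the stub types and stub backend functions (user_setup, kernel_setup, noop_request, noop_enable) are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <sys/stat.h>

struct virtio_user_dev;

/* Same vtable shape the patch adds to vhost.h */
struct virtio_user_backend_ops {
	int (*setup)(struct virtio_user_dev *dev);
	int (*send_request)(struct virtio_user_dev *dev, int req, void *arg);
	int (*enable_qp)(struct virtio_user_dev *dev, uint16_t pair_idx,
			 int enable);
};

struct virtio_user_dev {
	char path[108];		/* socket or chardev path */
	const struct virtio_user_backend_ops *ops;
};

/* Same probe the patch adds: a unix socket implies a vhost-user backend */
static int
is_vhost_user_by_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1)
		return 0;
	return S_ISSOCK(sb.st_mode);
}

/* Stubs; the real ops connect the socket or open /dev/vhost-net per queue */
static int
user_setup(struct virtio_user_dev *dev)
{
	printf("vhost-user: connect to %s\n", dev->path);
	return 0;
}

static int
kernel_setup(struct virtio_user_dev *dev)
{
	printf("vhost-kernel: open %s once per queue pair\n", dev->path);
	return 0;
}

static int
noop_request(struct virtio_user_dev *dev, int req, void *arg)
{
	(void)dev;
	(void)arg;
	printf("request %d sent through dev->ops\n", req);
	return 0;
}

static int
noop_enable(struct virtio_user_dev *dev, uint16_t pair_idx, int enable)
{
	(void)dev;
	printf("queue pair %u %s\n", pair_idx,
	       enable ? "enabled" : "disabled");
	return 0;
}

static const struct virtio_user_backend_ops ops_user_stub = {
	.setup = user_setup,
	.send_request = noop_request,
	.enable_qp = noop_enable,
};

static const struct virtio_user_backend_ops ops_kernel_stub = {
	.setup = kernel_setup,
	.send_request = noop_request,
	.enable_qp = noop_enable,
};

int
main(void)
{
	struct virtio_user_dev dev = { .path = "/dev/vhost-net" };

	/* Mirrors virtio_user_dev_setup(): pick the backend by path type,
	 * then use only dev->ops for the control path.
	 */
	dev.ops = is_vhost_user_by_type(dev.path) ?
		  &ops_user_stub : &ops_kernel_stub;

	if (dev.ops->setup(&dev) < 0)
		return 1;
	dev.ops->enable_qp(&dev, 0, 1);
	return 0;
}

With this split, every control message in virtio_user_dev.c becomes a dev->ops->send_request() call, so adding a backend means implementing three functions rather than touching each call site.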
diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h index 7adb55f5..5c983bd4 100644 --- a/drivers/net/virtio/virtio_user/vhost.h +++ b/drivers/net/virtio/virtio_user/vhost.h @@ -42,8 +42,6 @@ #include "../virtio_logs.h" #include "../virtqueue.h" -#define VHOST_MEMORY_MAX_NREGIONS 8 - struct vhost_vring_state { unsigned int index; unsigned int num; @@ -98,6 +96,8 @@ enum vhost_user_request { VHOST_USER_MAX }; +const char * const vhost_msg_strings[VHOST_USER_MAX]; + struct vhost_memory_region { uint64_t guest_phys_addr; uint64_t memory_size; /* bytes */ @@ -105,42 +105,19 @@ struct vhost_memory_region { uint64_t mmap_offset; }; -struct vhost_memory { - uint32_t nregions; - uint32_t padding; - struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS]; -}; - -struct vhost_user_msg { - enum vhost_user_request request; +struct virtio_user_dev; -#define VHOST_USER_VERSION_MASK 0x3 -#define VHOST_USER_REPLY_MASK (0x1 << 2) - uint32_t flags; - uint32_t size; /* the following payload size */ - union { -#define VHOST_USER_VRING_IDX_MASK 0xff -#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8) - uint64_t u64; - struct vhost_vring_state state; - struct vhost_vring_addr addr; - struct vhost_memory memory; - } payload; - int fds[VHOST_MEMORY_MAX_NREGIONS]; -} __attribute((packed)); - -#define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64) -#define VHOST_USER_PAYLOAD_SIZE \ - (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE) - -/* The version of the protocol we support */ -#define VHOST_USER_VERSION 0x1 - -#define VHOST_USER_F_PROTOCOL_FEATURES 30 -#define VHOST_USER_MQ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) +struct virtio_user_backend_ops { + int (*setup)(struct virtio_user_dev *dev); + int (*send_request)(struct virtio_user_dev *dev, + enum vhost_user_request req, + void *arg); + int (*enable_qp)(struct virtio_user_dev *dev, + uint16_t pair_idx, + int enable); +}; -int vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg); -int vhost_user_setup(const char *path); -int vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable); +struct virtio_user_backend_ops ops_user; +struct virtio_user_backend_ops ops_kernel; #endif diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c new file mode 100644 index 00000000..68d28b13 --- /dev/null +++ b/drivers/net/virtio/virtio_user/vhost_kernel.c @@ -0,0 +1,403 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <unistd.h> + +#include <rte_memory.h> +#include <rte_eal_memconfig.h> + +#include "vhost.h" +#include "virtio_user_dev.h" +#include "vhost_kernel_tap.h" + +struct vhost_memory_kernel { + uint32_t nregions; + uint32_t padding; + struct vhost_memory_region regions[0]; +}; + +/* vhost kernel ioctls */ +#define VHOST_VIRTIO 0xAF +#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64) +#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64) +#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01) +#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02) +#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory_kernel) +#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64) +#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int) +#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state) +#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr) +#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state) +#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state) +#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file) +#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file) +#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file) +#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file) + +static uint64_t max_regions = 64; + +static void +get_vhost_kernel_max_regions(void) +{ + int fd; + char buf[20] = {'\0'}; + + fd = open("/sys/module/vhost/parameters/max_mem_regions", O_RDONLY); + if (fd < 0) + return; + + if (read(fd, buf, sizeof(buf) - 1) > 0) + max_regions = strtoull(buf, NULL, 10); + + close(fd); +} + +static uint64_t vhost_req_user_to_kernel[] = { + [VHOST_USER_SET_OWNER] = VHOST_SET_OWNER, + [VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER, + [VHOST_USER_SET_FEATURES] = VHOST_SET_FEATURES, + [VHOST_USER_GET_FEATURES] = VHOST_GET_FEATURES, + [VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL, + [VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM, + [VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE, + [VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE, + [VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR, + [VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK, + [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE, +}; + +/* By default, the vhost kernel module allows 64 regions, but DPDK allows + * 256 segments. As a workaround, the function below merges memsegs that are + * adjacent in virtual address space into one region.
+ */ +static struct vhost_memory_kernel * +prepare_vhost_memory_kernel(void) +{ + uint32_t i, j, k = 0; + struct rte_memseg *seg; + struct vhost_memory_region *mr; + struct vhost_memory_kernel *vm; + + vm = malloc(sizeof(struct vhost_memory_kernel) + + max_regions * + sizeof(struct vhost_memory_region)); + if (!vm) + return NULL; + + for (i = 0; i < RTE_MAX_MEMSEG; ++i) { + seg = &rte_eal_get_configuration()->mem_config->memseg[i]; + if (!seg->addr) + break; + + int new_region = 1; + + for (j = 0; j < k; ++j) { + mr = &vm->regions[j]; + + if (mr->userspace_addr + mr->memory_size == + (uint64_t)(uintptr_t)seg->addr) { + mr->memory_size += seg->len; + new_region = 0; + break; + } + + if ((uint64_t)(uintptr_t)seg->addr + seg->len == + mr->userspace_addr) { + mr->guest_phys_addr = + (uint64_t)(uintptr_t)seg->addr; + mr->userspace_addr = + (uint64_t)(uintptr_t)seg->addr; + mr->memory_size += seg->len; + new_region = 0; + break; + } + } + + if (new_region == 0) + continue; + + mr = &vm->regions[k++]; + /* use vaddr here! */ + mr->guest_phys_addr = (uint64_t)(uintptr_t)seg->addr; + mr->userspace_addr = (uint64_t)(uintptr_t)seg->addr; + mr->memory_size = seg->len; + mr->mmap_offset = 0; + + if (k >= max_regions) { + free(vm); + return NULL; + } + } + + vm->nregions = k; + vm->padding = 0; + return vm; +} + +/* With the features below, vhost kernel does not need to do checksum or TSO; + * this information is passed to virtio_user through the virtio net header. + */ +#define VHOST_KERNEL_GUEST_OFFLOADS_MASK \ + ((1ULL << VIRTIO_NET_F_GUEST_CSUM) | \ + (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ + (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \ + (1ULL << VIRTIO_NET_F_GUEST_ECN) | \ + (1ULL << VIRTIO_NET_F_GUEST_UFO)) + +/* With the features below, when a flow goes from virtio_user to vhost kernel: + * (1) if it goes up through the kernel networking stack, the kernel does not + * need to verify the checksum, which saves CPU cycles; + * (2) if it goes through a Linux bridge and out through an interface (kernel + * driver), checksum and TSO are done by GSO in the kernel, or even offloaded + * to the real physical device.
+ */ +#define VHOST_KERNEL_HOST_OFFLOADS_MASK \ + ((1ULL << VIRTIO_NET_F_HOST_TSO4) | \ + (1ULL << VIRTIO_NET_F_HOST_TSO6) | \ + (1ULL << VIRTIO_NET_F_CSUM)) + +static int +tap_support_mq(void) +{ + int tapfd; + unsigned int tap_features; + + tapfd = open(PATH_NET_TUN, O_RDWR); + if (tapfd < 0) { + PMD_DRV_LOG(ERR, "fail to open %s: %s", + PATH_NET_TUN, strerror(errno)); + return -1; + } + + if (ioctl(tapfd, TUNGETFEATURES, &tap_features) == -1) { + PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno)); + close(tapfd); + return -1; + } + + close(tapfd); + return tap_features & IFF_MULTI_QUEUE; +} + +static int +vhost_kernel_ioctl(struct virtio_user_dev *dev, + enum vhost_user_request req, + void *arg) +{ + int ret = -1; + unsigned int i; + uint64_t req_kernel; + struct vhost_memory_kernel *vm = NULL; + int vhostfd; + unsigned int queue_sel; + + PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]); + + req_kernel = vhost_req_user_to_kernel[req]; + + if (req_kernel == VHOST_SET_MEM_TABLE) { + vm = prepare_vhost_memory_kernel(); + if (!vm) + return -1; + arg = (void *)vm; + } + + if (req_kernel == VHOST_SET_FEATURES) { + /* We don't need memory protection here */ + *(uint64_t *)arg &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM); + + /* VHOST kernel does not know about the flags below */ + *(uint64_t *)arg &= ~VHOST_KERNEL_GUEST_OFFLOADS_MASK; + *(uint64_t *)arg &= ~VHOST_KERNEL_HOST_OFFLOADS_MASK; + + *(uint64_t *)arg &= ~(1ULL << VIRTIO_NET_F_MQ); + } + + switch (req_kernel) { + case VHOST_SET_VRING_NUM: + case VHOST_SET_VRING_ADDR: + case VHOST_SET_VRING_BASE: + case VHOST_GET_VRING_BASE: + case VHOST_SET_VRING_KICK: + case VHOST_SET_VRING_CALL: + queue_sel = *(unsigned int *)arg; + vhostfd = dev->vhostfds[queue_sel / 2]; + *(unsigned int *)arg = queue_sel % 2; + PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u", + vhostfd, *(unsigned int *)arg); + break; + default: + vhostfd = -1; + } + if (vhostfd == -1) { + for (i = 0; i < dev->max_queue_pairs; ++i) { + if (dev->vhostfds[i] < 0) + continue; + + ret = ioctl(dev->vhostfds[i], req_kernel, arg); + if (ret < 0) + break; + } + } else { + ret = ioctl(vhostfd, req_kernel, arg); + } + + if (!ret && req_kernel == VHOST_GET_FEATURES) { + /* With tap as the backend, all these features are supported + * but not claimed by vhost-net, so we add them back when + * reporting to the upper layer. + */ + *((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK; + *((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK; + + /* vhost_kernel will not declare this feature, but it does + * support multi-queue. + */ + if (tap_support_mq()) + *(uint64_t *)arg |= (1ull << VIRTIO_NET_F_MQ); + } + + if (vm) + free(vm); + + if (ret < 0) + PMD_DRV_LOG(ERR, "%s failed: %s", + vhost_msg_strings[req], strerror(errno)); + + return ret; +} + +/** + * Set up environment to talk with a vhost kernel backend. + * + * @return + * - (-1) if failed to set up; + * - (0) if successful.
+ */ +static int +vhost_kernel_setup(struct virtio_user_dev *dev) +{ + int vhostfd; + uint32_t i; + + get_vhost_kernel_max_regions(); + + for (i = 0; i < dev->max_queue_pairs; ++i) { + vhostfd = open(dev->path, O_RDWR); + if (vhostfd < 0) { + PMD_DRV_LOG(ERR, "fail to open %s, %s", + dev->path, strerror(errno)); + return -1; + } + + dev->vhostfds[i] = vhostfd; + } + + return 0; +} + +static int +vhost_kernel_set_backend(int vhostfd, int tapfd) +{ + struct vhost_vring_file f; + + f.fd = tapfd; + f.index = 0; + if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) { + PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s", + strerror(errno)); + return -1; + } + + f.index = 1; + if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) { + PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s", + strerror(errno)); + return -1; + } + + return 0; +} + +static int +vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev, + uint16_t pair_idx, + int enable) +{ + int hdr_size; + int vhostfd; + int tapfd; + int req_mq = (dev->max_queue_pairs > 1); + + vhostfd = dev->vhostfds[pair_idx]; + + if (!enable) { + if (dev->tapfds[pair_idx] >= 0) { + close(dev->tapfds[pair_idx]); + dev->tapfds[pair_idx] = -1; + } + return vhost_kernel_set_backend(vhostfd, -1); + } else if (dev->tapfds[pair_idx] >= 0) { + return 0; + } + + if ((dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) || + (dev->features & (1ULL << VIRTIO_F_VERSION_1))) + hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); + else + hdr_size = sizeof(struct virtio_net_hdr); + + tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq); + if (tapfd < 0) { + PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel"); + return -1; + } + + if (vhost_kernel_set_backend(vhostfd, tapfd) < 0) { + PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel"); + close(tapfd); + return -1; + } + + dev->tapfds[pair_idx] = tapfd; + return 0; +} + +struct virtio_user_backend_ops ops_kernel = { + .setup = vhost_kernel_setup, + .send_request = vhost_kernel_ioctl, + .enable_qp = vhost_kernel_enable_queue_pair +}; diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c new file mode 100644 index 00000000..f585de8c --- /dev/null +++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c @@ -0,0 +1,133 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <unistd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <net/if.h> +#include <errno.h> +#include <string.h> +#include <limits.h> + +#include "vhost_kernel_tap.h" +#include "../virtio_logs.h" + +int +vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq) +{ + unsigned int tap_features; + int sndbuf = INT_MAX; + struct ifreq ifr; + int tapfd; + unsigned int offload = + TUN_F_CSUM | + TUN_F_TSO4 | + TUN_F_TSO6 | + TUN_F_TSO_ECN | + TUN_F_UFO; + + /* TODO: + * 1. verify we can get/set vnet_hdr_len, tap_probe_vnet_hdr_len + * 2. get the number of memory regions from the vhost module parameter + * max_mem_regions, supported in newer Linux kernels + */ + tapfd = open(PATH_NET_TUN, O_RDWR); + if (tapfd < 0) { + PMD_DRV_LOG(ERR, "fail to open %s: %s", + PATH_NET_TUN, strerror(errno)); + return -1; + } + + /* Construct ifr */ + memset(&ifr, 0, sizeof(ifr)); + ifr.ifr_flags = IFF_TAP | IFF_NO_PI; + + if (ioctl(tapfd, TUNGETFEATURES, &tap_features) == -1) { + PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno)); + goto error; + } + if (tap_features & IFF_ONE_QUEUE) + ifr.ifr_flags |= IFF_ONE_QUEUE; + + /* Let tap instead of vhost-net handle the vnet header, as the latter + * does not support offloading. In this case, we should not set the + * VHOST_NET_F_VIRTIO_NET_HDR feature bit. + */ + if (tap_features & IFF_VNET_HDR) { + ifr.ifr_flags |= IFF_VNET_HDR; + } else { + PMD_DRV_LOG(ERR, "TAP does not support IFF_VNET_HDR"); + goto error; + } + + if (req_mq) + ifr.ifr_flags |= IFF_MULTI_QUEUE; + + if (*p_ifname) + strncpy(ifr.ifr_name, *p_ifname, IFNAMSIZ); + else + strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ); + if (ioctl(tapfd, TUNSETIFF, (void *)&ifr) == -1) { + PMD_DRV_LOG(ERR, "TUNSETIFF failed: %s", strerror(errno)); + goto error; + } + + fcntl(tapfd, F_SETFL, O_NONBLOCK); + + if (ioctl(tapfd, TUNSETVNETHDRSZ, &hdr_size) < 0) { + PMD_DRV_LOG(ERR, "TUNSETVNETHDRSZ failed: %s", strerror(errno)); + goto error; + } + + if (ioctl(tapfd, TUNSETSNDBUF, &sndbuf) < 0) { + PMD_DRV_LOG(ERR, "TUNSETSNDBUF failed: %s", strerror(errno)); + goto error; + } + + /* TODO: before setting the offload capabilities, we had better (1) + * check the negotiated features to see whether offloading is + * necessary; (2) query tap to see whether it supports these offload + * capabilities. + */ + if (ioctl(tapfd, TUNSETOFFLOAD, offload) != 0) + PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s", + strerror(errno)); + + if (!(*p_ifname)) + *p_ifname = strdup(ifr.ifr_name); + + return tapfd; +error: + close(tapfd); + return -1; +} diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h new file mode 100644 index 00000000..eae340cc --- /dev/null +++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h @@ -0,0 +1,67 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/ioctl.h> + +/* TUN ioctls */ +#define TUNSETIFF _IOW('T', 202, int) +#define TUNGETFEATURES _IOR('T', 207, unsigned int) +#define TUNSETOFFLOAD _IOW('T', 208, unsigned int) +#define TUNGETIFF _IOR('T', 210, unsigned int) +#define TUNSETSNDBUF _IOW('T', 212, int) +#define TUNGETVNETHDRSZ _IOR('T', 215, int) +#define TUNSETVNETHDRSZ _IOW('T', 216, int) +#define TUNSETQUEUE _IOW('T', 217, int) +#define TUNSETVNETLE _IOW('T', 220, int) +#define TUNSETVNETBE _IOW('T', 222, int) + +/* TUNSETIFF ifr flags */ +#define IFF_TAP 0x0002 +#define IFF_NO_PI 0x1000 +#define IFF_ONE_QUEUE 0x2000 +#define IFF_VNET_HDR 0x4000 +#define IFF_MULTI_QUEUE 0x0100 +#define IFF_ATTACH_QUEUE 0x0200 +#define IFF_DETACH_QUEUE 0x0400 + +/* Features for GSO (TUNSETOFFLOAD). */ +#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */ +#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */ +#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */ +#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. 
*/ +#define TUN_F_UFO 0x10 /* I can handle UFO packets */ + +/* Constants */ +#define PATH_NET_TUN "/dev/net/tun" + +int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq); diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c index 082e8217..4ad7b21b 100644 --- a/drivers/net/virtio/virtio_user/vhost_user.c +++ b/drivers/net/virtio/virtio_user/vhost_user.c @@ -41,6 +41,39 @@ #include <errno.h> #include "vhost.h" +#include "virtio_user_dev.h" + +/* The version of the protocol we support */ +#define VHOST_USER_VERSION 0x1 + +#define VHOST_MEMORY_MAX_NREGIONS 8 +struct vhost_memory { + uint32_t nregions; + uint32_t padding; + struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS]; +}; + +struct vhost_user_msg { + enum vhost_user_request request; + +#define VHOST_USER_VERSION_MASK 0x3 +#define VHOST_USER_REPLY_MASK (0x1 << 2) + uint32_t flags; + uint32_t size; /* the following payload size */ + union { +#define VHOST_USER_VRING_IDX_MASK 0xff +#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8) + uint64_t u64; + struct vhost_vring_state state; + struct vhost_vring_addr addr; + struct vhost_memory memory; + } payload; + int fds[VHOST_MEMORY_MAX_NREGIONS]; +} __attribute((packed)); + +#define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64) +#define VHOST_USER_PAYLOAD_SIZE \ + (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE) static int vhost_user_write(int fd, void *buf, int len, int *fds, int fd_num) @@ -223,24 +256,25 @@ prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[]) static struct vhost_user_msg m; -static const char * const vhost_msg_strings[] = { - [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER", - [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER", - [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES", - [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES", - [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL", - [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM", - [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE", - [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE", - [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR", - [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK", - [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE", - [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE", - NULL, +const char * const vhost_msg_strings[] = { + [VHOST_USER_SET_OWNER] = "VHOST_SET_OWNER", + [VHOST_USER_RESET_OWNER] = "VHOST_RESET_OWNER", + [VHOST_USER_SET_FEATURES] = "VHOST_SET_FEATURES", + [VHOST_USER_GET_FEATURES] = "VHOST_GET_FEATURES", + [VHOST_USER_SET_VRING_CALL] = "VHOST_SET_VRING_CALL", + [VHOST_USER_SET_VRING_NUM] = "VHOST_SET_VRING_NUM", + [VHOST_USER_SET_VRING_BASE] = "VHOST_SET_VRING_BASE", + [VHOST_USER_GET_VRING_BASE] = "VHOST_GET_VRING_BASE", + [VHOST_USER_SET_VRING_ADDR] = "VHOST_SET_VRING_ADDR", + [VHOST_USER_SET_VRING_KICK] = "VHOST_SET_VRING_KICK", + [VHOST_USER_SET_MEM_TABLE] = "VHOST_SET_MEM_TABLE", + [VHOST_USER_SET_VRING_ENABLE] = "VHOST_SET_VRING_ENABLE", }; -int -vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg) +static int +vhost_user_sock(struct virtio_user_dev *dev, + enum vhost_user_request req, + void *arg) { struct vhost_user_msg msg; struct vhost_vring_file *file = 0; @@ -248,9 +282,9 @@ vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg) int fds[VHOST_MEMORY_MAX_NREGIONS]; int fd_num = 0; int i, len; + int vhostfd = dev->vhostfd; RTE_SET_USED(m); - 
RTE_SET_USED(vhost_msg_strings); PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]); @@ -371,15 +405,13 @@ /** * Set up environment to talk with a vhost user backend. - * @param path - * - The path to vhost user unix socket file. * * @return - * - (-1) if fail to set up; - * - (>=0) if successful, and it is the fd to vhostfd. + * - (-1) on failure; + * - (0) on success. */ -int -vhost_user_setup(const char *path) +static int +vhost_user_setup(struct virtio_user_dev *dev) { int fd; int flag; @@ -397,18 +429,21 @@ vhost_user_setup(const char *path) memset(&un, 0, sizeof(un)); un.sun_family = AF_UNIX; - snprintf(un.sun_path, sizeof(un.sun_path), "%s", path); + snprintf(un.sun_path, sizeof(un.sun_path), "%s", dev->path); if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) { PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno)); close(fd); return -1; } - return fd; + dev->vhostfd = fd; + return 0; } -int -vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable) +static int +vhost_user_enable_queue_pair(struct virtio_user_dev *dev, + uint16_t pair_idx, + int enable) { int i; @@ -418,10 +453,15 @@ vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable) .num = enable, }; - if (vhost_user_sock(vhostfd, - VHOST_USER_SET_VRING_ENABLE, &state)) + if (vhost_user_sock(dev, VHOST_USER_SET_VRING_ENABLE, &state)) return -1; } return 0; } + +struct virtio_user_backend_ops ops_user = { + .setup = vhost_user_setup, + .send_request = vhost_user_sock, + .enable_qp = vhost_user_enable_queue_pair +}; diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c index a38398b8..450404ba 100644 --- a/drivers/net/virtio/virtio_user/virtio_user_dev.c +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c @@ -39,6 +39,9 @@ #include <sys/mman.h> #include <unistd.h> #include <sys/eventfd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> #include "vhost.h" #include "virtio_user_dev.h" @@ -51,21 +54,11 @@ virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel) * firstly because vhost depends on this msg to allocate virtqueue * pair. */ - int callfd; struct vhost_vring_file file; - /* May use invalid flag, but some backend leverages kickfd and callfd as - * criteria to judge if dev is alive. so finally we use real event_fd.
- */ - callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); - if (callfd < 0) { - PMD_DRV_LOG(ERR, "callfd error, %s\n", strerror(errno)); - return -1; - } file.index = queue_sel; - file.fd = callfd; - vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_CALL, &file); - dev->callfds[queue_sel] = callfd; + file.fd = dev->callfds[queue_sel]; + dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file); return 0; } @@ -73,7 +66,6 @@ virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel) static int virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel) { - int kickfd; struct vhost_vring_file file; struct vhost_vring_state state; struct vring *vring = &dev->vrings[queue_sel]; @@ -88,26 +80,21 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel) state.index = queue_sel; state.num = vring->num; - vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_NUM, &state); + dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state); + state.index = queue_sel; state.num = 0; /* no reservation */ - vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_BASE, &state); + dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state); - vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_ADDR, &addr); + dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr); /* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes * lastly because vhost depends on this msg to judge if * virtio is ready. */ - kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); - if (kickfd < 0) { - PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno)); - return -1; - } file.index = queue_sel; - file.fd = kickfd; - vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_KICK, &file); - dev->kickfds[queue_sel] = kickfd; + file.fd = dev->kickfds[queue_sel]; + dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file); return 0; } @@ -146,22 +133,20 @@ virtio_user_start_device(struct virtio_user_dev *dev) if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0) goto error; - /* Step 1: set features - * Make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is enabled, - * VIRTIO_NET_F_MAC and VIRTIO_NET_F_CTRL_VQ is stripped. - */ + /* Step 1: set features */ features = dev->features; - if (dev->max_queue_pairs > 1) - features |= VHOST_USER_MQ; + /* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */ features &= ~(1ull << VIRTIO_NET_F_MAC); + /* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */ features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ); - ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_FEATURES, &features); + features &= ~(1ull << VIRTIO_NET_F_STATUS); + ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features); if (ret < 0) goto error; PMD_DRV_LOG(INFO, "set features: %" PRIx64, features); /* Step 2: share memory regions */ - ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_MEM_TABLE, NULL); + ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL); if (ret < 0) goto error; @@ -172,7 +157,7 @@ virtio_user_start_device(struct virtio_user_dev *dev) /* Step 4: enable queues * we enable the 1st queue pair by default. 
 */ - vhost_user_enable_queue_pair(dev->vhostfd, 0, 1); + dev->ops->enable_qp(dev, 0, 1); return 0; error: @@ -184,13 +169,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev) { uint32_t i; - for (i = 0; i < dev->max_queue_pairs * 2; ++i) { - close(dev->callfds[i]); - close(dev->kickfds[i]); - } - for (i = 0; i < dev->max_queue_pairs; ++i) - vhost_user_enable_queue_pair(dev->vhostfd, i, 0); + dev->ops->enable_qp(dev, i, 0); return 0; } @@ -217,35 +197,170 @@ parse_mac(struct virtio_user_dev *dev, const char *mac) } int -virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, - int cq, int queue_size, const char *mac) +is_vhost_user_by_type(const char *path) +{ + struct stat sb; + + if (stat(path, &sb) == -1) + return 0; + + return S_ISSOCK(sb.st_mode); +} + +static int +virtio_user_dev_init_notify(struct virtio_user_dev *dev) +{ + uint32_t i, j; + int callfd; + int kickfd; + + for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) { + if (i >= dev->max_queue_pairs * 2) { + dev->kickfds[i] = -1; + dev->callfds[i] = -1; + continue; + } + + /* May use invalid flag, but some backends use kickfd and + * callfd as criteria to judge if the dev is alive. So finally + * we use a real event_fd. + */ + callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + if (callfd < 0) { + PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno)); + break; + } + kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + if (kickfd < 0) { + PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno)); + close(callfd); + break; + } + dev->callfds[i] = callfd; + dev->kickfds[i] = kickfd; + } + + if (i < VIRTIO_MAX_VIRTQUEUES) { + for (j = 0; j < i; ++j) { + close(dev->callfds[j]); + close(dev->kickfds[j]); + } + + return -1; + } + + return 0; +} + +static int +virtio_user_fill_intr_handle(struct virtio_user_dev *dev) { uint32_t i; + struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id]; + if (!eth_dev->intr_handle) { + eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle)); + if (!eth_dev->intr_handle) { + PMD_DRV_LOG(ERR, "fail to allocate intr_handle"); + return -1; + } + memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle)); + } + + for (i = 0; i < dev->max_queue_pairs; ++i) + eth_dev->intr_handle->efds[i] = dev->callfds[i]; + eth_dev->intr_handle->nb_efd = dev->max_queue_pairs; + eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1; + eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV; + if (dev->vhostfd >= 0) + eth_dev->intr_handle->fd = dev->vhostfd; + + return 0; +} + +static int +virtio_user_dev_setup(struct virtio_user_dev *dev) +{ + uint32_t q; + + dev->vhostfd = -1; + dev->vhostfds = NULL; + dev->tapfds = NULL; + + if (is_vhost_user_by_type(dev->path)) { + dev->ops = &ops_user; + } else { + dev->ops = &ops_kernel; + + dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int)); + dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int)); + if (!dev->vhostfds || !dev->tapfds) { + PMD_INIT_LOG(ERR, "Failed to malloc"); + return -1; + } + + for (q = 0; q < dev->max_queue_pairs; ++q) { + dev->vhostfds[q] = -1; + dev->tapfds[q] = -1; + } + } + + if (dev->ops->setup(dev) < 0) + return -1; + + if (virtio_user_dev_init_notify(dev) < 0) + return -1; + + if (virtio_user_fill_intr_handle(dev) < 0) + return -1; + + return 0; +} + +/* Use the macro below to filter features from the vhost backend */ +#define VIRTIO_USER_SUPPORTED_FEATURES \ + (1ULL << VIRTIO_NET_F_MAC | \ + 1ULL << VIRTIO_NET_F_STATUS | \ + 1ULL << VIRTIO_NET_F_MQ | \ + 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR | \ + 1ULL << VIRTIO_NET_F_CTRL_VQ | \ + 1ULL <<
VIRTIO_NET_F_CTRL_RX | \ + 1ULL << VIRTIO_NET_F_CTRL_VLAN | \ + 1ULL << VIRTIO_NET_F_CSUM | \ + 1ULL << VIRTIO_NET_F_HOST_TSO4 | \ + 1ULL << VIRTIO_NET_F_HOST_TSO6 | \ + 1ULL << VIRTIO_NET_F_MRG_RXBUF | \ + 1ULL << VIRTIO_RING_F_INDIRECT_DESC | \ + 1ULL << VIRTIO_NET_F_GUEST_CSUM | \ + 1ULL << VIRTIO_NET_F_GUEST_TSO4 | \ + 1ULL << VIRTIO_NET_F_GUEST_TSO6 | \ + 1ULL << VIRTIO_F_VERSION_1) + +int +virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + int cq, int queue_size, const char *mac, char **ifname) +{ snprintf(dev->path, PATH_MAX, "%s", path); dev->max_queue_pairs = queues; dev->queue_pairs = 1; /* mq disabled by default */ dev->queue_size = queue_size; dev->mac_specified = 0; parse_mac(dev, mac); - dev->vhostfd = -1; - for (i = 0; i < VIRTIO_MAX_VIRTQUEUES * 2 + 1; ++i) { - dev->kickfds[i] = -1; - dev->callfds[i] = -1; + if (*ifname) { + dev->ifname = *ifname; + *ifname = NULL; } - dev->vhostfd = vhost_user_setup(dev->path); - if (dev->vhostfd < 0) { + if (virtio_user_dev_setup(dev) < 0) { PMD_INIT_LOG(ERR, "backend set up fails"); return -1; } - if (vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL) < 0) { + if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) { PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno)); return -1; } - if (vhost_user_sock(dev->vhostfd, VHOST_USER_GET_FEATURES, + if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES, &dev->device_features) < 0) { PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno)); return -1; @@ -268,12 +383,11 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR); } - if (dev->max_queue_pairs > 1) { - if (!(dev->features & VHOST_USER_MQ)) { - PMD_INIT_LOG(ERR, "MQ not supported by the backend"); - return -1; - } - } + /* The backend will not report this feature, we add it explicitly */ + if (is_vhost_user_by_type(dev->path)) + dev->device_features |= (1ull << VIRTIO_NET_F_STATUS); + + dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES; return 0; } @@ -281,7 +395,25 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, void virtio_user_dev_uninit(struct virtio_user_dev *dev) { + uint32_t i; + + virtio_user_stop_device(dev); + + for (i = 0; i < dev->max_queue_pairs * 2; ++i) { + close(dev->callfds[i]); + close(dev->kickfds[i]); + } + close(dev->vhostfd); + + if (dev->vhostfds) { + for (i = 0; i < dev->max_queue_pairs; ++i) + close(dev->vhostfds[i]); + free(dev->vhostfds); + free(dev->tapfds); + } + + free(dev->ifname); } static uint8_t @@ -297,9 +429,9 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs) } for (i = 0; i < q_pairs; ++i) - ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 1); + ret |= dev->ops->enable_qp(dev, i, 1); for (i = q_pairs; i < dev->max_queue_pairs; ++i) - ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 0); + ret |= dev->ops->enable_qp(dev, i, 0); dev->queue_pairs = q_pairs; diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h index 28fc788e..8361b6bd 100644 --- a/drivers/net/virtio/virtio_user/virtio_user_dev.h +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h @@ -37,11 +37,20 @@ #include <limits.h> #include "../virtio_pci.h" #include "../virtio_ring.h" +#include "vhost.h" struct virtio_user_dev { + /* for vhost_user backend */ int vhostfd; - int callfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1]; - int kickfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1]; + + /* for vhost_kernel 
backend */ + char *ifname; + int *vhostfds; + int *tapfds; + + /* for both vhost_user and vhost_kernel */ + int callfds[VIRTIO_MAX_VIRTQUEUES]; + int kickfds[VIRTIO_MAX_VIRTQUEUES]; int mac_specified; uint32_t max_queue_pairs; uint32_t queue_pairs; @@ -51,15 +60,18 @@ struct virtio_user_dev { */ uint64_t device_features; /* supported features by device */ uint8_t status; + uint8_t port_id; uint8_t mac_addr[ETHER_ADDR_LEN]; char path[PATH_MAX]; - struct vring vrings[VIRTIO_MAX_VIRTQUEUES * 2 + 1]; + struct vring vrings[VIRTIO_MAX_VIRTQUEUES]; + struct virtio_user_backend_ops *ops; }; +int is_vhost_user_by_type(const char *path); int virtio_user_start_device(struct virtio_user_dev *dev); int virtio_user_stop_device(struct virtio_user_dev *dev); int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, - int cq, int queue_size, const char *mac); + int cq, int queue_size, const char *mac, char **ifname); void virtio_user_dev_uninit(struct virtio_user_dev *dev); void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx); #endif |
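The trickiest piece of the new vhost-kernel backend above is prepare_vhost_memory_kernel(), which folds up to RTE_MAX_MEMSEG (256) DPDK memsegs into the at most 64 regions the vhost kernel module accepts by default, by merging segments that touch in virtual address space. The following is a self-contained sketch of that merge strategy, not DPDK code: the struct and function names (seg, region, merge_segs) and the sample addresses are invented for illustration.

#include <inttypes.h>
#include <stdio.h>

/* Stand-ins for rte_memseg and vhost_memory_region; field names follow
 * the patch, the data below is made up.
 */
struct seg {
	uint64_t addr;
	uint64_t len;
};

struct region {
	uint64_t userspace_addr;
	uint64_t memory_size;
};

/* Merge virtually adjacent segments into at most max_regions regions,
 * the same strategy prepare_vhost_memory_kernel() uses.
 */
static int
merge_segs(const struct seg *segs, int nsegs,
	   struct region *out, int max_regions)
{
	int i, j, k = 0;

	for (i = 0; i < nsegs; ++i) {
		int merged = 0;

		for (j = 0; j < k; ++j) {
			/* segment starts right where region j ends */
			if (out[j].userspace_addr + out[j].memory_size ==
			    segs[i].addr) {
				out[j].memory_size += segs[i].len;
				merged = 1;
				break;
			}
			/* segment ends right where region j starts */
			if (segs[i].addr + segs[i].len ==
			    out[j].userspace_addr) {
				out[j].userspace_addr = segs[i].addr;
				out[j].memory_size += segs[i].len;
				merged = 1;
				break;
			}
		}
		if (merged)
			continue;

		if (k == max_regions)
			return -1;	/* layout cannot be represented */
		out[k].userspace_addr = segs[i].addr;
		out[k].memory_size = segs[i].len;
		k++;
	}
	return k;
}

int
main(void)
{
	/* three segments; the first two are contiguous in virtual memory */
	const struct seg segs[] = {
		{ 0x100000, 0x1000 },
		{ 0x101000, 0x1000 },
		{ 0x200000, 0x1000 },
	};
	struct region out[4];
	int i, n = merge_segs(segs, 3, out, 4);

	/* prints two regions: 0x100000/0x2000 and 0x200000/0x1000 */
	for (i = 0; i < n; ++i)
		printf("region %d: addr=0x%" PRIx64 " size=0x%" PRIx64 "\n",
		       i, out[i].userspace_addr, out[i].memory_size);
	return 0;
}

The real function additionally mirrors each address into guest_phys_addr ("use vaddr here!"), since for a vhost-kernel backend the "guest physical" space is simply the DPDK process's virtual address space, and it gives up (returns NULL) when the merged layout still needs more than max_regions regions.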