From ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 Mon Sep 17 00:00:00 2001
From: Christian Ehrhardt
Date: Thu, 2 Mar 2017 16:15:51 +0100
Subject: Imported Upstream version 16.11.1

Change-Id: I1e965265578efaaf08e5628607f53d2386d2df9f
Signed-off-by: Christian Ehrhardt
---
 drivers/net/virtio/virtio_ethdev.c               | 75 ++++++++++++++++++++--
 drivers/net/virtio/virtio_ethdev.h               |  5 ++
 drivers/net/virtio/virtio_pci.c                  | 80 +++++++++++++-----------
 drivers/net/virtio/virtio_pci.h                  | 24 ++++++-
 drivers/net/virtio/virtio_rxtx.c                 | 28 ++++++---
 drivers/net/virtio/virtio_user/virtio_user_dev.c | 60 ++++++++++--------
 drivers/net/virtio/virtio_user/virtio_user_dev.h |  5 +-
 drivers/net/virtio/virtio_user_ethdev.c          | 25 +++++---
 drivers/net/virtio/virtqueue.h                   |  2 +-
 9 files changed, 212 insertions(+), 92 deletions(-)

(limited to 'drivers/net/virtio')

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 079fd6c8..f5961ab7 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -152,6 +152,8 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
 #define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
 			     sizeof(rte_virtio_txq_stat_strings[0]))

+struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+
 static int
 virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 		int *dlen, int pkt_num)
@@ -360,7 +362,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	 * Read the virtqueue size from the Queue Size field
 	 * Always power of 2 and if 0 virtqueue does not exist
 	 */
-	vq_size = hw->vtpci_ops->get_queue_num(hw, vtpci_queue_idx);
+	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
 	PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
 	if (vq_size == 0) {
 		PMD_INIT_LOG(ERR, "virtqueue does not exist");
@@ -519,7 +521,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 		}
 	}

-	if (hw->vtpci_ops->setup_queue(hw, vq) < 0) {
+	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
 		PMD_INIT_LOG(ERR, "setup_queue failed");
 		return -EINVAL;
 	}
@@ -893,6 +895,7 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
 			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
 				rte_virtio_rxq_stat_strings[t].offset);
+			xstats[count].id = count;
 			count++;
 		}
 	}
@@ -908,6 +911,7 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
 			xstats[count].value = *(uint64_t *)(((char *)txvq) +
 				rte_virtio_txq_stat_strings[t].offset);
+			xstats[count].id = count;
 			count++;
 		}
 	}
@@ -1114,7 +1118,7 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
 		req_features);

 	/* Read device(host) feature bits */
-	host_features = hw->vtpci_ops->get_features(hw);
+	host_features = VTPCI_OPS(hw)->get_features(hw);
 	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
 		host_features);

@@ -1204,14 +1208,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 	if (virtio_negotiate_features(hw, req_features) < 0)
 		return -1;

+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+
 	/* If host does not support status then disable LSC */
 	if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
 		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
 	else
 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

-	rte_eth_copy_pci_info(eth_dev, pci_dev);
-
 	rx_func_get(eth_dev);

 	/* Setting up rx_header size for the device */
@@ -1286,6 +1290,52 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 	return 0;
 }

+/*
+ * Remap the PCI device again (IO port map for legacy device and
+ * memory map for modern device), so that the secondary process
+ * could have the PCI initiated correctly.
+ */
+static int
+virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
+{
+	if (hw->modern) {
+		/*
+		 * We don't have to re-parse the PCI config space, since
+		 * rte_eal_pci_map_device() makes sure the mapped address
+		 * in secondary process would equal to the one mapped in
+		 * the primary process: error will be returned if that
+		 * requirement is not met.
+		 *
+		 * That said, we could simply reuse all cap pointers
+		 * (such as dev_cfg, common_cfg, etc.) parsed from the
+		 * primary process, which is stored in shared memory.
+		 */
+		if (rte_eal_pci_map_device(pci_dev)) {
+			PMD_INIT_LOG(DEBUG, "failed to map pci device!");
+			return -1;
+		}
+	} else {
+		if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+static void
+virtio_set_vtpci_ops(struct virtio_hw *hw)
+{
+#ifdef RTE_VIRTIO_USER
+	if (hw->virtio_user_dev)
+		VTPCI_OPS(hw) = &virtio_user_ops;
+	else
+#endif
+	if (hw->modern)
+		VTPCI_OPS(hw) = &modern_ops;
+	else
+		VTPCI_OPS(hw) = &legacy_ops;
+}
+
 /*
  * This function is based on probe() function in virtio_pci.c
  * It returns 0 on success.
@@ -1304,7 +1354,19 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->tx_pkt_burst = &virtio_xmit_pkts;

 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-		rx_func_get(eth_dev);
+		if (!hw->virtio_user_dev) {
+			ret = virtio_remap_pci(eth_dev->pci_dev, hw);
+			if (ret)
+				return ret;
+		}
+
+		virtio_set_vtpci_ops(hw);
+		if (hw->use_simple_rxtx) {
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+		} else {
+			rx_func_get(eth_dev);
+		}
 		return 0;
 	}

@@ -1318,6 +1380,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 	}

 	pci_dev = eth_dev->pci_dev;
+	hw->port_id = eth_dev->data->port_id;

 	if (pci_dev) {
 		ret = vtpci_init(pci_dev, hw, &dev_flags);
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 27d9a190..4feccf93 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -70,6 +70,11 @@
 	 1ULL << VIRTIO_F_VERSION_1	  |	\
 	 1ULL << VIRTIO_F_IOMMU_PLATFORM)

+#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES	\
+	(VIRTIO_PMD_DEFAULT_GUEST_FEATURES |	\
+	 1u << VIRTIO_NET_F_GUEST_CSUM	   |	\
+	 1u << VIRTIO_NET_F_GUEST_TSO4	   |	\
+	 1u << VIRTIO_NET_F_GUEST_TSO6)
 /*
  * CQ function prototype
  */
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 9b47165d..8d5355c7 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -92,17 +92,17 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
 	while (length > 0) {
 		if (length >= 4) {
 			size = 4;
-			rte_eal_pci_ioport_read(&hw->io, dst, size,
+			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
 		} else if (length >= 2) {
 			size = 2;
-			rte_eal_pci_ioport_read(&hw->io, dst, size,
+			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
 		} else {
 			size = 1;
-			rte_eal_pci_ioport_read(&hw->io, dst, size,
+			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 		}

@@ -111,7 +111,7 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
 		length -= size;
 	}
 #else
-	rte_eal_pci_ioport_read(&hw->io, dst, length,
+	rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, length,
 		VIRTIO_PCI_CONFIG(hw) + offset);
 #endif
 }
@@ -131,16 +131,16 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
 		if (length >= 4) {
 			size = 4;
 			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
-			rte_eal_pci_ioport_write(&hw->io, &tmp.u32, size,
+			rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 		} else if (length >= 2) {
 			size = 2;
 			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
-			rte_eal_pci_ioport_write(&hw->io, &tmp.u16, size,
+			rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 		} else {
 			size = 1;
-			rte_eal_pci_ioport_write(&hw->io, src, size,
+			rte_eal_pci_ioport_write(VTPCI_IO(hw), src, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 		}

@@ -149,7 +149,7 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
 		length -= size;
 	}
 #else
-	rte_eal_pci_ioport_write(&hw->io, src, length,
+	rte_eal_pci_ioport_write(VTPCI_IO(hw), src, length,
 		VIRTIO_PCI_CONFIG(hw) + offset);
 #endif
 }
@@ -159,7 +159,8 @@ legacy_get_features(struct virtio_hw *hw)
 {
 	uint32_t dst;

-	rte_eal_pci_ioport_read(&hw->io, &dst, 4, VIRTIO_PCI_HOST_FEATURES);
+	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 4,
+		VIRTIO_PCI_HOST_FEATURES);
 	return dst;
 }

@@ -171,7 +172,7 @@ legacy_set_features(struct virtio_hw *hw, uint64_t features)
 			"only 32 bit features are allowed for legacy virtio!");
 		return;
 	}
-	rte_eal_pci_ioport_write(&hw->io, &features, 4,
+	rte_eal_pci_ioport_write(VTPCI_IO(hw), &features, 4,
 		VIRTIO_PCI_GUEST_FEATURES);
 }

@@ -180,14 +181,14 @@ legacy_get_status(struct virtio_hw *hw)
 {
 	uint8_t dst;

-	rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_STATUS);
+	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
 	return dst;
 }

 static void
 legacy_set_status(struct virtio_hw *hw, uint8_t status)
 {
-	rte_eal_pci_ioport_write(&hw->io, &status, 1, VIRTIO_PCI_STATUS);
+	rte_eal_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
 }

 static void
@@ -201,7 +202,7 @@ legacy_get_isr(struct virtio_hw *hw)
 {
 	uint8_t dst;

-	rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_ISR);
+	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
 	return dst;
 }

@@ -211,8 +212,10 @@ legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
 {
 	uint16_t dst;

-	rte_eal_pci_ioport_write(&hw->io, &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
-	rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
+	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
+		VIRTIO_MSI_CONFIG_VECTOR);
+	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2,
+		VIRTIO_MSI_CONFIG_VECTOR);
 	return dst;
 }

@@ -221,8 +224,9 @@ legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
 {
 	uint16_t dst;

-	rte_eal_pci_ioport_write(&hw->io, &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
-	rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_PCI_QUEUE_NUM);
+	rte_eal_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2,
+		VIRTIO_PCI_QUEUE_SEL);
+	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
 	return dst;
 }

@@ -234,10 +238,10 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
 	if (!check_vq_phys_addr_ok(vq))
 		return -1;

-	rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
+	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
 		VIRTIO_PCI_QUEUE_SEL);
 	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
-	rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
+	rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);

 	return 0;
 }
@@ -247,15 +251,15 @@ legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
 {
 	uint32_t src = 0;

-	rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
+	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
 		VIRTIO_PCI_QUEUE_SEL);
-	rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
+	rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
 }

 static void
 legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
 {
-	rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
+	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
 		VIRTIO_PCI_QUEUE_NOTIFY);
 }

@@ -289,7 +293,7 @@ static int
 legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
 			    struct virtio_hw *hw, uint32_t *dev_flags)
 {
-	if (rte_eal_pci_ioport_map(pci_dev, 0, &hw->io) < 0)
+	if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
 		return -1;

 	if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN)
@@ -300,7 +304,7 @@ legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
 	return 0;
 }

-static const struct virtio_pci_ops legacy_ops = {
+const struct virtio_pci_ops legacy_ops = {
 	.read_dev_cfg	= legacy_read_dev_config,
 	.write_dev_cfg	= legacy_write_dev_config,
 	.reset		= legacy_reset,
@@ -516,7 +520,7 @@ modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
 	io_write16(1, vq->notify_addr);
 }

-static const struct virtio_pci_ops modern_ops = {
+const struct virtio_pci_ops modern_ops = {
 	.read_dev_cfg	= modern_read_dev_config,
 	.write_dev_cfg	= modern_write_dev_config,
 	.reset		= modern_reset,
@@ -537,14 +541,14 @@ void
 vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
 		      void *dst, int length)
 {
-	hw->vtpci_ops->read_dev_cfg(hw, offset, dst, length);
+	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
 }

 void
 vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
 		       const void *src, int length)
 {
-	hw->vtpci_ops->write_dev_cfg(hw, offset, src, length);
+	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
 }

 uint64_t
@@ -557,7 +561,7 @@ vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
 	 * host all support.
 	 */
 	features = host_features & hw->guest_features;
-	hw->vtpci_ops->set_features(hw, features);
+	VTPCI_OPS(hw)->set_features(hw, features);

 	return features;
 }
@@ -565,9 +569,9 @@ vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
 void
 vtpci_reset(struct virtio_hw *hw)
 {
-	hw->vtpci_ops->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
 	/* flush status write */
-	hw->vtpci_ops->get_status(hw);
+	VTPCI_OPS(hw)->get_status(hw);
 }

 void
@@ -580,21 +584,21 @@ vtpci_reinit_complete(struct virtio_hw *hw)
 void
 vtpci_set_status(struct virtio_hw *hw, uint8_t status)
 {
 	if (status != VIRTIO_CONFIG_STATUS_RESET)
-		status |= hw->vtpci_ops->get_status(hw);
+		status |= VTPCI_OPS(hw)->get_status(hw);

-	hw->vtpci_ops->set_status(hw, status);
+	VTPCI_OPS(hw)->set_status(hw, status);
 }

 uint8_t
 vtpci_get_status(struct virtio_hw *hw)
 {
-	return hw->vtpci_ops->get_status(hw);
+	return VTPCI_OPS(hw)->get_status(hw);
 }

 uint8_t
 vtpci_isr(struct virtio_hw *hw)
 {
-	return hw->vtpci_ops->get_isr(hw);
+	return VTPCI_OPS(hw)->get_isr(hw);
 }

@@ -602,7 +606,7 @@ vtpci_isr(struct virtio_hw *hw)
 uint16_t
 vtpci_irq_config(struct virtio_hw *hw, uint16_t vec)
 {
-	return hw->vtpci_ops->set_config_irq(hw, vec);
+	return VTPCI_OPS(hw)->set_config_irq(hw, vec);
 }

 static void *
@@ -736,8 +740,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
 	 */
 	if (virtio_read_caps(dev, hw) == 0) {
 		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
-		hw->vtpci_ops = &modern_ops;
-		hw->modern    = 1;
+		virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
+		hw->modern = 1;
 		*dev_flags |= RTE_ETH_DEV_INTR_LSC;
 		return 0;
 	}
@@ -755,7 +759,7 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
 		return -1;
 	}

-	hw->vtpci_ops = &legacy_ops;
+	virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
 	hw->use_msix = legacy_virtio_has_msix(&dev->addr);
 	hw->modern   = 0;

diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index de271bfe..511a1c87 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -245,7 +245,6 @@ struct virtio_net_config;

 struct virtio_hw {
 	struct virtnet_ctl *cvq;
-	struct rte_pci_ioport io;
 	uint64_t    req_guest_features;
 	uint64_t    guest_features;
 	uint32_t    max_queue_pairs;
@@ -254,6 +253,7 @@ struct virtio_hw {
 	uint8_t     use_msix;
 	uint8_t     modern;
 	uint8_t     use_simple_rxtx;
+	uint8_t     port_id;
 	uint8_t     mac_addr[ETHER_ADDR_LEN];
 	uint32_t    notify_off_multiplier;
 	uint8_t     *isr;
@@ -261,12 +261,28 @@ struct virtio_hw {
 	struct rte_pci_device *dev;
 	struct virtio_pci_common_cfg *common_cfg;
 	struct virtio_net_config *dev_cfg;
-	const struct virtio_pci_ops *vtpci_ops;
 	void	    *virtio_user_dev;

 	struct virtqueue **vqs;
 };

+
+/*
+ * While virtio_hw is stored in shared memory, this structure stores
+ * some infos that may vary in the multiple process model locally.
+ * For example, the vtpci_ops pointer.
+ */
+struct virtio_hw_internal {
+	const struct virtio_pci_ops *vtpci_ops;
+	struct rte_pci_ioport io;
+};
+
+#define VTPCI_OPS(hw)	(virtio_hw_internal[(hw)->port_id].vtpci_ops)
+#define VTPCI_IO(hw)	(&virtio_hw_internal[(hw)->port_id].io)
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+
+
 /*
  * This structure is just a reference to read
  * net device specific config space; it just a chodu structure
@@ -317,4 +333,8 @@ uint8_t vtpci_isr(struct virtio_hw *);

 uint16_t vtpci_irq_config(struct virtio_hw *, uint16_t);

+extern const struct virtio_pci_ops legacy_ops;
+extern const struct virtio_pci_ops modern_ops;
+extern const struct virtio_pci_ops virtio_user_ops;
+
 #endif /* _VIRTIO_PCI_H_ */
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 22d97a4e..a33ef1a8 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -258,6 +258,12 @@ tx_offload_enabled(struct virtio_hw *hw)
 		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
 }

+/* avoid write operation when necessary, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
+	if ((var) != (val))			\
+		(var) = (val);			\
+} while (0)
+
 static inline void
 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		       uint16_t needed, int use_indirect, int can_push)
@@ -286,8 +292,14 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		hdr = (struct virtio_net_hdr *)
 			rte_pktmbuf_prepend(cookie, head_size);
 		/* if offload disabled, it is not zeroed below, do it now */
-		if (offload == 0)
-			memset(hdr, 0, head_size);
+		if (offload == 0) {
+			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+		}
 	} else if (use_indirect) {
 		/* setup tx ring slot to point to indirect
 		 * descriptor list stored in reserved region.
@@ -337,9 +349,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 			break;

 		default:
-			hdr->csum_start = 0;
-			hdr->csum_offset = 0;
-			hdr->flags = 0;
+			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
 			break;
 		}

@@ -355,9 +367,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 				cookie->l3_len +
 				cookie->l4_len;
 		} else {
-			hdr->gso_type = 0;
-			hdr->gso_size = 0;
-			hdr->hdr_len = 0;
+			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
 		}
 	}

diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index e239e0eb..a38398b8 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -148,12 +148,13 @@ virtio_user_start_device(struct virtio_user_dev *dev)

 	/* Step 1: set features
 	 * Make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is enabled,
-	 * and VIRTIO_NET_F_MAC is stripped.
+	 * VIRTIO_NET_F_MAC and VIRTIO_NET_F_CTRL_VQ is stripped.
 	 */
 	features = dev->features;
 	if (dev->max_queue_pairs > 1)
 		features |= VHOST_USER_MQ;
 	features &= ~(1ull << VIRTIO_NET_F_MAC);
+	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
 	ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_FEATURES, &features);
 	if (ret < 0)
 		goto error;
@@ -181,7 +182,17 @@ error:
 int
 virtio_user_stop_device(struct virtio_user_dev *dev)
 {
-	return vhost_user_sock(dev->vhostfd, VHOST_USER_RESET_OWNER, NULL);
+	uint32_t i;
+
+	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+		close(dev->callfds[i]);
+		close(dev->kickfds[i]);
+	}
+
+	for (i = 0; i < dev->max_queue_pairs; ++i)
+		vhost_user_enable_queue_pair(dev->vhostfd, i, 0);
+
+	return 0;
 }

 static inline void
@@ -209,6 +220,8 @@ int
 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 		     int cq, int queue_size, const char *mac)
 {
+	uint32_t i;
+
 	snprintf(dev->path, PATH_MAX, "%s", path);
 	dev->max_queue_pairs = queues;
 	dev->queue_pairs = 1; /* mq disabled by default */
@@ -217,6 +230,11 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 	parse_mac(dev, mac);
 	dev->vhostfd = -1;

+	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES * 2 + 1; ++i) {
+		dev->kickfds[i] = -1;
+		dev->callfds[i] = -1;
+	}
+
 	dev->vhostfd = vhost_user_setup(dev->path);
 	if (dev->vhostfd < 0) {
 		PMD_INIT_LOG(ERR, "backend set up fails");
@@ -228,29 +246,26 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 	}

 	if (vhost_user_sock(dev->vhostfd, VHOST_USER_GET_FEATURES,
-			    &dev->features) < 0) {
+			    &dev->device_features) < 0) {
 		PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
 		return -1;
 	}
 	if (dev->mac_specified)
-		dev->features |= (1ull << VIRTIO_NET_F_MAC);
+		dev->device_features |= (1ull << VIRTIO_NET_F_MAC);

-	if (!cq) {
-		dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
-		/* Also disable features depends on VIRTIO_NET_F_CTRL_VQ */
-		dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
-		dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
-		dev->features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
-		dev->features &= ~(1ull << VIRTIO_NET_F_MQ);
-		dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
-	} else {
-		/* vhost user backend does not need to know ctrl-q, so
-		 * actually we need add this bit into features. However,
-		 * DPDK vhost-user does send features with this bit, so we
-		 * check it instead of OR it for now.
+	if (cq) {
+		/* device does not really need to know anything about CQ,
+		 * so if necessary, we just claim to support CQ
 		 */
-		if (!(dev->features & (1ull << VIRTIO_NET_F_CTRL_VQ)))
-			PMD_INIT_LOG(INFO, "vhost does not support ctrl-q");
+		dev->device_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+	} else {
+		dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
+		/* Also disable features depends on VIRTIO_NET_F_CTRL_VQ */
+		dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
+		dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
+		dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+		dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
+		dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
 	}

 	if (dev->max_queue_pairs > 1) {
@@ -266,13 +281,6 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 void
 virtio_user_dev_uninit(struct virtio_user_dev *dev)
 {
-	uint32_t i;
-
-	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
-		close(dev->callfds[i]);
-		close(dev->kickfds[i]);
-	}
-
 	close(dev->vhostfd);
 }

diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 33690b5c..28fc788e 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -46,7 +46,10 @@ struct virtio_user_dev {
 	uint32_t	max_queue_pairs;
 	uint32_t	queue_pairs;
 	uint32_t	queue_size;
-	uint64_t	features;
+	uint64_t	features; /* the negotiated features with driver,
+				   * and will be sync with device
+				   */
+	uint64_t	device_features; /* supported features by device */
 	uint8_t		status;
 	uint8_t		mac_addr[ETHER_ADDR_LEN];
 	char		path[PATH_MAX];
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 406beeac..013600e4 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -87,21 +87,24 @@ virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
 }

 static void
-virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
+virtio_user_reset(struct virtio_hw *hw)
 {
 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

-	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
-		virtio_user_start_device(dev);
-	dev->status = status;
+	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+		virtio_user_stop_device(dev);
 }

 static void
-virtio_user_reset(struct virtio_hw *hw)
+virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
 {
 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

-	virtio_user_stop_device(dev);
+	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+		virtio_user_start_device(dev);
+	else if (status == VIRTIO_CONFIG_STATUS_RESET)
+		virtio_user_reset(hw);
+	dev->status = status;
 }

 static uint8_t
@@ -117,7 +120,8 @@ virtio_user_get_features(struct virtio_hw *hw)
 {
 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

-	return dev->features;
+	/* unmask feature bits defined in vhost user protocol */
+	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
 }

 static void
@@ -125,7 +129,7 @@ virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
 {
 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);

-	dev->features = features;
+	dev->features = features & dev->device_features;
 }

 static uint8_t
@@ -212,7 +216,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
 			  strerror(errno));
 }

-static const struct virtio_pci_ops virtio_user_ops = {
+const struct virtio_pci_ops virtio_user_ops = {
 	.read_dev_cfg	= virtio_user_read_dev_config,
 	.write_dev_cfg	= virtio_user_write_dev_config,
 	.reset		= virtio_user_reset,
@@ -301,7 +305,8 @@ virtio_user_eth_dev_alloc(const char *name)
 		return NULL;
 	}

-	hw->vtpci_ops = &virtio_user_ops;
+	hw->port_id = data->port_id;
+	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
 	hw->use_msix = 0;
 	hw->modern = 0;
 	hw->use_simple_rxtx = 0;
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index f0bb0899..b1070e05 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -330,7 +330,7 @@ virtqueue_notify(struct virtqueue *vq)
 	 * For virtio on IA, the notificaiton is through io port operation
 	 * which is a serialization instruction itself.
 	 */
-	vq->hw->vtpci_ops->notify_queue(vq->hw, vq);
+	VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
 }

 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
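The per-port indirection introduced by this series can be illustrated outside of DPDK. The following is a minimal standalone sketch, not part of the imported patch; all names (dev_shared, dev_internal, DEV_OPS, bind_ops, MAX_PORTS) are invented for illustration and are not DPDK APIs. It shows the pattern applied to virtio_hw above: the structure shared between primary and secondary processes carries only a port id and plain data, while each process keeps its own table of function pointers (and I/O port state) indexed by that id, because a function pointer stored by one process is not a valid address in another.

#include <stdio.h>

#define MAX_PORTS 32

/* lives in shared memory: plain data only, no process-local pointers */
struct dev_shared {
	unsigned char port_id;
	unsigned char modern;
};

/* per-process function table */
struct dev_ops {
	void (*notify)(struct dev_shared *d);
};

/* per-process mirror of driver state, indexed by port id */
struct dev_internal {
	const struct dev_ops *ops;
};

static struct dev_internal dev_internal[MAX_PORTS];

#define DEV_OPS(d) (dev_internal[(d)->port_id].ops)

static void notify_modern(struct dev_shared *d)
{
	printf("modern notify, port %u\n", (unsigned)d->port_id);
}

static void notify_legacy(struct dev_shared *d)
{
	printf("legacy notify, port %u\n", (unsigned)d->port_id);
}

static const struct dev_ops modern_ops = { .notify = notify_modern };
static const struct dev_ops legacy_ops = { .notify = notify_legacy };

/* every process re-binds the ops table inside its own address space */
static void bind_ops(struct dev_shared *d)
{
	dev_internal[d->port_id].ops = d->modern ? &modern_ops : &legacy_ops;
}

int main(void)
{
	struct dev_shared d = { .port_id = 3, .modern = 1 };

	bind_ops(&d);
	DEV_OPS(&d)->notify(&d);	/* dispatch through the local table */
	return 0;
}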