Diffstat (limited to 'drivers/net/virtio')
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c               |  78
-rw-r--r--  drivers/net/virtio/virtio_ethdev.h               |  17
-rw-r--r--  drivers/net/virtio/virtio_pci.h                  |  12
-rw-r--r--  drivers/net/virtio/virtio_rxtx.c                 | 691
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.c          |  67
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.h          |  49
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c |  30
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.h |   4
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c          |  49
-rw-r--r--  drivers/net/virtio/virtqueue.c                   |   8
-rw-r--r--  drivers/net/virtio/virtqueue.h                   |   2
11 files changed, 737 insertions(+), 270 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index df50a571..614357da 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1320,6 +1320,11 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (hw->use_inorder_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using inorder mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
PMD_INIT_LOG(INFO,
"virtio: using mergeable buffer Rx path on port %u",
@@ -1331,10 +1336,10 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
- if (hw->use_simple_tx) {
- PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
+ if (hw->use_inorder_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
} else {
PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
eth_dev->data->port_id);
@@ -1781,9 +1786,7 @@ static struct rte_pci_driver rte_virtio_pmd = {
.remove = eth_virtio_pci_remove,
};
-RTE_INIT(rte_virtio_pmd_init);
-static void
-rte_virtio_pmd_init(void)
+RTE_INIT(rte_virtio_pmd_init)
{
if (rte_eal_iopl_init() != 0) {
PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
@@ -1793,6 +1796,22 @@ rte_virtio_pmd_init(void)
rte_pci_register(&rte_virtio_pmd);
}
+static bool
+rx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
+}
+
+static bool
+tx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
+}
+
/*
* Configure virtio device
* It returns 0 on success.
@@ -1801,8 +1820,10 @@ static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
struct virtio_hw *hw = dev->data->dev_private;
uint64_t rx_offloads = rxmode->offloads;
+ uint64_t tx_offloads = txmode->offloads;
uint64_t req_features;
int ret;
@@ -1824,6 +1845,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM))
+ req_features |= (1ULL << VIRTIO_NET_F_CSUM);
+
+ if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+ req_features |=
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6);
+
/* if request features changed, reinit the device */
if (req_features != hw->req_guest_features) {
ret = virtio_init_device(dev, req_features);
@@ -1861,6 +1891,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return -ENOTSUP;
}
+ hw->has_tx_offload = tx_offload_enabled(hw);
+ hw->has_rx_offload = rx_offload_enabled(hw);
+
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
/* Enable vector (0) for Link State Intrerrupt */
if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
@@ -1872,21 +1905,30 @@ virtio_dev_configure(struct rte_eth_dev *dev)
rte_spinlock_init(&hw->state_lock);
hw->use_simple_rx = 1;
- hw->use_simple_tx = 1;
+
+ if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+ hw->use_inorder_tx = 1;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ hw->use_inorder_rx = 1;
+ hw->use_simple_rx = 0;
+ } else {
+ hw->use_inorder_rx = 0;
+ }
+ }
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
hw->use_simple_rx = 0;
- hw->use_simple_tx = 0;
}
#endif
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- hw->use_simple_rx = 0;
- hw->use_simple_tx = 0;
+ hw->use_simple_rx = 0;
}
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM))
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_VLAN_STRIP))
hw->use_simple_rx = 0;
return 0;
@@ -2122,12 +2164,10 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
- };
host_features = VTPCI_OPS(hw)->get_features(hw);
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_CRC_STRIP;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |
@@ -2142,14 +2182,14 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT;
- if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
+ if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
dev_info->tx_offload_capa |=
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
}
tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6);
- if ((hw->guest_features & tso_mask) == tso_mask)
+ if ((host_features & tso_mask) == tso_mask)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}
@@ -2168,9 +2208,7 @@ RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(virtio_init_log);
-static void
-virtio_init_log(void)
+RTE_INIT(virtio_init_log)
{
virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
if (virtio_logtype_init >= 0)
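[Editor's example] The virtio_dev_configure() hunks above translate the application's per-port txmode.offloads into requested virtio feature bits (VIRTIO_NET_F_CSUM, VIRTIO_NET_F_HOST_TSO4/6) and record the negotiated result in hw->has_tx_offload / hw->has_rx_offload. A minimal sketch of how an application would drive that path; the port id, queue counts and chosen offload flags are illustrative, not part of this patch:

#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>
#include <rte_debug.h>

static void
configure_virtio_port(uint16_t port_id)	/* port_id is illustrative */
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* Requesting TCP checksum + TSO here makes virtio_dev_configure()
	 * add VIRTIO_NET_F_CSUM and VIRTIO_NET_F_HOST_TSO4/6 to
	 * req_features before reinitializing the device. */
	conf.txmode.offloads = DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO;
	/* Any Rx checksum/LRO/VLAN-strip request also disables the
	 * simple Rx path, per the rx_offloads check above. */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_CKSUM;

	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
}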
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index bb40064e..b726ad10 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -28,14 +28,12 @@
1u << VIRTIO_NET_F_CTRL_VQ | \
1u << VIRTIO_NET_F_CTRL_RX | \
1u << VIRTIO_NET_F_CTRL_VLAN | \
- 1u << VIRTIO_NET_F_CSUM | \
- 1u << VIRTIO_NET_F_HOST_TSO4 | \
- 1u << VIRTIO_NET_F_HOST_TSO6 | \
1u << VIRTIO_NET_F_MRG_RXBUF | \
1u << VIRTIO_NET_F_MTU | \
1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE | \
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
+ 1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_IOMMU_PLATFORM)
#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
@@ -44,13 +42,6 @@
1u << VIRTIO_NET_F_GUEST_TSO4 | \
1u << VIRTIO_NET_F_GUEST_TSO6)
-#define VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS \
- (DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_VLAN_STRIP)
-
/*
* CQ function prototype
*/
@@ -83,9 +74,15 @@ uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index a28ba833..58fdd3d4 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -6,6 +6,7 @@
#define _VIRTIO_PCI_H_
#include <stdint.h>
+#include <stdbool.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -121,6 +122,12 @@ struct virtnet_ctl;
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END 34
+/*
+ * Inorder feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER 35
+
/* The Guest publishes the used index for which it expects an interrupt
* at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
@@ -232,7 +239,10 @@ struct virtio_hw {
uint8_t use_msix;
uint8_t modern;
uint8_t use_simple_rx;
- uint8_t use_simple_tx;
+ uint8_t use_inorder_rx;
+ uint8_t use_inorder_tx;
+ bool has_tx_offload;
+ bool has_rx_offload;
uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
uint32_t notify_off_multiplier;
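[Editor's example] The VIRTIO_F_IN_ORDER comment above is the property the new Rx/Tx paths in virtio_rxtx.c rely on: because the device uses buffers in the order they were made available, the used element's id always matches the driver's own running consumed index, so no chained free-list walk is needed. A standalone sketch of that invariant, with simplified types that are not the driver's actual structures:

#include <stdint.h>

struct used_elem { uint32_t id; uint32_t len; };

/* Consume 'num' completions from an in-order used ring.  The descriptor
 * index is derived from the free-running consumed counter itself
 * (ring_size must be a power of two), mirroring how
 * virtqueue_dequeue_rx_inorder() uses vq_used_cons_idx. */
static uint16_t
consume_in_order(const struct used_elem *used_ring, uint16_t ring_size,
		 uint16_t *used_cons_idx, uint32_t *lens, uint16_t num)
{
	uint16_t i;

	for (i = 0; i < num; i++) {
		uint16_t idx = *used_cons_idx & (ring_size - 1);

		/* In order: used_ring[idx].id == idx, so id can be ignored. */
		lens[i] = used_ring[idx].len;
		(*used_cons_idx)++;
	}
	return i;
}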
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 92fab217..eb891433 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -48,6 +48,13 @@ virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
}
void
+vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
+{
+ vq->vq_free_cnt += num;
+ vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
+}
+
+void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp, *dp_tail;
@@ -115,6 +122,44 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
return i;
}
+static uint16_t
+virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t *len,
+ uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_mbuf *cookie;
+ uint16_t used_idx = 0;
+ uint16_t i;
+
+ if (unlikely(num == 0))
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ /* Desc idx same as used idx */
+ uep = &vq->vq_ring.used->ring[used_idx];
+ len[i] = uep->len;
+ cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
+
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+ vq->vq_used_cons_idx++;
+ vq->vq_descx[used_idx].cookie = NULL;
+ }
+
+ vq_ring_free_inorder(vq, used_idx, i);
+ return i;
+}
+
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
@@ -143,6 +188,83 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
}
}
+/* Cleanup from completed inorder transmits. */
+static void
+virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
+{
+ uint16_t i, used_idx, desc_idx = 0, last_idx;
+ int16_t free_cnt = 0;
+ struct vq_desc_extra *dxp = NULL;
+
+ if (unlikely(num == 0))
+ return;
+
+ for (i = 0; i < num; i++) {
+ struct vring_used_elem *uep;
+
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_used_cons_idx++;
+
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
+
+ last_idx = desc_idx + dxp->ndescs - 1;
+ free_cnt = last_idx - vq->vq_desc_tail_idx;
+ if (free_cnt <= 0)
+ free_cnt += vq->vq_nentries;
+
+ vq_ring_free_inorder(vq, last_idx, free_cnt);
+}
+
+static inline int
+virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
+ struct rte_mbuf **cookies,
+ uint16_t num)
+{
+ struct vq_desc_extra *dxp;
+ struct virtio_hw *hw = vq->hw;
+ struct vring_desc *start_dp;
+ uint16_t head_idx, idx, i = 0;
+
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < num))
+ return -EMSGSIZE;
+
+ head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
+ start_dp = vq->vq_ring.desc;
+
+ while (i < num) {
+ idx = head_idx & (vq->vq_nentries - 1);
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookies[i];
+ dxp->ndescs = 1;
+
+ start_dp[idx].addr =
+ VIRTIO_MBUF_ADDR(cookies[i], vq) +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[idx].len =
+ cookies[i]->buf_len -
+ RTE_PKTMBUF_HEADROOM +
+ hw->vtnet_hdr_size;
+ start_dp[idx].flags = VRING_DESC_F_WRITE;
+
+ vq_update_avail_ring(vq, idx);
+ head_idx++;
+ i++;
+ }
+
+ vq->vq_desc_head_idx += num;
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ return 0;
+}
static inline int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
@@ -225,13 +347,6 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
}
}
-static inline int
-tx_offload_enabled(struct virtio_hw *hw)
-{
- return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
-}
/* avoid write operation when necessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do { \
@@ -240,8 +355,111 @@ tx_offload_enabled(struct virtio_hw *hw)
} while (0)
static inline void
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
+ struct rte_mbuf *cookie,
+ bool offload)
+{
+ if (offload) {
+ if (cookie->ol_flags & PKT_TX_TCP_SEG)
+ cookie->ol_flags |= PKT_TX_TCP_CKSUM;
+
+ switch (cookie->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct udp_hdr,
+ dgram_cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ case PKT_TX_TCP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ default:
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ break;
+ }
+
+ /* TCP Segmentation Offload */
+ if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+ virtio_tso_fix_cksum(cookie);
+ hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+ VIRTIO_NET_HDR_GSO_TCPV6 :
+ VIRTIO_NET_HDR_GSO_TCPV4;
+ hdr->gso_size = cookie->tso_segsz;
+ hdr->hdr_len =
+ cookie->l2_len +
+ cookie->l3_len +
+ cookie->l4_len;
+ } else {
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+ }
+}
+
+static inline void
+virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
+ struct rte_mbuf **cookies,
+ uint16_t num)
+{
+ struct vq_desc_extra *dxp;
+ struct virtqueue *vq = txvq->vq;
+ struct vring_desc *start_dp;
+ struct virtio_net_hdr *hdr;
+ uint16_t idx;
+ uint16_t head_size = vq->hw->vtnet_hdr_size;
+ uint16_t i = 0;
+
+ idx = vq->vq_desc_head_idx;
+ start_dp = vq->vq_ring.desc;
+
+ while (i < num) {
+ idx = idx & (vq->vq_nentries - 1);
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookies[i];
+ dxp->ndescs = 1;
+
+ hdr = (struct virtio_net_hdr *)
+ rte_pktmbuf_prepend(cookies[i], head_size);
+ cookies[i]->pkt_len -= head_size;
+
+ /* if offload disabled, it is not zeroed below, do it now */
+ if (!vq->hw->has_tx_offload) {
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+
+ virtqueue_xmit_offload(hdr, cookies[i],
+ vq->hw->has_tx_offload);
+
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
+ start_dp[idx].len = cookies[i]->data_len;
+ start_dp[idx].flags = 0;
+
+ vq_update_avail_ring(vq, idx);
+
+ idx++;
+ i++;
+ };
+
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
+}
+
+static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
- uint16_t needed, int use_indirect, int can_push)
+ uint16_t needed, int use_indirect, int can_push,
+ int in_order)
{
struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
struct vq_desc_extra *dxp;
@@ -251,9 +469,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t head_idx, idx;
uint16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
- int offload;
- offload = tx_offload_enabled(vq->hw);
head_idx = vq->vq_desc_head_idx;
idx = head_idx;
dxp = &vq->vq_descx[idx];
@@ -270,8 +486,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
* which is wrong. Below subtract restores correct pkt size.
*/
cookie->pkt_len -= head_size;
+
/* if offload disabled, it is not zeroed below, do it now */
- if (offload == 0) {
+ if (!vq->hw->has_tx_offload) {
ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
@@ -308,49 +525,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
idx = start_dp[idx].next;
}
- /* Checksum Offload / TSO */
- if (offload) {
- if (cookie->ol_flags & PKT_TX_TCP_SEG)
- cookie->ol_flags |= PKT_TX_TCP_CKSUM;
-
- switch (cookie->ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct udp_hdr,
- dgram_cksum);
- hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- break;
-
- case PKT_TX_TCP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
- hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- break;
-
- default:
- ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
- ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
- ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
- break;
- }
-
- /* TCP Segmentation Offload */
- if (cookie->ol_flags & PKT_TX_TCP_SEG) {
- virtio_tso_fix_cksum(cookie);
- hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
- VIRTIO_NET_HDR_GSO_TCPV6 :
- VIRTIO_NET_HDR_GSO_TCPV4;
- hdr->gso_size = cookie->tso_segsz;
- hdr->hdr_len =
- cookie->l2_len +
- cookie->l3_len +
- cookie->l4_len;
- } else {
- ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
- ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
- ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
- }
- }
+ virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
do {
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
@@ -362,11 +537,15 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
if (use_indirect)
idx = vq->vq_ring.desc[head_idx].next;
- vq->vq_desc_head_idx = idx;
- if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_tail_idx = idx;
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+
+ vq->vq_desc_head_idx = idx;
vq_update_avail_ring(vq, head_idx);
+
+ if (!in_order) {
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = idx;
+ }
}
void
@@ -421,7 +600,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
struct virtnet_rx *rxvq = &vq->rxq;
struct rte_mbuf *m;
uint16_t desc_idx;
- int error, nbufs;
+ int error, nbufs, i;
PMD_INIT_FUNC_TRACE();
@@ -451,6 +630,25 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
virtio_rxq_rearm_vec(rxvq);
nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
}
+ } else if (hw->use_inorder_rx) {
+ if ((!virtqueue_full(vq))) {
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *pkts[free_cnt];
+
+ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
+ free_cnt)) {
+ error = virtqueue_enqueue_refill_inorder(vq,
+ pkts,
+ free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
+ }
+
+ nbufs += free_cnt;
+ vq_update_avail_idx(vq);
+ }
} else {
while (!virtqueue_full(vq)) {
m = rte_mbuf_raw_alloc(rxvq->mpool);
@@ -498,10 +696,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- /* cannot use simple rxtx funcs with multisegs or offloads */
- if (dev->data->dev_conf.txmode.offloads)
- hw->use_simple_tx = 0;
-
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
@@ -536,31 +730,11 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
struct virtio_hw *hw = dev->data->dev_private;
struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
- uint16_t mid_idx = vq->vq_nentries >> 1;
- struct virtnet_tx *txvq = &vq->txq;
- uint16_t desc_idx;
PMD_INIT_FUNC_TRACE();
- if (hw->use_simple_tx) {
- for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
- vq->vq_ring.avail->ring[desc_idx] =
- desc_idx + mid_idx;
- vq->vq_ring.desc[desc_idx + mid_idx].next =
- desc_idx;
- vq->vq_ring.desc[desc_idx + mid_idx].addr =
- txvq->virtio_net_hdr_mem +
- offsetof(struct virtio_tx_region, tx_hdr);
- vq->vq_ring.desc[desc_idx + mid_idx].len =
- vq->hw->vtnet_hdr_size;
- vq->vq_ring.desc[desc_idx + mid_idx].flags =
- VRING_DESC_F_NEXT;
- vq->vq_ring.desc[desc_idx].flags = 0;
- }
- for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
- desc_idx++)
- vq->vq_ring.avail->ring[desc_idx] = desc_idx;
- }
+ if (hw->use_inorder_tx)
+ vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
VIRTQUEUE_DUMP(vq);
@@ -576,6 +750,19 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
* successful since it was just dequeued.
*/
error = virtqueue_enqueue_recv_refill(vq, m);
+
+ if (unlikely(error)) {
+ RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+ rte_pktmbuf_free(m);
+ }
+}
+
+static void
+virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
+{
+ int error;
+
+ error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
if (unlikely(error)) {
RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
rte_pktmbuf_free(m);
@@ -614,6 +801,15 @@ virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
}
}
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+ VIRTIO_DUMP_PACKET(m, m->data_len);
+
+ rxvq->stats.bytes += m->pkt_len;
+ virtio_update_packet_stats(&rxvq->stats, m);
+}
+
/* Optionally fill offload information in structure */
static int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
@@ -686,14 +882,6 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
return 0;
}
-static inline int
-rx_offload_enabled(struct virtio_hw *hw)
-{
- return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
-}
-
#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
@@ -709,7 +897,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
int error;
uint32_t i, nb_enqueued;
uint32_t hdr_size;
- int offload;
struct virtio_net_hdr *hdr;
nb_rx = 0;
@@ -731,7 +918,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_enqueued = 0;
hdr_size = hw->vtnet_hdr_size;
- offload = rx_offload_enabled(hw);
for (i = 0; i < num ; i++) {
rxm = rcv_pkts[i];
@@ -760,24 +946,20 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (hw->vlan_strip)
rte_vlan_strip(rxm);
- if (offload && virtio_rx_offload(rxm, hdr) < 0) {
+ if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
continue;
}
- VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
+ virtio_rx_stats_updated(rxvq, rxm);
rx_pkts[nb_rx++] = rxm;
-
- rxvq->stats.bytes += rxm->pkt_len;
- virtio_update_packet_stats(&rxvq->stats, rxm);
}
rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
- error = ENOSPC;
while (likely(!virtqueue_full(vq))) {
new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
@@ -807,6 +989,193 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
uint16_t
+virtio_recv_mergeable_pkts_inorder(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *prev;
+ uint16_t nb_used, num, nb_rx;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ int error;
+ uint32_t nb_enqueued;
+ uint32_t seg_num;
+ uint32_t seg_res;
+ uint32_t hdr_size;
+ int32_t i;
+
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+ nb_used = RTE_MIN(nb_used, nb_pkts);
+ nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
+
+ virtio_rmb();
+
+ PMD_RX_LOG(DEBUG, "used:%d", nb_used);
+
+ nb_enqueued = 0;
+ seg_num = 1;
+ seg_res = 0;
+ hdr_size = hw->vtnet_hdr_size;
+
+ num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
+
+ for (i = 0; i < num; i++) {
+ struct virtio_net_hdr_mrg_rxbuf *header;
+
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ rxm = rcv_pkts[i];
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf_inorder(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ header = (struct virtio_net_hdr_mrg_rxbuf *)
+ ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
+ - hdr_size);
+ seg_num = header->num_buffers;
+
+ if (seg_num == 0)
+ seg_num = 1;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->nb_segs = seg_num;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ rxm->port = rxvq->port_id;
+
+ rx_pkts[nb_rx] = rxm;
+ prev = rxm;
+
+ if (vq->hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
+ virtio_discard_rxbuf_inorder(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rx_pkts[nb_rx]);
+
+ seg_res = seg_num - 1;
+
+ /* Merge remaining segments */
+ while (seg_res != 0 && i < (num - 1)) {
+ i++;
+
+ rxm = rcv_pkts[i];
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[i]);
+ rxm->data_len = (uint16_t)(len[i]);
+
+ rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
+ rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
+
+ if (prev)
+ prev->next = rxm;
+
+ prev = rxm;
+ seg_res -= 1;
+ }
+
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ }
+
+ /* Last packet still need merge segments */
+ while (seg_res != 0) {
+ uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
+ VIRTIO_MBUF_BURST_SZ);
+
+ prev = rcv_pkts[nb_rx];
+ if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
+ rcv_cnt);
+ uint16_t extra_idx = 0;
+
+ rcv_cnt = num;
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
+ rxm->data_off =
+ RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
+ prev->next = rxm;
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+ rx_pkts[nb_rx]->data_len += len[extra_idx];
+ extra_idx += 1;
+ };
+ seg_res -= rcv_cnt;
+
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ virtio_discard_rxbuf_inorder(vq, prev);
+ rxvq->stats.errors++;
+ break;
+ }
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+
+ if (likely(!virtqueue_full(vq))) {
+ /* free_cnt may include mrg descs */
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *new_pkts[free_cnt];
+
+ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
+ error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
+ free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(new_pkts[i]);
+ }
+ nb_enqueued += free_cnt;
+ } else {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed += free_cnt;
+ }
+ }
+
+ if (likely(nb_enqueued)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
+uint16_t
virtio_recv_mergeable_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
@@ -825,7 +1194,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t extra_idx;
uint32_t seg_res;
uint32_t hdr_size;
- int offload;
nb_rx = 0;
if (unlikely(hw->started == 0))
@@ -843,7 +1211,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
extra_idx = 0;
seg_res = 0;
hdr_size = hw->vtnet_hdr_size;
- offload = rx_offload_enabled(hw);
while (i < nb_used) {
struct virtio_net_hdr_mrg_rxbuf *header;
@@ -888,7 +1255,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rx_pkts[nb_rx] = rxm;
prev = rxm;
- if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
+ if (hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
continue;
@@ -950,7 +1318,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
- error = ENOSPC;
while (likely(!virtqueue_full(vq))) {
new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
@@ -1053,12 +1420,124 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Enqueue Packet buffers */
- virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push);
+ virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
+ can_push, 0);
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ }
+
+ txvq->stats.packets += nb_tx;
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
+
+uint16_t
+virtio_xmit_pkts_inorder(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ uint16_t hdr_size = hw->vtnet_hdr_size;
+ uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+ struct rte_mbuf *inorder_pkts[nb_pkts];
+ int error;
+
+ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+ return nb_tx;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+
+ VIRTQUEUE_DUMP(vq);
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ virtio_rmb();
+ if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
+ virtio_xmit_cleanup_inorder(vq, nb_used);
+
+ if (unlikely(!vq->vq_free_cnt))
+ virtio_xmit_cleanup_inorder(vq, nb_used);
+
+ nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
+
+ for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ int slots, need;
+
+ /* Do VLAN tag insertion */
+ if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&txm);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(txm);
+ continue;
+ }
+ }
+
+ /* optimize ring usage */
+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ rte_mbuf_refcnt_read(txm) == 1 &&
+ RTE_MBUF_DIRECT(txm) &&
+ txm->nb_segs == 1 &&
+ rte_pktmbuf_headroom(txm) >= hdr_size &&
+ rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+ __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
+ inorder_pkts[nb_inorder_pkts] = txm;
+ nb_inorder_pkts++;
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ continue;
+ }
+
+ if (nb_inorder_pkts) {
+ virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
+ nb_inorder_pkts);
+ nb_inorder_pkts = 0;
+ }
+
+ slots = txm->nb_segs + 1;
+ need = slots - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ nb_used = VIRTQUEUE_NUSED(vq);
+ virtio_rmb();
+ need = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup_inorder(vq, need);
+
+ need = slots - vq->vq_free_cnt;
+
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ break;
+ }
+ }
+ /* Enqueue Packet buffers */
+ virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
txvq->stats.bytes += txm->pkt_len;
virtio_update_packet_stats(&txvq->stats, txm);
}
+ /* Transmit all inorder packets */
+ if (nb_inorder_pkts)
+ virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
+ nb_inorder_pkts);
+
txvq->stats.packets += nb_tx;
if (likely(nb_tx)) {
@@ -1070,5 +1549,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
}
+ VIRTQUEUE_DUMP(vq);
+
return nb_tx;
}
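[Editor's example] A brief worked example of the wrap-around arithmetic in virtio_xmit_cleanup_inorder() above, assuming a 256-entry ring: if vq_desc_tail_idx is 250 and the last completed chain ends at descriptor index 5, then free_cnt = 5 - 250 = -245, which is <= 0, so vq_nentries (256) is added to give 11; vq_ring_free_inorder() then adds those 11 reclaimed slots (indices 251-255 and 0-5) to vq_free_cnt and moves the tail index to 5.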
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 51520758..31e565b4 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -27,73 +27,6 @@
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
-uint16_t
-virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
-{
- struct virtnet_tx *txvq = tx_queue;
- struct virtqueue *vq = txvq->vq;
- struct virtio_hw *hw = vq->hw;
- uint16_t nb_used;
- uint16_t desc_idx;
- struct vring_desc *start_dp;
- uint16_t nb_tail, nb_commit;
- int i;
- uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
- uint16_t nb_tx = 0;
-
- if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
- return nb_tx;
-
- nb_used = VIRTQUEUE_NUSED(vq);
- rte_compiler_barrier();
-
- if (nb_used >= VIRTIO_TX_FREE_THRESH)
- virtio_xmit_cleanup_simple(vq);
-
- nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
- desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
- start_dp = vq->vq_ring.desc;
- nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx);
-
- if (nb_commit >= nb_tail) {
- for (i = 0; i < nb_tail; i++)
- vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
- for (i = 0; i < nb_tail; i++) {
- start_dp[desc_idx].addr =
- VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
- start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
- tx_pkts++;
- desc_idx++;
- }
- nb_commit -= nb_tail;
- desc_idx = 0;
- }
- for (i = 0; i < nb_commit; i++)
- vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
- for (i = 0; i < nb_commit; i++) {
- start_dp[desc_idx].addr =
- VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
- start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
- tx_pkts++;
- desc_idx++;
- }
-
- rte_compiler_barrier();
-
- vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
- vq->vq_avail_idx += nb_pkts;
- vq->vq_ring.avail->idx = vq->vq_avail_idx;
- txvq->stats.packets += nb_pkts;
-
- if (likely(nb_pkts)) {
- if (unlikely(virtqueue_kick_prepare(vq)))
- virtqueue_notify(vq);
- }
-
- return nb_pkts;
-}
-
int __attribute__((cold))
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index 303904d6..dc97e4cc 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -55,53 +55,4 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
vq_update_avail_idx(vq);
}
-#define VIRTIO_TX_FREE_THRESH 32
-#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
-#define VIRTIO_TX_FREE_NR 32
-/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
-static inline void
-virtio_xmit_cleanup_simple(struct virtqueue *vq)
-{
- uint16_t i, desc_idx;
- uint32_t nb_free = 0;
- struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];
-
- desc_idx = (uint16_t)(vq->vq_used_cons_idx &
- ((vq->vq_nentries >> 1) - 1));
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = rte_pktmbuf_prefree_seg(m);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = rte_pktmbuf_prefree_seg(m);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool))
- free[nb_free++] = m;
- else {
- rte_mempool_put_bulk(free[0]->pool,
- (void **)free,
- RTE_MIN(RTE_DIM(free),
- nb_free));
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free,
- RTE_MIN(RTE_DIM(free), nb_free));
- } else {
- for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = rte_pktmbuf_prefree_seg(m);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
- vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
- vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
-}
-
#endif /* _VIRTIO_RXTX_SIMPLE_H_ */
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 4322527f..7df600b0 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -371,11 +371,13 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
1ULL << VIRTIO_NET_F_GUEST_CSUM | \
1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
+ 1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_VERSION_1)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac, char **ifname)
+ int cq, int queue_size, const char *mac, char **ifname,
+ int mrg_rxbuf, int in_order)
{
pthread_mutex_init(&dev->mutex, NULL);
snprintf(dev->path, PATH_MAX, "%s", path);
@@ -384,6 +386,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
dev->mac_specified = 0;
+ dev->unsupported_features = 0;
parse_mac(dev, mac);
if (*ifname) {
@@ -419,10 +422,22 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
}
- if (dev->mac_specified)
+ if (!mrg_rxbuf) {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MRG_RXBUF);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);
+ }
+
+ if (!in_order) {
+ dev->device_features &= ~(1ull << VIRTIO_F_IN_ORDER);
+ dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
+ }
+
+ if (dev->mac_specified) {
dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
- else
+ } else {
dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
+ }
if (cq) {
/* device does not really need to know anything about CQ,
@@ -437,6 +452,14 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
}
/* The backend will not report this feature, we add it explicitly */
@@ -444,6 +467,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features |= (1ull << VIRTIO_NET_F_STATUS);
dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
+ dev->unsupported_features |= ~VIRTIO_USER_SUPPORTED_FEATURES;
if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
virtio_user_mem_event_cb, dev)) {
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index d2d4cb82..d6e0e137 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -33,6 +33,7 @@ struct virtio_user_dev {
* and will be sync with device
*/
uint64_t device_features; /* supported features by device */
+ uint64_t unsupported_features; /* unsupported features mask */
uint8_t status;
uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
@@ -47,7 +48,8 @@ int is_vhost_user_by_type(const char *path);
int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);
int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac, char **ifname);
+ int cq, int queue_size, const char *mac, char **ifname,
+ int mrg_rxbuf, int in_order);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 1c102ca7..525d16ca 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -30,7 +30,6 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
int ret;
int flag;
int connectfd;
- uint64_t features = dev->device_features;
struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
connectfd = accept(dev->listenfd, NULL, NULL);
@@ -45,15 +44,8 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
return -1;
}
- features &= ~dev->device_features;
- /* For following bits, vhost-user doesn't really need to know */
- features &= ~(1ull << VIRTIO_NET_F_MAC);
- features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
- features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
- features &= ~(1ull << VIRTIO_NET_F_STATUS);
- if (features)
- PMD_INIT_LOG(ERR, "WARNING: Some features 0x%" PRIx64 " are not supported by vhost-user!",
- features);
+ /* unmask vhost-user unsupported features */

+ dev->device_features &= ~(dev->unsupported_features);
dev->features &= dev->device_features;
@@ -366,8 +358,12 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
VIRTIO_USER_ARG_INTERFACE_NAME,
-#define VIRTIO_USER_ARG_SERVER_MODE "server"
+#define VIRTIO_USER_ARG_SERVER_MODE "server"
VIRTIO_USER_ARG_SERVER_MODE,
+#define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf"
+ VIRTIO_USER_ARG_MRG_RXBUF,
+#define VIRTIO_USER_ARG_IN_ORDER "in_order"
+ VIRTIO_USER_ARG_IN_ORDER,
NULL
};
@@ -440,7 +436,8 @@ virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
hw->use_msix = 1;
hw->modern = 0;
hw->use_simple_rx = 0;
- hw->use_simple_tx = 0;
+ hw->use_inorder_rx = 0;
+ hw->use_inorder_tx = 0;
hw->virtio_user_dev = dev;
return eth_dev;
}
@@ -470,6 +467,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
+ uint64_t mrg_rxbuf = 1;
+ uint64_t in_order = 1;
char *path = NULL;
char *ifname = NULL;
char *mac_addr = NULL;
@@ -569,6 +568,24 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
goto end;
}
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
+ &get_integer_arg, &mrg_rxbuf) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_MRG_RXBUF);
+ goto end;
+ }
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
+ &get_integer_arg, &in_order) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_IN_ORDER);
+ goto end;
+ }
+ }
+
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
struct virtio_user_dev *vu_dev;
@@ -585,7 +602,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
else
vu_dev->is_server = false;
if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
- queue_size, mac_addr, &ifname) < 0) {
+ queue_size, mac_addr, &ifname, mrg_rxbuf,
+ in_order) < 0) {
PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
virtio_user_eth_dev_free(eth_dev);
goto end;
@@ -663,4 +681,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
"cq=<int> "
"queue_size=<int> "
"queues=<int> "
- "iface=<string>");
+ "iface=<string> "
+ "server=<0|1> "
+ "mrg_rxbuf=<0|1> "
+ "in_order=<0|1>");
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index a7d0a9cb..56a77cc7 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -74,6 +74,14 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
desc_idx = used_idx;
rte_pktmbuf_free(vq->sw_ring[desc_idx]);
vq->vq_free_cnt++;
+ } else if (hw->use_inorder_rx) {
+ desc_idx = (uint16_t)uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq_ring_free_inorder(vq, desc_idx, 1);
} else {
desc_idx = (uint16_t)uep->id;
dxp = &vq->vq_descx[desc_idx];
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 14364f35..26518ed9 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -306,6 +306,8 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
+ uint16_t num);
static inline void
vq_update_avail_idx(struct virtqueue *vq)