author    Luca Boccassi <luca.boccassi@gmail.com>    2017-08-16 18:42:05 +0100
committer Luca Boccassi <luca.boccassi@gmail.com>    2017-08-16 18:46:04 +0100
commit    f239aed5e674965691846e8ce3f187dd47523689 (patch)
tree      a153a3125c6e183c73871a8ecaa4b285fed5fbd5 /lib/librte_vhost
parent    bf7567fd2a5b0b28ab724046143c24561d38d015 (diff)
New upstream version 17.08
Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'lib/librte_vhost')
-rw-r--r--  lib/librte_vhost/rte_vhost.h             16
-rw-r--r--  lib/librte_vhost/rte_vhost_version.map    7
-rw-r--r--  lib/librte_vhost/socket.c                46
-rw-r--r--  lib/librte_vhost/vhost.c                 32
-rw-r--r--  lib/librte_vhost/vhost.h                 19
-rw-r--r--  lib/librte_vhost/vhost_user.c            73
-rw-r--r--  lib/librte_vhost/virtio_net.c            58
7 files changed, 183 insertions, 68 deletions
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 605e47cb..8c974eb1 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -120,7 +120,7 @@ struct vhost_device_ops {
* @return
* the host virtual address on success, 0 on failure
*/
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
{
struct rte_vhost_mem_region *reg;
@@ -365,7 +365,7 @@ struct rte_mempool;
/**
* This function adds buffers to the virtio devices RX virtqueue. Buffers can
* be received from the physical port or from another virtual device. A packet
- * count is returned to indicate the number of packets that were succesfully
+ * count is returned to indicate the number of packets that were successfully
* added to the RX queue.
* @param vid
* vhost device ID
@@ -432,6 +432,18 @@ int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
struct rte_vhost_vring *vring);
+/**
+ * Get vhost RX queue avail count.
+ *
+ * @param vid
+ * vhost device ID
+ * @param qid
+ * virtio queue index in mq case
+ * @return
+ * num of desc available
+ */
+uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);
+
#ifdef __cplusplus
}
#endif
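
Note on the new export above: rte_vhost_rx_queue_count() only reads the avail index, so it is a cheap polling helper. A minimal usage sketch, assuming a valid vhost device ID and the usual virtio numbering where odd queue indices are the ones the host dequeues from; the function name and qid choice below are illustrative assumptions, not part of this patch:

    /* Sketch only: poll how many descriptors the guest has made available
     * on virtio queue 1 before spending time on a dequeue pass.  The vid
     * value and the qid mapping are assumptions for the example. */
    #include <stdint.h>
    #include <rte_vhost.h>

    static int
    guest_has_pending_pkts(int vid)
    {
        /* qid must be odd, per the check in rte_vhost_rx_queue_count(). */
        uint32_t avail = rte_vhost_rx_queue_count(vid, 1);

        /* When avail > 0, a dequeue call such as rte_vhost_dequeue_burst()
         * on the same queue is expected to return packets. */
        return avail > 0;
    }
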
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index 07858732..1e704953 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -45,3 +45,10 @@ DPDK_17.05 {
rte_vhost_log_write;
} DPDK_16.07;
+
+DPDK_17.08 {
+ global:
+
+ rte_vhost_rx_queue_count;
+
+} DPDK_17.05;
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index c7f99b08..41aa3f9b 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -445,13 +445,22 @@ vhost_user_reconnect_init(void)
{
int ret;
- pthread_mutex_init(&reconn_list.mutex, NULL);
+ ret = pthread_mutex_init(&reconn_list.mutex, NULL);
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG, "failed to initialize mutex");
+ return ret;
+ }
TAILQ_INIT(&reconn_list.head);
ret = pthread_create(&reconn_tid, NULL,
vhost_user_client_reconnect, NULL);
- if (ret < 0)
+ if (ret < 0) {
RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");
+ if (pthread_mutex_destroy(&reconn_list.mutex)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to destroy reconnect mutex");
+ }
+ }
return ret;
}
@@ -613,8 +622,19 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
goto out;
memset(vsocket, 0, sizeof(struct vhost_user_socket));
vsocket->path = strdup(path);
+ if (vsocket->path == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "error: failed to copy socket path string\n");
+ free(vsocket);
+ goto out;
+ }
TAILQ_INIT(&vsocket->conn_list);
- pthread_mutex_init(&vsocket->conn_mutex, NULL);
+ ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
+ if (ret) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "error: failed to init connection mutex\n");
+ goto out_free;
+ }
vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
/*
@@ -636,9 +656,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
if (vsocket->reconnect && reconn_tid == 0) {
if (vhost_user_reconnect_init() < 0) {
- free(vsocket->path);
- free(vsocket);
- goto out;
+ goto out_mutex;
}
}
} else {
@@ -646,13 +664,22 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
}
ret = create_unix_socket(vsocket);
if (ret < 0) {
- free(vsocket->path);
- free(vsocket);
- goto out;
+ goto out_mutex;
}
vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;
+ pthread_mutex_unlock(&vhost_user.mutex);
+ return ret;
+
+out_mutex:
+ if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "error: failed to destroy connection mutex\n");
+ }
+out_free:
+ free(vsocket->path);
+ free(vsocket);
out:
pthread_mutex_unlock(&vhost_user.mutex);
@@ -724,6 +751,7 @@ rte_vhost_driver_unregister(const char *path)
}
pthread_mutex_unlock(&vsocket->conn_mutex);
+ pthread_mutex_destroy(&vsocket->conn_mutex);
free(vsocket->path);
free(vsocket);
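
The register path above now unwinds partially constructed state through staged labels (out_mutex, out_free, out) instead of repeating free() calls at each failure site. A condensed, self-contained sketch of that cleanup shape, with placeholder names rather than the real vhost socket code:

    /* Placeholder types and names; only the staged-goto unwind pattern is
     * the point, mirroring out_mutex -> out_free -> out in the patch. */
    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct sock {
        char *path;
        pthread_mutex_t lock;
    };

    static struct sock *
    sock_create(const char *path)
    {
        struct sock *s = calloc(1, sizeof(*s));

        if (s == NULL)
            return NULL;

        s->path = strdup(path);
        if (s->path == NULL)
            goto out_free;

        if (pthread_mutex_init(&s->lock, NULL) != 0)
            goto out_free_path;

        return s;                       /* fully constructed */

    out_free_path:                      /* undo in reverse order */
        free(s->path);
    out_free:
        free(s);
        return NULL;
    }
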
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 1f565fbb..0b6aa1cc 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -40,6 +40,7 @@
#include <numaif.h>
#endif
+#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
@@ -272,7 +273,7 @@ rte_vhost_get_mtu(int vid, uint16_t *mtu)
if (!(dev->flags & VIRTIO_DEV_READY))
return -EAGAIN;
- if (!(dev->features & VIRTIO_NET_F_MTU))
+ if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
return -ENOTSUP;
*mtu = dev->mtu;
@@ -295,7 +296,8 @@ rte_vhost_get_numa_node(int vid)
MPOL_F_NODE | MPOL_F_ADDR);
if (ret < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) failed to query numa node: %d\n", vid, ret);
+ "(%d) failed to query numa node: %s\n",
+ vid, rte_strerror(errno));
return -1;
}
@@ -475,3 +477,29 @@ rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
vhost_log_used_vring(dev, vq, offset, len);
}
+
+uint32_t
+rte_vhost_rx_queue_count(int vid, uint16_t qid)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (dev == NULL)
+ return 0;
+
+ if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, qid);
+ return 0;
+ }
+
+ vq = dev->virtqueue[qid];
+ if (vq == NULL)
+ return 0;
+
+ if (unlikely(vq->enabled == 0 || vq->avail == NULL))
+ return 0;
+
+ return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+}
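
The count returned above is the difference between two free-running 16-bit ring indices, so it is meaningful modulo 2^16 even after the avail index wraps. A tiny standalone illustration of that arithmetic (not DPDK code; the cast makes the modular subtraction explicit):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint16_t avail_idx = 5;          /* wrapped past 65535 back to 5 */
        uint16_t last_avail_idx = 65533; /* consumer is 8 entries behind */

        /* (5 - 65533) mod 65536 == 8 pending descriptors. */
        assert((uint16_t)(avail_idx - last_avail_idx) == 8);
        return 0;
    }
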
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index ddd8a9c4..6fe72aeb 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -201,13 +201,22 @@ struct virtio_net {
#define VHOST_LOG_PAGE 4096
-static inline void __attribute__((always_inline))
+/*
+ * Atomically set a bit in memory.
+ */
+static __rte_always_inline void
+vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
+{
+ __sync_fetch_and_or_8(addr, (1U << nr));
+}
+
+static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
- log_base[page / 8] |= 1 << (page % 8);
+ vhost_set_bit(page % 8, &log_base[page / 8]);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
uint64_t page;
@@ -229,7 +238,7 @@ vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
}
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t offset, uint64_t len)
{
@@ -272,7 +281,7 @@ extern uint64_t VHOST_FEATURES;
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/* Convert guest physical address to host physical address */
-static inline phys_addr_t __attribute__((always_inline))
+static __rte_always_inline phys_addr_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
uint32_t i;
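
The vhost_set_bit() helper introduced above turns the dirty-log update into a single atomic OR, which matters because concurrent enqueue and dequeue paths can mark pages whose bits share the same log byte. For reference, a hedged sketch of an equivalent bit set using the __atomic builtin family (an alternative spelling, not what the patch uses):

    #include <stdint.h>

    /* Equivalent atomic bit set on a shared dirty-log byte; the patch
     * itself uses __sync_fetch_and_or_8(). */
    static inline void
    set_bit_atomic(unsigned int nr, volatile uint8_t *addr)
    {
        __atomic_fetch_or(addr, (uint8_t)(1U << nr), __ATOMIC_RELAXED);
    }
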
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 28808815..ad2e8d38 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -168,8 +168,12 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
uint64_t vhost_features = 0;
rte_vhost_driver_get_features(dev->ifname, &vhost_features);
- if (features & ~vhost_features)
+ if (features & ~vhost_features) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%d) received invalid negotiated features.\n",
+ dev->vid);
return -1;
+ }
if ((dev->flags & VIRTIO_DEV_RUNNING) && dev->features != features) {
if (dev->notify_ops->features_changed)
@@ -197,11 +201,11 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
*/
static int
vhost_user_set_vring_num(struct virtio_net *dev,
- struct vhost_vring_state *state)
+ VhostUserMsg *msg)
{
- struct vhost_virtqueue *vq = dev->virtqueue[state->index];
+ struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
- vq->size = state->num;
+ vq->size = msg->payload.state.num;
if (dev->dequeue_zero_copy) {
vq->nr_zmbuf = 0;
@@ -332,7 +336,7 @@ qva_to_vva(struct virtio_net *dev, uint64_t qva)
* This function then converts these to our address space.
*/
static int
-vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
+vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
{
struct vhost_virtqueue *vq;
@@ -340,11 +344,11 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
return -1;
/* addr->index refers to the queue index. The txq 1, rxq is 0. */
- vq = dev->virtqueue[addr->index];
+ vq = dev->virtqueue[msg->payload.addr.index];
/* The addresses are converted from QEMU virtual to Vhost virtual. */
vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
- addr->desc_user_addr);
+ msg->payload.addr.desc_user_addr);
if (vq->desc == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find desc ring address.\n",
@@ -352,11 +356,11 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
return -1;
}
- dev = numa_realloc(dev, addr->index);
- vq = dev->virtqueue[addr->index];
+ dev = numa_realloc(dev, msg->payload.addr.index);
+ vq = dev->virtqueue[msg->payload.addr.index];
vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
- addr->avail_user_addr);
+ msg->payload.addr.avail_user_addr);
if (vq->avail == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find avail ring address.\n",
@@ -365,7 +369,7 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
}
vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
- addr->used_user_addr);
+ msg->payload.addr.used_user_addr);
if (vq->used == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find used ring address.\n",
@@ -382,7 +386,7 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
vq->last_avail_idx = vq->used->idx;
}
- vq->log_guest_addr = addr->log_guest_addr;
+ vq->log_guest_addr = msg->payload.addr.log_guest_addr;
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
dev->vid, vq->desc);
@@ -401,10 +405,12 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
*/
static int
vhost_user_set_vring_base(struct virtio_net *dev,
- struct vhost_vring_state *state)
+ VhostUserMsg *msg)
{
- dev->virtqueue[state->index]->last_used_idx = state->num;
- dev->virtqueue[state->index]->last_avail_idx = state->num;
+ dev->virtqueue[msg->payload.state.index]->last_used_idx =
+ msg->payload.state.num;
+ dev->virtqueue[msg->payload.state.index]->last_avail_idx =
+ msg->payload.state.num;
return 0;
}
@@ -517,6 +523,13 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
dev->max_guest_pages = 8;
dev->guest_pages = malloc(dev->max_guest_pages *
sizeof(struct guest_page));
+ if (dev->guest_pages == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%d) failed to allocate memory "
+ "for dev->guest_pages\n",
+ dev->vid);
+ return -1;
+ }
}
dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
@@ -695,9 +708,9 @@ free_zmbufs(struct vhost_virtqueue *vq)
*/
static int
vhost_user_get_vring_base(struct virtio_net *dev,
- struct vhost_vring_state *state)
+ VhostUserMsg *msg)
{
- struct vhost_virtqueue *vq = dev->virtqueue[state->index];
+ struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
/* We have to stop the queue (virtio) if it is running. */
if (dev->flags & VIRTIO_DEV_RUNNING) {
@@ -708,10 +721,11 @@ vhost_user_get_vring_base(struct virtio_net *dev,
dev->flags &= ~VIRTIO_DEV_READY;
/* Here we are safe to get the last used index */
- state->num = vq->last_used_idx;
+ msg->payload.state.num = vq->last_used_idx;
RTE_LOG(INFO, VHOST_CONFIG,
- "vring base idx:%d file:%d\n", state->index, state->num);
+ "vring base idx:%d file:%d\n", msg->payload.state.index,
+ msg->payload.state.num);
/*
* Based on current qemu vhost-user implementation, this message is
* sent and only sent in vhost_vring_stop.
@@ -736,18 +750,19 @@ vhost_user_get_vring_base(struct virtio_net *dev,
*/
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
- struct vhost_vring_state *state)
+ VhostUserMsg *msg)
{
- int enable = (int)state->num;
+ int enable = (int)msg->payload.state.num;
RTE_LOG(INFO, VHOST_CONFIG,
"set queue enable: %d to qp idx: %d\n",
- enable, state->index);
+ enable, msg->payload.state.index);
if (dev->notify_ops->vring_state_changed)
- dev->notify_ops->vring_state_changed(dev->vid, state->index, enable);
+ dev->notify_ops->vring_state_changed(dev->vid,
+ msg->payload.state.index, enable);
- dev->virtqueue[state->index]->enabled = enable;
+ dev->virtqueue[msg->payload.state.index]->enabled = enable;
return 0;
}
@@ -1036,17 +1051,17 @@ vhost_user_msg_handler(int vid, int fd)
break;
case VHOST_USER_SET_VRING_NUM:
- vhost_user_set_vring_num(dev, &msg.payload.state);
+ vhost_user_set_vring_num(dev, &msg);
break;
case VHOST_USER_SET_VRING_ADDR:
- vhost_user_set_vring_addr(dev, &msg.payload.addr);
+ vhost_user_set_vring_addr(dev, &msg);
break;
case VHOST_USER_SET_VRING_BASE:
- vhost_user_set_vring_base(dev, &msg.payload.state);
+ vhost_user_set_vring_base(dev, &msg);
break;
case VHOST_USER_GET_VRING_BASE:
- vhost_user_get_vring_base(dev, &msg.payload.state);
+ vhost_user_get_vring_base(dev, &msg);
msg.size = sizeof(msg.payload.state);
send_vhost_message(fd, &msg);
break;
@@ -1071,7 +1086,7 @@ vhost_user_msg_handler(int vid, int fd)
break;
case VHOST_USER_SET_VRING_ENABLE:
- vhost_user_set_vring_enable(dev, &msg.payload.state);
+ vhost_user_set_vring_enable(dev, &msg);
break;
case VHOST_USER_SEND_RARP:
vhost_user_send_rarp(dev, &msg);
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 48219e05..a5f0eeba 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -55,7 +55,7 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t to, uint16_t from, uint16_t size)
{
@@ -67,7 +67,7 @@ do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
size * sizeof(struct vring_used_elem));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
@@ -95,7 +95,7 @@ flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
sizeof(vq->used->idx));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_shadow_used_ring(struct vhost_virtqueue *vq,
uint16_t desc_idx, uint16_t len)
{
@@ -114,11 +114,16 @@ update_shadow_used_ring(struct vhost_virtqueue *vq,
static void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
- if (m_buf->ol_flags & PKT_TX_L4_MASK) {
+ uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
+
+ if (m_buf->ol_flags & PKT_TX_TCP_SEG)
+ csum_l4 |= PKT_TX_TCP_CKSUM;
+
+ if (csum_l4) {
net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
- switch (m_buf->ol_flags & PKT_TX_L4_MASK) {
+ switch (csum_l4) {
case PKT_TX_TCP_CKSUM:
net_hdr->csum_offset = (offsetof(struct tcp_hdr,
cksum));
@@ -138,6 +143,15 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
}
+ /* IP cksum verification cannot be bypassed, then calculate here */
+ if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
+ struct ipv4_hdr *ipv4_hdr;
+
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
+ m_buf->l2_len);
+ ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+ }
+
if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
if (m_buf->ol_flags & PKT_TX_IPV4)
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
@@ -153,7 +167,7 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
}
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
{
@@ -233,11 +247,11 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
/**
* This function adds buffers to the virtio devices RX virtqueue. Buffers can
* be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that are succesfully
+ * count is returned to indicate the number of packets that are successfully
* added to the RX queue. This function works when the mbuf is scattered, but
* it doesn't support the mergeable feature.
*/
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
@@ -335,7 +349,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
return count;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t avail_idx, uint32_t *vec_idx,
struct buf_vector *buf_vec, uint16_t *desc_chain_head,
@@ -424,7 +438,7 @@ reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
struct buf_vector *buf_vec, uint16_t num_buffers)
{
@@ -512,7 +526,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
return 0;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
@@ -601,9 +615,11 @@ static inline bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
if (dev->features &
- (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_ECN |
- VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
- VIRTIO_NET_F_HOST_UFO))
+ ((1ULL << VIRTIO_NET_F_CSUM) |
+ (1ULL << VIRTIO_NET_F_HOST_ECN) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+ (1ULL << VIRTIO_NET_F_HOST_UFO)))
return true;
return false;
@@ -655,7 +671,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
}
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
{
uint16_t l4_proto = 0;
@@ -743,13 +759,13 @@ make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac)
return 0;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
zmbuf->in_use = 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx,
struct rte_mempool *mbuf_pool)
@@ -899,7 +915,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
return 0;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t used_idx, uint32_t desc_idx)
{
@@ -910,7 +926,7 @@ update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
sizeof(vq->used->ring[used_idx]));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t count)
{
@@ -930,7 +946,7 @@ update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
eventfd_write(vq->callfd, (eventfd_t)1);
}
-static inline struct zcopy_mbuf *__attribute__((always_inline))
+static __rte_always_inline struct zcopy_mbuf *
get_zmbuf(struct vhost_virtqueue *vq)
{
uint16_t i;
@@ -961,7 +977,7 @@ again:
return NULL;
}
-static inline bool __attribute__((always_inline))
+static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
while (m) {