Diffstat (limited to 'lib/librte_vhost')
 lib/librte_vhost/iotlb.c      |  20
 lib/librte_vhost/iotlb.h      |   3
 lib/librte_vhost/socket.c     |   7
 lib/librte_vhost/vhost.c      |  14
 lib/librte_vhost/vhost.h      |   6
 lib/librte_vhost/vhost_user.c | 104
 lib/librte_vhost/virtio_net.c |  46
 7 files changed, 181 insertions(+), 19 deletions(-)
diff --git a/lib/librte_vhost/iotlb.c b/lib/librte_vhost/iotlb.c
index b74cc6a7..c11ebcaa 100644
--- a/lib/librte_vhost/iotlb.c
+++ b/lib/librte_vhost/iotlb.c
@@ -51,6 +51,9 @@ struct vhost_iotlb_entry {
#define IOTLB_CACHE_SIZE 2048
static void
+vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq);
+
+static void
vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
{
struct vhost_iotlb_entry *node, *temp_node;
@@ -95,9 +98,11 @@ vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
if (ret) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "IOTLB pool empty, clear pending misses\n");
- vhost_user_iotlb_pending_remove_all(vq);
+ RTE_LOG(DEBUG, VHOST_CONFIG, "IOTLB pool empty, clear entries\n");
+ if (!TAILQ_EMPTY(&vq->iotlb_pending_list))
+ vhost_user_iotlb_pending_remove_all(vq);
+ else
+ vhost_user_iotlb_cache_random_evict(vq);
ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
if (ret) {
RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
@@ -115,7 +120,7 @@ vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
}
-static void
+void
vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
uint64_t iova, uint64_t size, uint8_t perm)
{
@@ -186,8 +191,11 @@ vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
if (ret) {
- RTE_LOG(DEBUG, VHOST_CONFIG, "IOTLB pool empty, evict one entry\n");
- vhost_user_iotlb_cache_random_evict(vq);
+ RTE_LOG(DEBUG, VHOST_CONFIG, "IOTLB pool empty, clear entries\n");
+ if (!TAILQ_EMPTY(&vq->iotlb_list))
+ vhost_user_iotlb_cache_random_evict(vq);
+ else
+ vhost_user_iotlb_pending_remove_all(vq);
ret = rte_mempool_get(vq->iotlb_pool, (void **)&new_node);
if (ret) {
RTE_LOG(ERR, VHOST_CONFIG, "IOTLB pool still empty, failure\n");
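Both allocation paths above now share one reclaim policy: when the IOTLB mempool is exhausted, reclaim from the caller's own list first, and fall back to the other list only when the own list is already empty. Previously, a pool fully consumed by cache entries made vhost_user_iotlb_pending_insert() clear an already-empty pending list and fail anyway (and vice versa for the cache path). A condensed sketch of the policy, not part of the patch, reusing the helpers the file already defines:

/*
 * Sketch only: get an entry from the IOTLB mempool, reclaiming from the
 * caller's own list first and from the other list as a last resort.
 */
static int
iotlb_pool_get_with_reclaim(struct vhost_virtqueue *vq,
		struct vhost_iotlb_entry **node, bool for_pending)
{
	if (rte_mempool_get(vq->iotlb_pool, (void **)node) == 0)
		return 0;

	if (for_pending) {
		if (!TAILQ_EMPTY(&vq->iotlb_pending_list))
			vhost_user_iotlb_pending_remove_all(vq);
		else
			vhost_user_iotlb_cache_random_evict(vq);
	} else {
		if (!TAILQ_EMPTY(&vq->iotlb_list))
			vhost_user_iotlb_cache_random_evict(vq);
		else
			vhost_user_iotlb_pending_remove_all(vq);
	}

	/* Second attempt; may still fail if nothing could be reclaimed. */
	return rte_mempool_get(vq->iotlb_pool, (void **)node);
}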
diff --git a/lib/librte_vhost/iotlb.h b/lib/librte_vhost/iotlb.h
index f1a050e4..e7083e37 100644
--- a/lib/librte_vhost/iotlb.h
+++ b/lib/librte_vhost/iotlb.h
@@ -71,6 +71,9 @@ bool vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
uint8_t perm);
void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq, uint64_t iova,
uint8_t perm);
+void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq, uint64_t iova,
+ uint64_t size, uint8_t perm);
+
int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index);
#endif /* _VHOST_IOTLB_H_ */
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 422da002..811e6bf1 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -472,7 +472,7 @@ vhost_user_reconnect_init(void)
ret = pthread_create(&reconn_tid, NULL,
vhost_user_client_reconnect, NULL);
- if (ret < 0) {
+ if (ret != 0) {
RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");
if (pthread_mutex_destroy(&reconn_list.mutex)) {
RTE_LOG(ERR, VHOST_CONFIG,
@@ -678,9 +678,8 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
if (vsocket->reconnect && reconn_tid == 0) {
- if (vhost_user_reconnect_init() < 0) {
+ if (vhost_user_reconnect_init() != 0)
goto out_mutex;
- }
}
} else {
vsocket->is_server = true;
@@ -837,7 +836,7 @@ rte_vhost_driver_start(const char *path)
if (fdset_tid == 0) {
int ret = pthread_create(&fdset_tid, NULL, fdset_event_dispatch,
&vhost_user.fdset);
- if (ret < 0)
+ if (ret != 0)
RTE_LOG(ERR, VHOST_CONFIG,
"failed to create fdset handling thread");
}
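All three hunks above fix the same bug: unlike most POSIX calls, pthread_create() returns 0 on success and a positive error number (EAGAIN, EINVAL, EPERM) on failure. It never returns -1, so `ret < 0` could never detect a failed thread creation. A standalone illustration, not taken from the patch:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static void *worker(void *arg)
{
	(void)arg;
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int ret = pthread_create(&tid, NULL, worker, NULL);

	if (ret != 0) {
		/* ret holds the error number directly; errno is not set. */
		fprintf(stderr, "pthread_create: %s\n", strerror(ret));
		return 1;
	}
	pthread_join(tid, NULL);
	return 0;
}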
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 4f8b73a0..51ea720a 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -71,7 +71,9 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (tmp_size == size)
return vva;
- if (!vhost_user_iotlb_pending_miss(vq, iova + tmp_size, perm)) {
+ iova += tmp_size;
+
+ if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
/*
* iotlb_lock is read-locked for a full burst,
* but it only protects the iotlb cache.
@@ -81,8 +83,13 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
*/
vhost_user_iotlb_rd_unlock(vq);
- vhost_user_iotlb_pending_insert(vq, iova + tmp_size, perm);
- vhost_user_iotlb_miss(dev, iova + tmp_size, perm);
+ vhost_user_iotlb_pending_insert(vq, iova, perm);
+ if (vhost_user_iotlb_miss(dev, iova, perm)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+ iova);
+ vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
+ }
vhost_user_iotlb_rd_lock(vq);
}
@@ -259,6 +266,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
dev->virtqueue[vring_idx] = vq;
init_vring_queue(dev, vring_idx);
+ rte_spinlock_init(&vq->access_lock);
dev->nr_vring += 1;
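The __vhost_iova_to_vva() change matters because vhost_user_iotlb_pending_miss() returning true suppresses further miss requests for that IOVA: if sending the IOTLB miss message fails and the pending entry were left in place, no retry would ever be issued and the translation would stall forever. Hence the rollback through the newly exported vhost_user_iotlb_pending_remove(). A condensed view of the pattern, mirroring the hunk above:

	if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
		vhost_user_iotlb_rd_unlock(vq);
		vhost_user_iotlb_pending_insert(vq, iova, perm);
		if (vhost_user_iotlb_miss(dev, iova, perm))
			/*
			 * Roll back: a stale pending entry would make
			 * pending_miss() keep returning true, so no
			 * further miss request would ever be sent.
			 */
			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
		vhost_user_iotlb_rd_lock(vq);
	}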
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 1cc81c17..c8f2a817 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -108,12 +108,14 @@ struct vhost_virtqueue {
/* Backend value to determine if device should be started/stopped */
int backend;
+ int enabled;
+ int access_ok;
+ rte_spinlock_t access_lock;
+
/* Used to notify the guest (trigger interrupt) */
int callfd;
/* Currently unused as polling mode is enabled */
int kickfd;
- int enabled;
- int access_ok;
/* Physical address of used ring, for logging */
uint64_t log_guest_addr;
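Moving enabled and access_ok next to the new access_lock keeps the state the lock protects in one place. A bare illustration of the rte_spinlock API the field relies on (not from the patch; the real users are in the virtio_net.c hunks below):

#include <rte_spinlock.h>

static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;

/* Blocking acquisition, as used by the enqueue paths below. */
static void enqueue_style_user(void)
{
	rte_spinlock_lock(&lock);
	/* ... touch protected ring state ... */
	rte_spinlock_unlock(&lock);
}

/* Non-blocking attempt, as used by rte_vhost_dequeue_burst() below. */
static int dequeue_style_user(void)
{
	if (rte_spinlock_trylock(&lock) == 0)
		return 0;	/* contended: behave as if the ring were empty */
	/* ... touch protected ring state ... */
	rte_spinlock_unlock(&lock);
	return 1;
}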
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index f4c7ce46..3acaacf5 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -573,6 +573,30 @@ dump_guest_pages(struct virtio_net *dev)
#define dump_guest_pages(dev)
#endif
+static bool
+vhost_memory_changed(struct VhostUserMemory *new,
+ struct rte_vhost_memory *old)
+{
+ uint32_t i;
+
+ if (new->nregions != old->nregions)
+ return true;
+
+ for (i = 0; i < new->nregions; ++i) {
+ VhostUserMemoryRegion *new_r = &new->regions[i];
+ struct rte_vhost_mem_region *old_r = &old->regions[i];
+
+ if (new_r->guest_phys_addr != old_r->guest_phys_addr)
+ return true;
+ if (new_r->memory_size != old_r->size)
+ return true;
+ if (new_r->userspace_addr != old_r->guest_user_addr)
+ return true;
+ }
+
+ return false;
+}
+
static int
vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
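vhost_memory_changed() lets the early-return path added to vhost_user_set_mem_table() (next hunk) skip a full unmap/remap cycle when the master resends an identical memory table. The field-by-field correspondence it checks, as a comment table (sketch):

/*
 * Correspondence checked by vhost_memory_changed() (protocol message
 * vs. the cached rte_vhost_memory copy):
 *
 *   VhostUserMemoryRegion     struct rte_vhost_mem_region
 *   ---------------------     ---------------------------
 *   guest_phys_addr      ==   guest_phys_addr
 *   memory_size          ==   size
 *   userspace_addr       ==   guest_user_addr
 *
 * The mmap fds are deliberately not compared: when the layout is
 * unchanged, the existing mappings are kept and the newly received
 * fds are simply closed (see the next hunk).
 */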
@@ -585,6 +609,16 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
uint32_t i;
int fd;
+ if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "(%d) memory regions not changed\n", dev->vid);
+
+ for (i = 0; i < memory.nregions; i++)
+ close(pmsg->fds[i]);
+
+ return 0;
+ }
+
if (dev->mem) {
free_mem_region(dev);
rte_free(dev->mem);
@@ -1190,12 +1224,47 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
return alloc_vring_queue(dev, vring_idx);
}
+static void
+vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
+{
+ unsigned int i = 0;
+ unsigned int vq_num = 0;
+
+ while (vq_num < dev->nr_vring) {
+ struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+ if (vq) {
+ rte_spinlock_lock(&vq->access_lock);
+ vq_num++;
+ }
+ i++;
+ }
+}
+
+static void
+vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
+{
+ unsigned int i = 0;
+ unsigned int vq_num = 0;
+
+ while (vq_num < dev->nr_vring) {
+ struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+ if (vq) {
+ rte_spinlock_unlock(&vq->access_lock);
+ vq_num++;
+ }
+ i++;
+ }
+}
+
int
vhost_user_msg_handler(int vid, int fd)
{
struct virtio_net *dev;
struct VhostUserMsg msg;
int ret;
+ int unlock_required = 0;
dev = get_device(vid);
if (dev == NULL)
@@ -1241,6 +1310,38 @@ vhost_user_msg_handler(int vid, int fd)
return -1;
}
+ /*
+ * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE,
+ * since it is sent when virtio stops and device is destroyed.
+ * destroy_device waits for queues to be inactive, so it is safe.
+ * Otherwise taking the access_lock would cause a deadlock.
+ */
+ switch (msg.request.master) {
+ case VHOST_USER_SET_FEATURES:
+ case VHOST_USER_SET_PROTOCOL_FEATURES:
+ case VHOST_USER_SET_OWNER:
+ case VHOST_USER_RESET_OWNER:
+ case VHOST_USER_SET_MEM_TABLE:
+ case VHOST_USER_SET_LOG_BASE:
+ case VHOST_USER_SET_LOG_FD:
+ case VHOST_USER_SET_VRING_NUM:
+ case VHOST_USER_SET_VRING_ADDR:
+ case VHOST_USER_SET_VRING_BASE:
+ case VHOST_USER_SET_VRING_KICK:
+ case VHOST_USER_SET_VRING_CALL:
+ case VHOST_USER_SET_VRING_ERR:
+ case VHOST_USER_SET_VRING_ENABLE:
+ case VHOST_USER_SEND_RARP:
+ case VHOST_USER_NET_SET_MTU:
+ case VHOST_USER_SET_SLAVE_REQ_FD:
+ vhost_user_lock_all_queue_pairs(dev);
+ unlock_required = 1;
+ break;
+ default:
+ break;
+
+ }
+
switch (msg.request.master) {
case VHOST_USER_GET_FEATURES:
msg.payload.u64 = vhost_user_get_features(dev);
@@ -1342,6 +1443,9 @@ vhost_user_msg_handler(int vid, int fd)
}
+ if (unlock_required)
+ vhost_user_unlock_all_queue_pairs(dev);
+
if (msg.flags & VHOST_USER_NEED_REPLY) {
msg.payload.u64 = !!ret;
msg.size = sizeof(msg.payload.u64);
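Two details of the queue-pair locking are worth noting. First, dev->virtqueue[] may contain unallocated slots (queues are created on demand), which is why the lock/unlock walks count initialized queues in vq_num rather than indexing 0..nr_vring directly; with a dense array, each loop would reduce to the following (illustration only):

	unsigned int i;

	for (i = 0; i < dev->nr_vring; i++)
		rte_spinlock_lock(&dev->virtqueue[i]->access_lock);

Second, only the message-handler thread ever holds more than one access_lock at a time (each datapath thread takes a single queue's lock), so no particular lock ordering is needed to stay deadlock-free.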
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 6fee16e5..d3470307 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -44,6 +44,7 @@
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>
+#include <rte_spinlock.h>
#include "iotlb.h"
#include "vhost.h"
@@ -326,8 +327,11 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
}
vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+
if (unlikely(vq->enabled == 0))
- return 0;
+ goto out_access_unlock;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_lock(vq);
@@ -419,6 +423,9 @@ out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
return count;
}
@@ -651,8 +658,11 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
}
vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+
if (unlikely(vq->enabled == 0))
- return 0;
+ goto out_access_unlock;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_lock(vq);
@@ -715,6 +725,9 @@ out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
return pkt_idx;
}
@@ -977,7 +990,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
desc->addr + desc_offset, cpy_len)))) {
cur->data_len = cpy_len;
cur->data_off = 0;
- cur->buf_addr = (void *)(uintptr_t)desc_addr;
+ cur->buf_addr = (void *)(uintptr_t)(desc_addr
+ + desc_offset);
cur->buf_iova = hpa;
/*
@@ -1156,6 +1170,22 @@ mbuf_is_consumed(struct rte_mbuf *m)
return true;
}
+static __rte_always_inline void
+restore_mbuf(struct rte_mbuf *m)
+{
+ uint32_t mbuf_size, priv_size;
+
+ while (m) {
+ priv_size = rte_pktmbuf_priv_size(m->pool);
+ mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ /* start of buffer is after mbuf structure and priv data */
+
+ m->buf_addr = (char *)m + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+ m = m->next;
+ }
+}
+
uint16_t
rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
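restore_mbuf() undoes what dequeue zero-copy did in copy_desc_to_mbuf() above: there, buf_addr and buf_iova were redirected into guest memory, and they must point back at the mbuf's own embedded buffer before rte_pktmbuf_free() returns it to the pool, or the next user of that mbuf would read and write guest memory. The standard pool-allocated rte_mbuf layout it restores:

/*
 * Layout assumed by restore_mbuf():
 *
 *   m                  m + sizeof(struct rte_mbuf) + priv_size
 *   |                  |
 *   v                  v
 *   +-----------------+-----------+---------------------------+
 *   | struct rte_mbuf | priv data | data buffer (buf_addr)    |
 *   +-----------------+-----------+---------------------------+
 */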
@@ -1180,9 +1210,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
}
vq = dev->virtqueue[queue_id];
- if (unlikely(vq->enabled == 0))
+
+ if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
return 0;
+ if (unlikely(vq->enabled == 0))
+ goto out_access_unlock;
+
vq->batch_copy_nb_elems = 0;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
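With the trylock above, a zero return from rte_vhost_dequeue_burst() can now also mean that the control path momentarily holds the queue, not only that the ring is empty; a polling caller need not tell the two apart. A hypothetical caller-side sketch (poll_vhost_queue and MAX_PKT_BURST are illustrative names, not from the patch):

#include <rte_mbuf.h>
#include <rte_vhost.h>

#define MAX_PKT_BURST 32

static void
poll_vhost_queue(int vid, uint16_t queue_id, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t i, nb;

	/* nb == 0: ring empty *or* a vhost-user message is being handled. */
	nb = rte_vhost_dequeue_burst(vid, queue_id, mbuf_pool,
			pkts, MAX_PKT_BURST);
	for (i = 0; i < nb; i++) {
		/* ... forward the packet ... */
		rte_pktmbuf_free(pkts[i]);
	}
}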
@@ -1207,6 +1241,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
nr_updated += 1;
TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+ restore_mbuf(zmbuf->mbuf);
rte_pktmbuf_free(zmbuf->mbuf);
put_zmbuf(zmbuf);
vq->nr_zmbuf -= 1;
@@ -1356,6 +1391,9 @@ out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
if (unlikely(rarp_mbuf != NULL)) {
/*
* Inject it to the head of "pkts" array, so that switch's mac