From 9365d6cfd641d5ade83591c5f5dfa2bf32808e4e Mon Sep 17 00:00:00 2001
From: Christian Ehrhardt
Date: Thu, 8 Jun 2017 09:46:51 -0600
Subject: Imported Upstream version 16.11.2

Change-Id: Iae65ee14970fb0a80d1cba3ff77c3cf7db28bd60
Signed-off-by: Christian Ehrhardt
---
 lib/librte_vhost/socket.c     | 48 ++++++++++++++++++++++++++++++++-----------
 lib/librte_vhost/vhost.c      |  2 +-
 lib/librte_vhost/vhost.h      | 18 ++++------------
 lib/librte_vhost/virtio_net.c | 18 ++++++++++++++--
 4 files changed, 57 insertions(+), 29 deletions(-)

(limited to 'lib/librte_vhost')

diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index aaa9c270..84e05951 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -52,14 +52,18 @@
 #include "vhost.h"
 #include "vhost_user.h"
 
+
+TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
+
 /*
  * Every time rte_vhost_driver_register() is invoked, an associated
  * vhost_user_socket struct will be created.
  */
 struct vhost_user_socket {
+	struct vhost_user_connection_list conn_list;
+	pthread_mutex_t conn_mutex;
 	char *path;
 	int listenfd;
-	int connfd;
 	bool is_server;
 	bool reconnect;
 	bool dequeue_zero_copy;
@@ -67,7 +71,10 @@ struct vhost_user_socket {
 
 struct vhost_user_connection {
 	struct vhost_user_socket *vsocket;
+	int connfd;
 	int vid;
+
+	TAILQ_ENTRY(vhost_user_connection) next;
 };
 
 #define MAX_VHOST_SOCKET 1024
@@ -209,19 +216,24 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 
 	RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);
 
-	vsocket->connfd = fd;
+	conn->connfd = fd;
 	conn->vsocket = vsocket;
 	conn->vid = vid;
 	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
 			NULL, conn);
 	if (ret < 0) {
-		vsocket->connfd = -1;
+		conn->connfd = -1;
 		free(conn);
 		close(fd);
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"failed to add fd %d into vhost server fdset\n",
 			fd);
+		return;
 	}
+
+	pthread_mutex_lock(&vsocket->conn_mutex);
+	TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
+	pthread_mutex_unlock(&vsocket->conn_mutex);
 }
 
 /* call back when there is new vhost-user connection from client */
@@ -247,10 +259,14 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
 
 	ret = vhost_user_msg_handler(conn->vid, connfd);
 	if (ret < 0) {
-		vsocket->connfd = -1;
 		close(connfd);
 		*remove = 1;
 		vhost_destroy_device(conn->vid);
+
+		pthread_mutex_lock(&vsocket->conn_mutex);
+		TAILQ_REMOVE(&vsocket->conn_list, conn, next);
+		pthread_mutex_unlock(&vsocket->conn_mutex);
+
 		free(conn);
 
 		if (vsocket->reconnect)
@@ -448,7 +464,7 @@ vhost_user_create_client(struct vhost_user_socket *vsocket)
 		return 0;
 	}
 
-	RTE_LOG(ERR, VHOST_CONFIG,
+	RTE_LOG(WARNING, VHOST_CONFIG,
 		"failed to connect to %s: %s\n",
 		path, strerror(errno));
 
@@ -457,7 +473,7 @@ vhost_user_create_client(struct vhost_user_socket *vsocket)
 		return -1;
 	}
 
-	RTE_LOG(ERR, VHOST_CONFIG, "%s: reconnecting...\n", path);
+	RTE_LOG(INFO, VHOST_CONFIG, "%s: reconnecting...\n", path);
 	reconn = malloc(sizeof(*reconn));
 	if (reconn == NULL) {
 		RTE_LOG(ERR, VHOST_CONFIG,
@@ -502,7 +518,8 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
 		goto out;
 	memset(vsocket, 0, sizeof(struct vhost_user_socket));
 	vsocket->path = strdup(path);
-	vsocket->connfd = -1;
+	TAILQ_INIT(&vsocket->conn_list);
+	pthread_mutex_init(&vsocket->conn_mutex, NULL);
 	vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
 
 	if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
@@ -565,7 +582,7 @@ rte_vhost_driver_unregister(const char *path)
 {
 	int i;
 	int count;
-	struct vhost_user_connection *conn;
+	struct vhost_user_connection *conn, *next;
 
 	pthread_mutex_lock(&vhost_user.mutex);
 
@@ -581,15 +598,22 @@ rte_vhost_driver_unregister(const char *path)
 				vhost_user_remove_reconnect(vsocket);
 			}
 
-			conn = fdset_del(&vhost_user.fdset, vsocket->connfd);
-			if (conn) {
+			pthread_mutex_lock(&vsocket->conn_mutex);
+			for (conn = TAILQ_FIRST(&vsocket->conn_list);
+			     conn != NULL;
+			     conn = next) {
+				next = TAILQ_NEXT(conn, next);
+
+				fdset_del(&vhost_user.fdset, conn->connfd);
 				RTE_LOG(INFO, VHOST_CONFIG,
 					"free connfd = %d for device '%s'\n",
-					vsocket->connfd, path);
-				close(vsocket->connfd);
+					conn->connfd, path);
+				close(conn->connfd);
 				vhost_destroy_device(conn->vid);
+				TAILQ_REMOVE(&vsocket->conn_list, conn, next);
 				free(conn);
 			}
+			pthread_mutex_unlock(&vsocket->conn_mutex);
 
 			free(vsocket->path);
 			free(vsocket);
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index e4150934..3c3f6a42 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -56,7 +56,7 @@
 				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
 				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
 				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
-				(VHOST_SUPPORTS_MQ) | \
+				(1ULL << VIRTIO_NET_F_MQ) | \
 				(1ULL << VIRTIO_F_VERSION_1) | \
 				(1ULL << VHOST_F_LOG_ALL) | \
 				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 22564f1c..d97df1d8 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -110,25 +110,15 @@ struct vhost_virtqueue {
 	uint16_t		shadow_used_idx;
 } __rte_cache_aligned;
 
-/* Old kernels have no such macro defined */
+/* Old kernels have no such macros defined */
 #ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
  #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
 #endif
 
-
-/*
- * Make an extra wrapper for VIRTIO_NET_F_MQ and
- * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX as they are
- * introduced since kernel v3.8. This makes our
- * code buildable for older kernel.
- */
-#ifdef VIRTIO_NET_F_MQ
- #define VHOST_MAX_QUEUE_PAIRS	VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX
- #define VHOST_SUPPORTS_MQ	(1ULL << VIRTIO_NET_F_MQ)
-#else
- #define VHOST_MAX_QUEUE_PAIRS	1
- #define VHOST_SUPPORTS_MQ	0
+#ifndef VIRTIO_NET_F_MQ
+ #define VIRTIO_NET_F_MQ	22
 #endif
+#define VHOST_MAX_QUEUE_PAIRS	0x80
 
 /*
  * Define virtio 1.0 for older kernels
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 337470d6..ea027f14 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -905,6 +905,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 					"allocate memory for mbuf.\n");
 				return -1;
 			}
+			if (unlikely(dev->dequeue_zero_copy))
+				rte_mbuf_refcnt_update(cur, 1);
 
 			prev->next = cur;
 			prev->data_len = mbuf_offset;
@@ -1056,9 +1058,21 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	 * array, to looks like that guest actually send such packet.
 	 *
 	 * Check user_send_rarp() for more information.
+	 *
+	 * broadcast_rarp shares a cacheline in the virtio_net structure
+	 * with some fields that are accessed during enqueue and
+	 * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
+	 * result in false sharing between enqueue and dequeue.
+	 *
+	 * Prevent unnecessary false sharing by reading broadcast_rarp first
+	 * and only performing cmpset if the read indicates it is likely to
+	 * be set.
 	 */
-	if (unlikely(rte_atomic16_cmpset((volatile uint16_t *)
-			&dev->broadcast_rarp.cnt, 1, 0))) {
+
+	if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
+			rte_atomic16_cmpset((volatile uint16_t *)
+			&dev->broadcast_rarp.cnt, 1, 0))) {
+
 		rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool);
 		if (rarp_mbuf == NULL) {
 			RTE_LOG(ERR, VHOST_DATA,
-- 
cgit 1.2.3-korg
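
The comment added to rte_vhost_dequeue_burst() above explains why the code now does a plain atomic read of broadcast_rarp before attempting the compare-and-set: a failed cmpxchg still dirties the cacheline, which would cause false sharing with the enqueue path. The standalone sketch below is only an illustration of that read-before-cmpset pattern, assuming C11 atomics in place of DPDK's rte_atomic16_read()/rte_atomic16_cmpset(); it is not part of the imported patch, and the flag and function names are hypothetical.

/*
 * Illustrative sketch only; not part of the patch above. Models the
 * read-before-cmpset pattern from the rte_vhost_dequeue_burst() comment
 * with C11 atomics. Flag and function names are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stands in for dev->broadcast_rarp, which shares a cacheline with
 * fields written on the enqueue path. */
static atomic_ushort broadcast_rarp;

/* Returns true if this caller consumed the flag and should inject
 * the RARP packet into the returned burst. */
static bool
consume_broadcast_rarp(void)
{
	unsigned short expected = 1;

	/*
	 * Cheap relaxed load first: a compare-and-swap takes the cacheline
	 * for writing even when it fails, so skipping it while the flag is
	 * clear avoids the false sharing described in the patch comment.
	 */
	if (atomic_load_explicit(&broadcast_rarp, memory_order_relaxed) != 1)
		return false;

	/* Only one of several racing dequeue callers wins the 1 -> 0 swap. */
	return atomic_compare_exchange_strong(&broadcast_rarp, &expected, 0);
}

int
main(void)
{
	atomic_store(&broadcast_rarp, 1);
	printf("first caller consumes the flag: %d\n", consume_broadcast_rarp());
	printf("later callers see it cleared:   %d\n", consume_broadcast_rarp());
	return 0;
}

The double check is essentially the classic test-and-test-and-set idea applied to a one-shot flag: readers stay read-only until the flag is actually observed set.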