From 8b25d1ad5d2264bdfc2818c7bda74ee2697df6db Mon Sep 17 00:00:00 2001 From: Christian Ehrhardt Date: Wed, 6 Jul 2016 09:22:35 +0200 Subject: Imported Upstream version 16.07-rc1 Change-Id: I40a523e52f12e8496fdd69e902824b0226c303de Signed-off-by: Christian Ehrhardt --- lib/librte_vhost/vhost_user/vhost-net-user.c | 450 +++++++++++++++++--------- lib/librte_vhost/vhost_user/vhost-net-user.h | 8 +- lib/librte_vhost/vhost_user/virtio-net-user.c | 102 +++--- lib/librte_vhost/vhost_user/virtio-net-user.h | 18 +- 4 files changed, 374 insertions(+), 204 deletions(-) (limited to 'lib/librte_vhost/vhost_user') diff --git a/lib/librte_vhost/vhost_user/vhost-net-user.c b/lib/librte_vhost/vhost_user/vhost-net-user.c index df2bd648..94f1b923 100644 --- a/lib/librte_vhost/vhost_user/vhost-net-user.c +++ b/lib/librte_vhost/vhost_user/vhost-net-user.c @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include #include #include @@ -51,32 +53,44 @@ #include "vhost-net.h" #include "virtio-net-user.h" -#define MAX_VIRTIO_BACKLOG 128 - -static void vserver_new_vq_conn(int fd, void *data, int *remove); -static void vserver_message_handler(int fd, void *dat, int *remove); +/* + * Every time rte_vhost_driver_register() is invoked, an associated + * vhost_user_socket struct will be created. + */ +struct vhost_user_socket { + char *path; + int listenfd; + bool is_server; + bool reconnect; +}; -struct connfd_ctx { - struct vhost_server *vserver; - uint32_t fh; +struct vhost_user_connection { + struct vhost_user_socket *vsocket; + int vid; }; -#define MAX_VHOST_SERVER 1024 -struct _vhost_server { - struct vhost_server *server[MAX_VHOST_SERVER]; +#define MAX_VHOST_SOCKET 1024 +struct vhost_user { + struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET]; struct fdset fdset; - int vserver_cnt; - pthread_mutex_t server_mutex; + int vsocket_cnt; + pthread_mutex_t mutex; }; -static struct _vhost_server g_vhost_server = { +#define MAX_VIRTIO_BACKLOG 128 + +static void vhost_user_server_new_connection(int fd, void *data, int *remove); +static void vhost_user_msg_handler(int fd, void *dat, int *remove); +static int vhost_user_create_client(struct vhost_user_socket *vsocket); + +static struct vhost_user vhost_user = { .fdset = { .fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} }, .fd_mutex = PTHREAD_MUTEX_INITIALIZER, .num = 0 }, - .vserver_cnt = 0, - .server_mutex = PTHREAD_MUTEX_INITIALIZER, + .vsocket_cnt = 0, + .mutex = PTHREAD_MUTEX_INITIALIZER, }; static const char *vhost_message_str[VHOST_USER_MAX] = { @@ -102,48 +116,6 @@ static const char *vhost_message_str[VHOST_USER_MAX] = { [VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP", }; -/** - * Create a unix domain socket, bind to path and listen for connection. 
- * @return - * socket fd or -1 on failure - */ -static int -uds_socket(const char *path) -{ - struct sockaddr_un un; - int sockfd; - int ret; - - if (path == NULL) - return -1; - - sockfd = socket(AF_UNIX, SOCK_STREAM, 0); - if (sockfd < 0) - return -1; - RTE_LOG(INFO, VHOST_CONFIG, "socket created, fd:%d\n", sockfd); - - memset(&un, 0, sizeof(un)); - un.sun_family = AF_UNIX; - snprintf(un.sun_path, sizeof(un.sun_path), "%s", path); - ret = bind(sockfd, (struct sockaddr *)&un, sizeof(un)); - if (ret == -1) { - RTE_LOG(ERR, VHOST_CONFIG, "fail to bind fd:%d, remove file:%s and try again.\n", - sockfd, path); - goto err; - } - RTE_LOG(INFO, VHOST_CONFIG, "bind to %s\n", path); - - ret = listen(sockfd, MAX_VIRTIO_BACKLOG); - if (ret == -1) - goto err; - - return sockfd; - -err: - close(sockfd); - return -1; -} - /* return bytes# of read on success or negative val on failure. */ static int read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num) @@ -278,62 +250,66 @@ send_vhost_message(int sockfd, struct VhostUserMsg *msg) return ret; } -/* call back when there is new virtio connection. */ + static void -vserver_new_vq_conn(int fd, void *dat, __rte_unused int *remove) +vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket) { - struct vhost_server *vserver = (struct vhost_server *)dat; - int conn_fd; - struct connfd_ctx *ctx; - int fh; - struct vhost_device_ctx vdev_ctx = { (pid_t)0, 0 }; - unsigned int size; - - conn_fd = accept(fd, NULL, NULL); - RTE_LOG(INFO, VHOST_CONFIG, - "new virtio connection is %d\n", conn_fd); - if (conn_fd < 0) - return; + int vid; + size_t size; + struct vhost_user_connection *conn; - ctx = calloc(1, sizeof(*ctx)); - if (ctx == NULL) { - close(conn_fd); + conn = malloc(sizeof(*conn)); + if (conn == NULL) { + close(fd); return; } - fh = vhost_new_device(vdev_ctx); - if (fh == -1) { - free(ctx); - close(conn_fd); + vid = vhost_new_device(); + if (vid == -1) { + close(fd); + free(conn); return; } - vdev_ctx.fh = fh; - size = strnlen(vserver->path, PATH_MAX); - vhost_set_ifname(vdev_ctx, vserver->path, - size); + size = strnlen(vsocket->path, PATH_MAX); + vhost_set_ifname(vid, vsocket->path, size); + + RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid); - RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", fh); + conn->vsocket = vsocket; + conn->vid = vid; + fdset_add(&vhost_user.fdset, fd, vhost_user_msg_handler, NULL, conn); +} + +/* call back when there is new vhost-user connection from client */ +static void +vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused) +{ + struct vhost_user_socket *vsocket = dat; - ctx->vserver = vserver; - ctx->fh = fh; - fdset_add(&g_vhost_server.fdset, - conn_fd, vserver_message_handler, NULL, ctx); + fd = accept(fd, NULL, NULL); + if (fd < 0) + return; + + RTE_LOG(INFO, VHOST_CONFIG, "new vhost user connection is %d\n", fd); + vhost_user_add_connection(fd, vsocket); } /* callback when there is message on the connfd */ static void -vserver_message_handler(int connfd, void *dat, int *remove) +vhost_user_msg_handler(int connfd, void *dat, int *remove) { - struct vhost_device_ctx ctx; - struct connfd_ctx *cfd_ctx = (struct connfd_ctx *)dat; + int vid; + struct vhost_user_connection *conn = dat; struct VhostUserMsg msg; uint64_t features; int ret; - ctx.fh = cfd_ctx->fh; + vid = conn->vid; ret = read_vhost_message(connfd, &msg); if (ret <= 0 || msg.request >= VHOST_USER_MAX) { + struct vhost_user_socket *vsocket = conn->vsocket; + if (ret < 0) RTE_LOG(ERR, 
VHOST_CONFIG, "vhost read message failed\n"); @@ -346,8 +322,11 @@ vserver_message_handler(int connfd, void *dat, int *remove) close(connfd); *remove = 1; - free(cfd_ctx); - vhost_destroy_device(ctx); + free(conn); + vhost_destroy_device(vid); + + if (vsocket->reconnect) + vhost_user_create_client(vsocket); return; } @@ -356,14 +335,14 @@ vserver_message_handler(int connfd, void *dat, int *remove) vhost_message_str[msg.request]); switch (msg.request) { case VHOST_USER_GET_FEATURES: - ret = vhost_get_features(ctx, &features); + ret = vhost_get_features(vid, &features); msg.payload.u64 = features; msg.size = sizeof(msg.payload.u64); send_vhost_message(connfd, &msg); break; case VHOST_USER_SET_FEATURES: features = msg.payload.u64; - vhost_set_features(ctx, &features); + vhost_set_features(vid, &features); break; case VHOST_USER_GET_PROTOCOL_FEATURES: @@ -372,22 +351,22 @@ vserver_message_handler(int connfd, void *dat, int *remove) send_vhost_message(connfd, &msg); break; case VHOST_USER_SET_PROTOCOL_FEATURES: - user_set_protocol_features(ctx, msg.payload.u64); + user_set_protocol_features(vid, msg.payload.u64); break; case VHOST_USER_SET_OWNER: - vhost_set_owner(ctx); + vhost_set_owner(vid); break; case VHOST_USER_RESET_OWNER: - vhost_reset_owner(ctx); + vhost_reset_owner(vid); break; case VHOST_USER_SET_MEM_TABLE: - user_set_mem_table(ctx, &msg); + user_set_mem_table(vid, &msg); break; case VHOST_USER_SET_LOG_BASE: - user_set_log_base(ctx, &msg); + user_set_log_base(vid, &msg); /* it needs a reply */ msg.size = sizeof(msg.payload.u64); @@ -399,26 +378,26 @@ vserver_message_handler(int connfd, void *dat, int *remove) break; case VHOST_USER_SET_VRING_NUM: - vhost_set_vring_num(ctx, &msg.payload.state); + vhost_set_vring_num(vid, &msg.payload.state); break; case VHOST_USER_SET_VRING_ADDR: - vhost_set_vring_addr(ctx, &msg.payload.addr); + vhost_set_vring_addr(vid, &msg.payload.addr); break; case VHOST_USER_SET_VRING_BASE: - vhost_set_vring_base(ctx, &msg.payload.state); + vhost_set_vring_base(vid, &msg.payload.state); break; case VHOST_USER_GET_VRING_BASE: - ret = user_get_vring_base(ctx, &msg.payload.state); + ret = user_get_vring_base(vid, &msg.payload.state); msg.size = sizeof(msg.payload.state); send_vhost_message(connfd, &msg); break; case VHOST_USER_SET_VRING_KICK: - user_set_vring_kick(ctx, &msg); + user_set_vring_kick(vid, &msg); break; case VHOST_USER_SET_VRING_CALL: - user_set_vring_call(ctx, &msg); + user_set_vring_call(vid, &msg); break; case VHOST_USER_SET_VRING_ERR: @@ -434,10 +413,10 @@ vserver_message_handler(int connfd, void *dat, int *remove) break; case VHOST_USER_SET_VRING_ENABLE: - user_set_vring_enable(ctx, &msg.payload.state); + user_set_vring_enable(vid, &msg.payload.state); break; case VHOST_USER_SEND_RARP: - user_send_rarp(ctx, &msg); + user_send_rarp(vid, &msg); break; default: @@ -446,50 +425,222 @@ vserver_message_handler(int connfd, void *dat, int *remove) } } -/** - * Creates and initialise the vhost server. - */ -int -rte_vhost_driver_register(const char *path) +static int +create_unix_socket(const char *path, struct sockaddr_un *un, bool is_server) { - struct vhost_server *vserver; + int fd; - pthread_mutex_lock(&g_vhost_server.server_mutex); + fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (fd < 0) + return -1; + RTE_LOG(INFO, VHOST_CONFIG, "vhost-user %s: socket created, fd: %d\n", + is_server ? 
"server" : "client", fd); - if (g_vhost_server.vserver_cnt == MAX_VHOST_SERVER) { - RTE_LOG(ERR, VHOST_CONFIG, - "error: the number of servers reaches maximum\n"); - pthread_mutex_unlock(&g_vhost_server.server_mutex); + memset(un, 0, sizeof(*un)); + un->sun_family = AF_UNIX; + strncpy(un->sun_path, path, sizeof(un->sun_path)); + + return fd; +} + +static int +vhost_user_create_server(struct vhost_user_socket *vsocket) +{ + int fd; + int ret; + struct sockaddr_un un; + const char *path = vsocket->path; + + fd = create_unix_socket(path, &un, vsocket->is_server); + if (fd < 0) return -1; + + ret = bind(fd, (struct sockaddr *)&un, sizeof(un)); + if (ret < 0) { + RTE_LOG(ERR, VHOST_CONFIG, + "failed to bind to %s: %s; remove it and try again\n", + path, strerror(errno)); + goto err; } + RTE_LOG(INFO, VHOST_CONFIG, "bind to %s\n", path); - vserver = calloc(sizeof(struct vhost_server), 1); - if (vserver == NULL) { - pthread_mutex_unlock(&g_vhost_server.server_mutex); - return -1; + ret = listen(fd, MAX_VIRTIO_BACKLOG); + if (ret < 0) + goto err; + + vsocket->listenfd = fd; + fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection, + NULL, vsocket); + + return 0; + +err: + close(fd); + return -1; +} + +struct vhost_user_reconnect { + struct sockaddr_un un; + int fd; + struct vhost_user_socket *vsocket; + + TAILQ_ENTRY(vhost_user_reconnect) next; +}; + +TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect); +struct vhost_user_reconnect_list { + struct vhost_user_reconnect_tailq_list head; + pthread_mutex_t mutex; +}; + +static struct vhost_user_reconnect_list reconn_list; +static pthread_t reconn_tid; + +static void * +vhost_user_client_reconnect(void *arg __rte_unused) +{ + struct vhost_user_reconnect *reconn, *next; + + while (1) { + pthread_mutex_lock(&reconn_list.mutex); + + /* + * An equal implementation of TAILQ_FOREACH_SAFE, + * which does not exist on all platforms. 
+ */ + for (reconn = TAILQ_FIRST(&reconn_list.head); + reconn != NULL; reconn = next) { + next = TAILQ_NEXT(reconn, next); + + if (connect(reconn->fd, (struct sockaddr *)&reconn->un, + sizeof(reconn->un)) < 0) + continue; + + RTE_LOG(INFO, VHOST_CONFIG, + "%s: connected\n", reconn->vsocket->path); + vhost_user_add_connection(reconn->fd, reconn->vsocket); + TAILQ_REMOVE(&reconn_list.head, reconn, next); + free(reconn); + } + + pthread_mutex_unlock(&reconn_list.mutex); + sleep(1); } - vserver->listenfd = uds_socket(path); - if (vserver->listenfd < 0) { - free(vserver); - pthread_mutex_unlock(&g_vhost_server.server_mutex); + return NULL; +} + +static int +vhost_user_reconnect_init(void) +{ + int ret; + + pthread_mutex_init(&reconn_list.mutex, NULL); + TAILQ_INIT(&reconn_list.head); + + ret = pthread_create(&reconn_tid, NULL, + vhost_user_client_reconnect, NULL); + if (ret < 0) + RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread"); + + return ret; +} + +static int +vhost_user_create_client(struct vhost_user_socket *vsocket) +{ + int fd; + int ret; + struct sockaddr_un un; + const char *path = vsocket->path; + struct vhost_user_reconnect *reconn; + + fd = create_unix_socket(path, &un, vsocket->is_server); + if (fd < 0) return -1; + + ret = connect(fd, (struct sockaddr *)&un, sizeof(un)); + if (ret == 0) { + vhost_user_add_connection(fd, vsocket); + return 0; } - vserver->path = strdup(path); + RTE_LOG(ERR, VHOST_CONFIG, + "failed to connect to %s: %s\n", + path, strerror(errno)); - fdset_add(&g_vhost_server.fdset, vserver->listenfd, - vserver_new_vq_conn, NULL, vserver); + if (!vsocket->reconnect) { + close(fd); + return -1; + } - g_vhost_server.server[g_vhost_server.vserver_cnt++] = vserver; - pthread_mutex_unlock(&g_vhost_server.server_mutex); + RTE_LOG(ERR, VHOST_CONFIG, "%s: reconnecting...\n", path); + reconn = malloc(sizeof(*reconn)); + reconn->un = un; + reconn->fd = fd; + reconn->vsocket = vsocket; + pthread_mutex_lock(&reconn_list.mutex); + TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next); + pthread_mutex_unlock(&reconn_list.mutex); return 0; } +/* + * Register a new vhost-user socket; here we could act as server + * (the default case), or client (when RTE_VHOST_USER_CLIENT) flag + * is set. 
+ */ +int +rte_vhost_driver_register(const char *path, uint64_t flags) +{ + int ret = -1; + struct vhost_user_socket *vsocket; + + if (!path) + return -1; + + pthread_mutex_lock(&vhost_user.mutex); + + if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) { + RTE_LOG(ERR, VHOST_CONFIG, + "error: the number of vhost sockets reaches maximum\n"); + goto out; + } + + vsocket = malloc(sizeof(struct vhost_user_socket)); + if (!vsocket) + goto out; + memset(vsocket, 0, sizeof(struct vhost_user_socket)); + vsocket->path = strdup(path); + + if ((flags & RTE_VHOST_USER_CLIENT) != 0) { + vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT); + if (vsocket->reconnect && reconn_tid == 0) { + if (vhost_user_reconnect_init() < 0) + goto out; + } + ret = vhost_user_create_client(vsocket); + } else { + vsocket->is_server = true; + ret = vhost_user_create_server(vsocket); + } + if (ret < 0) { + free(vsocket->path); + free(vsocket); + goto out; + } + + vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket; + +out: + pthread_mutex_unlock(&vhost_user.mutex); + + return ret; +} /** - * Unregister the specified vhost server + * Unregister the specified vhost socket */ int rte_vhost_driver_unregister(const char *path) @@ -497,28 +648,29 @@ rte_vhost_driver_unregister(const char *path) int i; int count; - pthread_mutex_lock(&g_vhost_server.server_mutex); - - for (i = 0; i < g_vhost_server.vserver_cnt; i++) { - if (!strcmp(g_vhost_server.server[i]->path, path)) { - fdset_del(&g_vhost_server.fdset, - g_vhost_server.server[i]->listenfd); + pthread_mutex_lock(&vhost_user.mutex); - close(g_vhost_server.server[i]->listenfd); - free(g_vhost_server.server[i]->path); - free(g_vhost_server.server[i]); + for (i = 0; i < vhost_user.vsocket_cnt; i++) { + if (!strcmp(vhost_user.vsockets[i]->path, path)) { + if (vhost_user.vsockets[i]->is_server) { + fdset_del(&vhost_user.fdset, + vhost_user.vsockets[i]->listenfd); + close(vhost_user.vsockets[i]->listenfd); + unlink(path); + } - unlink(path); + free(vhost_user.vsockets[i]->path); + free(vhost_user.vsockets[i]); - count = --g_vhost_server.vserver_cnt; - g_vhost_server.server[i] = g_vhost_server.server[count]; - g_vhost_server.server[count] = NULL; - pthread_mutex_unlock(&g_vhost_server.server_mutex); + count = --vhost_user.vsocket_cnt; + vhost_user.vsockets[i] = vhost_user.vsockets[count]; + vhost_user.vsockets[count] = NULL; + pthread_mutex_unlock(&vhost_user.mutex); return 0; } } - pthread_mutex_unlock(&g_vhost_server.server_mutex); + pthread_mutex_unlock(&vhost_user.mutex); return -1; } @@ -526,6 +678,6 @@ rte_vhost_driver_unregister(const char *path) int rte_vhost_driver_session_start(void) { - fdset_event_dispatch(&g_vhost_server.fdset); + fdset_event_dispatch(&vhost_user.fdset); return 0; } diff --git a/lib/librte_vhost/vhost_user/vhost-net-user.h b/lib/librte_vhost/vhost_user/vhost-net-user.h index e3bb4138..f5332396 100644 --- a/lib/librte_vhost/vhost_user/vhost-net-user.h +++ b/lib/librte_vhost/vhost_user/vhost-net-user.h @@ -38,15 +38,11 @@ #include #include "rte_virtio_net.h" -#include "fd_man.h" - -struct vhost_server { - char *path; /**< The path the uds is bind to. */ - int listenfd; /**< The listener sockfd. 
*/ -}; /* refer to hw/virtio/vhost-user.c */ +#define VHOST_MEMORY_MAX_NREGIONS 8 + typedef enum VhostUserRequest { VHOST_USER_NONE = 0, VHOST_USER_GET_FEATURES = 1, diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.c b/lib/librte_vhost/vhost_user/virtio-net-user.c index f5248bc4..e7c43479 100644 --- a/lib/librte_vhost/vhost_user/virtio-net-user.c +++ b/lib/librte_vhost/vhost_user/virtio-net-user.c @@ -43,7 +43,6 @@ #include #include -#include "virtio-net.h" #include "virtio-net-user.h" #include "vhost-net-user.h" #include "vhost-net.h" @@ -64,9 +63,10 @@ static uint64_t get_blk_size(int fd) { struct stat stat; + int ret; - fstat(fd, &stat); - return (uint64_t)stat.st_blksize; + ret = fstat(fd, &stat); + return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize; } static void @@ -96,10 +96,14 @@ vhost_backend_cleanup(struct virtio_net *dev) free(dev->mem); dev->mem = NULL; } + if (dev->log_addr) { + munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); + dev->log_addr = 0; + } } int -user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg) +user_set_mem_table(int vid, struct VhostUserMsg *pmsg) { struct VhostUserMemory memory = pmsg->payload.memory; struct virtio_memory_regions *pregion; @@ -110,13 +114,15 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg) uint64_t alignment; /* unmap old memory regions one by one*/ - dev = get_device(ctx); + dev = get_device(vid); if (dev == NULL) return -1; /* Remove from the data plane. */ - if (dev->flags & VIRTIO_DEV_RUNNING) - notify_ops->destroy_device(dev); + if (dev->flags & VIRTIO_DEV_RUNNING) { + dev->flags &= ~VIRTIO_DEV_RUNNING; + notify_ops->destroy_device(vid); + } if (dev->mem) { free_mem_region(dev); @@ -130,8 +136,8 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg) sizeof(struct orig_region_map) * memory.nregions); if (dev->mem == NULL) { RTE_LOG(ERR, VHOST_CONFIG, - "(%"PRIu64") Failed to allocate memory for dev->mem\n", - dev->device_fh); + "(%d) failed to allocate memory for dev->mem\n", + dev->vid); return -1; } dev->mem->nregions = memory.nregions; @@ -162,6 +168,11 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg) * aligned. */ alignment = get_blk_size(pmsg->fds[idx]); + if (alignment == (uint64_t)-1) { + RTE_LOG(ERR, VHOST_CONFIG, + "couldn't get hugepage size through fstat\n"); + goto err_mmap; + } mapped_size = RTE_ALIGN_CEIL(mapped_size, alignment); mapped_address = (uint64_t)(uintptr_t)mmap(NULL, @@ -252,7 +263,7 @@ virtio_is_ready(struct virtio_net *dev) } void -user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg) +user_set_vring_call(int vid, struct VhostUserMsg *pmsg) { struct vhost_vring_file file; @@ -263,7 +274,7 @@ user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg) file.fd = pmsg->fds[0]; RTE_LOG(INFO, VHOST_CONFIG, "vring call idx:%d file:%d\n", file.index, file.fd); - vhost_set_vring_call(ctx, &file); + vhost_set_vring_call(vid, &file); } @@ -272,10 +283,13 @@ user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg) * device is ready for packet processing. 
*/ void -user_set_vring_kick(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg) +user_set_vring_kick(int vid, struct VhostUserMsg *pmsg) { struct vhost_vring_file file; - struct virtio_net *dev = get_device(ctx); + struct virtio_net *dev = get_device(vid); + + if (!dev) + return; file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) @@ -284,30 +298,32 @@ user_set_vring_kick(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg) file.fd = pmsg->fds[0]; RTE_LOG(INFO, VHOST_CONFIG, "vring kick idx:%d file:%d\n", file.index, file.fd); - vhost_set_vring_kick(ctx, &file); + vhost_set_vring_kick(vid, &file); - if (virtio_is_ready(dev) && - !(dev->flags & VIRTIO_DEV_RUNNING)) - notify_ops->new_device(dev); + if (virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_RUNNING)) { + if (notify_ops->new_device(vid) == 0) + dev->flags |= VIRTIO_DEV_RUNNING; + } } /* * when virtio is stopped, qemu will send us the GET_VRING_BASE message. */ int -user_get_vring_base(struct vhost_device_ctx ctx, - struct vhost_vring_state *state) +user_get_vring_base(int vid, struct vhost_vring_state *state) { - struct virtio_net *dev = get_device(ctx); + struct virtio_net *dev = get_device(vid); if (dev == NULL) return -1; /* We have to stop the queue (virtio) if it is running. */ - if (dev->flags & VIRTIO_DEV_RUNNING) - notify_ops->destroy_device(dev); + if (dev->flags & VIRTIO_DEV_RUNNING) { + dev->flags &= ~VIRTIO_DEV_RUNNING; + notify_ops->destroy_device(vid); + } /* Here we are safe to get the last used index */ - vhost_get_vring_base(ctx, state->index, state); + vhost_get_vring_base(vid, state->index, state); RTE_LOG(INFO, VHOST_CONFIG, "vring base idx:%d file:%d\n", state->index, state->num); @@ -329,19 +345,21 @@ user_get_vring_base(struct vhost_device_ctx ctx, * enable the virtio queue pair. */ int -user_set_vring_enable(struct vhost_device_ctx ctx, - struct vhost_vring_state *state) +user_set_vring_enable(int vid, struct vhost_vring_state *state) { - struct virtio_net *dev = get_device(ctx); + struct virtio_net *dev; int enable = (int)state->num; + dev = get_device(vid); + if (dev == NULL) + return -1; + RTE_LOG(INFO, VHOST_CONFIG, "set queue enable: %d to qp idx: %d\n", enable, state->index); - if (notify_ops->vring_state_changed) { - notify_ops->vring_state_changed(dev, state->index, enable); - } + if (notify_ops->vring_state_changed) + notify_ops->vring_state_changed(vid, state->index, enable); dev->virtqueue[state->index]->enabled = enable; @@ -349,12 +367,11 @@ user_set_vring_enable(struct vhost_device_ctx ctx, } void -user_set_protocol_features(struct vhost_device_ctx ctx, - uint64_t protocol_features) +user_set_protocol_features(int vid, uint64_t protocol_features) { struct virtio_net *dev; - dev = get_device(ctx); + dev = get_device(vid); if (dev == NULL || protocol_features & ~VHOST_USER_PROTOCOL_FEATURES) return; @@ -362,15 +379,14 @@ user_set_protocol_features(struct vhost_device_ctx ctx, } int -user_set_log_base(struct vhost_device_ctx ctx, - struct VhostUserMsg *msg) +user_set_log_base(int vid, struct VhostUserMsg *msg) { struct virtio_net *dev; int fd = msg->fds[0]; uint64_t size, off; void *addr; - dev = get_device(ctx); + dev = get_device(vid); if (!dev) return -1; @@ -397,13 +413,21 @@ user_set_log_base(struct vhost_device_ctx ctx, * fail when offset is not page size aligned. 
*/ addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + close(fd); if (addr == MAP_FAILED) { RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n"); return -1; } - /* TODO: unmap on stop */ - dev->log_base = (uint64_t)(uintptr_t)addr + off; + /* + * Free previously mapped log memory on occasionally + * multiple VHOST_USER_SET_LOG_BASE. + */ + if (dev->log_addr) { + munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); + } + dev->log_addr = (uint64_t)(uintptr_t)addr; + dev->log_base = dev->log_addr + off; dev->log_size = size; return 0; @@ -418,12 +442,12 @@ user_set_log_base(struct vhost_device_ctx ctx, * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it. */ int -user_send_rarp(struct vhost_device_ctx ctx, struct VhostUserMsg *msg) +user_send_rarp(int vid, struct VhostUserMsg *msg) { struct virtio_net *dev; uint8_t *mac = (uint8_t *)&msg->payload.u64; - dev = get_device(ctx); + dev = get_device(vid); if (!dev) return -1; diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.h b/lib/librte_vhost/vhost_user/virtio-net-user.h index cefec162..e1b967b8 100644 --- a/lib/librte_vhost/vhost_user/virtio-net-user.h +++ b/lib/librte_vhost/vhost_user/virtio-net-user.h @@ -45,20 +45,18 @@ (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |\ (1ULL << VHOST_USER_PROTOCOL_F_RARP)) -int user_set_mem_table(struct vhost_device_ctx, struct VhostUserMsg *); +int user_set_mem_table(int, struct VhostUserMsg *); -void user_set_vring_call(struct vhost_device_ctx, struct VhostUserMsg *); +void user_set_vring_call(int, struct VhostUserMsg *); -void user_set_vring_kick(struct vhost_device_ctx, struct VhostUserMsg *); +void user_set_vring_kick(int, struct VhostUserMsg *); -void user_set_protocol_features(struct vhost_device_ctx ctx, - uint64_t protocol_features); -int user_set_log_base(struct vhost_device_ctx ctx, struct VhostUserMsg *); -int user_send_rarp(struct vhost_device_ctx ctx, struct VhostUserMsg *); +void user_set_protocol_features(int vid, uint64_t protocol_features); +int user_set_log_base(int vid, struct VhostUserMsg *); +int user_send_rarp(int vid, struct VhostUserMsg *); -int user_get_vring_base(struct vhost_device_ctx, struct vhost_vring_state *); +int user_get_vring_base(int, struct vhost_vring_state *); -int user_set_vring_enable(struct vhost_device_ctx ctx, - struct vhost_vring_state *state); +int user_set_vring_enable(int vid, struct vhost_vring_state *state); #endif -- cgit 1.2.3-korg
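
Usage note (not part of the patch above): this rework changes rte_vhost_driver_register() to take a flags argument and adds a vhost-user client mode with an optional reconnect thread, controlled by the RTE_VHOST_USER_CLIENT and RTE_VHOST_USER_NO_RECONNECT flags seen in the diff. The sketch below shows how a caller might drive the new entry points; the public header name and the suggestion to run the session loop on its own thread are assumptions for illustration and do not appear in this diff.

/*
 * Minimal caller-side sketch of the reworked registration API.
 * Assumptions (not shown in the diff): the application includes the
 * library's public header <rte_virtio_net.h>, and runs the blocking
 * session loop on a dedicated thread or lcore.
 */
#include <stdio.h>
#include <stdint.h>
#include <rte_virtio_net.h>

int
start_vhost_user(const char *path)
{
	/*
	 * Act as a vhost-user client; the library will keep trying to
	 * reconnect if the connection drops. Drop RTE_VHOST_USER_CLIENT
	 * for the default server mode, or add RTE_VHOST_USER_NO_RECONNECT
	 * to disable the reconnect thread.
	 */
	uint64_t flags = RTE_VHOST_USER_CLIENT;

	if (rte_vhost_driver_register(path, flags) < 0) {
		fprintf(stderr, "failed to register vhost socket %s\n", path);
		return -1;
	}

	/* Dispatches vhost-user messages; this call blocks. */
	rte_vhost_driver_session_start();

	/* Tear down the socket (and unlink it in server mode). */
	rte_vhost_driver_unregister(path);
	return 0;
}

In practice the application would also install its new_device/destroy_device callbacks before starting the session loop (in DPDK this is done through rte_vhost_driver_callback_register(), which is outside the scope of this diff), since the message handler shown above invokes notify_ops when a device becomes ready or is torn down.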