author     Christian Ehrhardt <christian.ehrhardt@canonical.com>  2019-04-15 14:36:48 +0200
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>  2019-04-15 14:39:46 +0200
commit     e2bea7436061ca2e7e14bfcfdc5870f2555c3965 (patch)
tree       46c62ce8f227674d6880430f9623edb4e77b9f9a /lib/librte_vhost
parent     a4f0fa29488e582ab8b5ef9db475b3d26ded690c (diff)
New upstream version 18.11.1
Change-Id: Ic52e74a9ed6f3ae06acea4a27357bd7153efc2a3
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'lib/librte_vhost')
-rw-r--r--  lib/librte_vhost/fd_man.c          4
-rw-r--r--  lib/librte_vhost/fd_man.h          1
-rw-r--r--  lib/librte_vhost/socket.c          4
-rw-r--r--  lib/librte_vhost/vhost.h           6
-rw-r--r--  lib/librte_vhost/vhost_crypto.c  140
-rw-r--r--  lib/librte_vhost/vhost_user.c     22
-rw-r--r--  lib/librte_vhost/virtio_net.c     32
7 files changed, 159 insertions(+), 50 deletions(-)
diff --git a/lib/librte_vhost/fd_man.c b/lib/librte_vhost/fd_man.c
index 38347ab1..55d4856f 100644
--- a/lib/librte_vhost/fd_man.c
+++ b/lib/librte_vhost/fd_man.c
@@ -129,7 +129,9 @@ fdset_add(struct fdset *pfdset, int fd, fd_cb rcb, fd_cb wcb, void *dat)
pthread_mutex_lock(&pfdset->fd_mutex);
i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
if (i == -1) {
+ pthread_mutex_lock(&pfdset->fd_pooling_mutex);
fdset_shrink_nolock(pfdset);
+ pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
if (i == -1) {
pthread_mutex_unlock(&pfdset->fd_mutex);
@@ -246,7 +248,9 @@ fdset_event_dispatch(void *arg)
numfds = pfdset->num;
pthread_mutex_unlock(&pfdset->fd_mutex);
+ pthread_mutex_lock(&pfdset->fd_pooling_mutex);
val = poll(pfdset->rwfds, numfds, 1000 /* millisecs */);
+ pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
if (val < 0)
continue;
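
Both fd_man.c hunks serialize access to the same pollfd array: fdset_shrink_nolock() compacts pfdset->rwfds in place, so it must not run while the dispatch thread is inside poll() on that array. A minimal sketch of the pattern, assuming struct fdset from fd_man.h and <pthread.h>/<poll.h>; both wrapper names are hypothetical:

	/* Compaction side: take the polling lock before rewriting rwfds[]. */
	static void
	fdset_try_shrink(struct fdset *pfdset)
	{
		pthread_mutex_lock(&pfdset->fd_pooling_mutex);
		fdset_shrink_nolock(pfdset);	/* moves live fds over stale slots */
		pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
	}

	/* Polling side: the array cannot change layout during the syscall. */
	static int
	fdset_poll_locked(struct fdset *pfdset, int numfds)
	{
		int val;

		pthread_mutex_lock(&pfdset->fd_pooling_mutex);
		val = poll(pfdset->rwfds, numfds, 1000 /* millisecs */);
		pthread_mutex_unlock(&pfdset->fd_pooling_mutex);
		return val;
	}
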
diff --git a/lib/librte_vhost/fd_man.h b/lib/librte_vhost/fd_man.h
index 3331bcd9..3ab5cfdd 100644
--- a/lib/librte_vhost/fd_man.h
+++ b/lib/librte_vhost/fd_man.h
@@ -24,6 +24,7 @@ struct fdset {
struct pollfd rwfds[MAX_FDS];
struct fdentry fd[MAX_FDS];
pthread_mutex_t fd_mutex;
+ pthread_mutex_t fd_pooling_mutex;
int num; /* current fd number of this fdset */
union pipefds {
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 01b60ff9..9883b049 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -90,6 +90,7 @@ static struct vhost_user vhost_user = {
.fdset = {
.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
.num = 0
},
.vsocket_cnt = 0,
@@ -960,13 +961,13 @@ rte_vhost_driver_unregister(const char *path)
int count;
struct vhost_user_connection *conn, *next;
+again:
pthread_mutex_lock(&vhost_user.mutex);
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
if (!strcmp(vsocket->path, path)) {
-again:
pthread_mutex_lock(&vsocket->conn_mutex);
for (conn = TAILQ_FIRST(&vsocket->conn_list);
conn != NULL;
@@ -982,6 +983,7 @@ again:
conn->connfd) == -1) {
pthread_mutex_unlock(
&vsocket->conn_mutex);
+ pthread_mutex_unlock(&vhost_user.mutex);
goto again;
}
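
Why the new unlock is needed: with the again: label moved above pthread_mutex_lock(&vhost_user.mutex), the retry path re-enters through the lock, so both mutexes must be released first. Previously goto again jumped back inside the critical section while vhost_user.mutex was still held, and the function could deadlock against itself on the non-recursive mutex. The fixed retry shape, as a sketch (must_retry stands in for the fdset_try_del() failure in the patch; bodies elided):

	again:
		pthread_mutex_lock(&vhost_user.mutex);
		/* ... locate vsocket by path ... */
		pthread_mutex_lock(&vsocket->conn_mutex);
		if (must_retry) {
			pthread_mutex_unlock(&vsocket->conn_mutex);
			pthread_mutex_unlock(&vhost_user.mutex);
			goto again;	/* re-enter with no locks held */
		}
		/* ... tear down connections ... */
		pthread_mutex_unlock(&vsocket->conn_mutex);
		pthread_mutex_unlock(&vhost_user.mutex);
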
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 5218f1b1..552b9298 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -393,8 +393,10 @@ vq_is_packed(struct virtio_net *dev)
static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL) &&
- wrap_counter != !!(desc->flags & VRING_DESC_F_USED);
+ uint16_t flags = *((volatile uint16_t *) &desc->flags);
+
+ return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
+ wrap_counter != !!(flags & VRING_DESC_F_USED);
}
#define VHOST_LOG_PAGE 4096
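
The point of the volatile cast is to force a single load: in the old code the compiler was free to read desc->flags once per test, and a guest updating the descriptor between the two loads could make the AVAIL and USED bits appear inconsistent. A self-contained sketch of the snapshot idiom (flag values per the virtio 1.0 packed-ring spec):

	#include <stdbool.h>
	#include <stdint.h>

	#define VRING_DESC_F_AVAIL	(1 << 7)
	#define VRING_DESC_F_USED	(1 << 15)

	static inline bool
	flags_is_avail(const uint16_t *flags_ptr, bool wrap_counter)
	{
		/* One load; both tests below see the same snapshot. */
		uint16_t flags = *(volatile const uint16_t *)flags_ptr;

		return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
			wrap_counter != !!(flags & VRING_DESC_F_USED);
	}
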
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index dd01afc0..0694c0a7 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -466,12 +466,17 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
}
static __rte_always_inline struct vring_desc *
-find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+find_write_desc(struct vring_desc *head, struct vring_desc *desc,
+ uint32_t *nb_descs, uint32_t vq_size)
{
if (desc->flags & VRING_DESC_F_WRITE)
return desc;
while (desc->flags & VRING_DESC_F_NEXT) {
+ if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
+ return NULL;
+ (*nb_descs)--;
+
desc = &head[desc->next];
if (desc->flags & VRING_DESC_F_WRITE)
return desc;
@@ -481,13 +486,18 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
}
static struct virtio_crypto_inhdr *
-reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
+reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
+ uint32_t *nb_descs, uint32_t vq_size)
{
uint64_t dlen;
struct virtio_crypto_inhdr *inhdr;
- while (desc->flags & VRING_DESC_F_NEXT)
+ while (desc->flags & VRING_DESC_F_NEXT) {
+ if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
+ return NULL;
+ (*nb_descs)--;
desc = &vc_req->head[desc->next];
+ }
dlen = desc->len;
inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
@@ -500,15 +510,16 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
static __rte_always_inline int
move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
- uint32_t size)
+ uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
{
struct vring_desc *desc = *cur_desc;
- int left = size;
-
- rte_prefetch0(&head[desc->next]);
- left -= desc->len;
+ int left = size - desc->len;
while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+ (*nb_descs)--;
+ if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
+ return -1;
+
desc = &head[desc->next];
rte_prefetch0(&head[desc->next]);
left -= desc->len;
@@ -517,7 +528,14 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
if (unlikely(left > 0))
return -1;
- *cur_desc = &head[desc->next];
+ if (unlikely(*nb_descs == 0))
+ *cur_desc = NULL;
+ else {
+ if (unlikely(desc->next >= vq_size))
+ return -1;
+ *cur_desc = &head[desc->next];
+ }
+
return 0;
}
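
Every vhost_crypto.c walker in this patch gets the same two guards, because the descriptor table is guest memory and untrusted: nb_descs starts at the ring (or indirect-table) size and is decremented per hop, which bounds cycles, and desc->next is checked against vq_size before indexing, which bounds the table. A minimal sketch of the step the hunks repeat (next_desc_checked is a hypothetical helper; struct vring_desc is the standard split-ring descriptor):

	/* One guarded hop along a guest-supplied descriptor chain. */
	static struct vring_desc *
	next_desc_checked(struct vring_desc *head, struct vring_desc *desc,
			uint32_t *nb_descs, uint32_t vq_size)
	{
		if (*nb_descs == 0)		/* more hops than descriptors: cycle */
			return NULL;
		if (desc->next >= vq_size)	/* index escapes the table */
			return NULL;
		(*nb_descs)--;
		return &head[desc->next];
	}
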
@@ -539,7 +557,8 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
static int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
- struct vring_desc **cur_desc, uint32_t size)
+ struct vring_desc **cur_desc, uint32_t size,
+ uint32_t *nb_descs, uint32_t vq_size)
{
struct vring_desc *desc = *cur_desc;
uint64_t remain, addr, dlen, len;
@@ -548,7 +567,6 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
uint8_t *src;
int left = size;
- rte_prefetch0(&vc_req->head[desc->next]);
to_copy = RTE_MIN(desc->len, (uint32_t)left);
dlen = to_copy;
src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
@@ -582,6 +600,12 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
left -= to_copy;
while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+ if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
+ VC_LOG_ERR("Invalid descriptors");
+ return -1;
+ }
+ (*nb_descs)--;
+
desc = &vc_req->head[desc->next];
rte_prefetch0(&vc_req->head[desc->next]);
to_copy = RTE_MIN(desc->len, (uint32_t)left);
@@ -624,7 +648,13 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
return -1;
}
- *cur_desc = &vc_req->head[desc->next];
+ if (unlikely(*nb_descs == 0))
+ *cur_desc = NULL;
+ else {
+ if (unlikely(desc->next >= vq_size))
+ return -1;
+ *cur_desc = &vc_req->head[desc->next];
+ }
return 0;
}
@@ -635,7 +665,6 @@ write_back_data(struct vhost_crypto_data_req *vc_req)
struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;
while (wb_data) {
- rte_prefetch0(wb_data->next);
rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
wb_last = wb_data;
wb_data = wb_data->next;
@@ -684,7 +713,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
struct vhost_crypto_writeback_data **end_wb_data,
uint8_t *src,
uint32_t offset,
- uint64_t write_back_len)
+ uint64_t write_back_len,
+ uint32_t *nb_descs, uint32_t vq_size)
{
struct vhost_crypto_writeback_data *wb_data, *head;
struct vring_desc *desc = *cur_desc;
@@ -731,6 +761,12 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
offset -= desc->len;
while (write_back_len) {
+ if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
+ VC_LOG_ERR("Invalid descriptors");
+ goto error_exit;
+ }
+ (*nb_descs)--;
+
desc = &vc_req->head[desc->next];
if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
VC_LOG_ERR("incorrect descriptor");
@@ -770,7 +806,13 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
wb_data->next = NULL;
}
- *cur_desc = &vc_req->head[desc->next];
+ if (unlikely(*nb_descs == 0))
+ *cur_desc = NULL;
+ else {
+ if (unlikely(desc->next >= vq_size))
+ goto error_exit;
+ *cur_desc = &vc_req->head[desc->next];
+ }
*end_wb_data = wb_data;
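
The matching epilogue, repeated in move_desc(), copy_data() and prepare_write_back_data(): when the budget is exactly spent, the walk ends cleanly with *cur_desc = NULL instead of pointing one slot past the table, and callers treat NULL as end of chain. Sketch of the shared tail (error handling varies per function):

	if (*nb_descs == 0) {
		*cur_desc = NULL;		/* clean end of chain */
	} else {
		if (desc->next >= vq_size)
			return -1;		/* corrupt next pointer */
		*cur_desc = &head[desc->next];
	}
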
@@ -787,7 +829,8 @@ static uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct vhost_crypto_data_req *vc_req,
struct virtio_crypto_cipher_data_req *cipher,
- struct vring_desc *cur_desc)
+ struct vring_desc *cur_desc,
+ uint32_t *nb_descs, uint32_t vq_size)
{
struct vring_desc *desc = cur_desc;
struct vhost_crypto_writeback_data *ewb = NULL;
@@ -797,8 +840,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
/* prepare */
/* iv */
- if (unlikely(copy_data(iv_data, vc_req, &desc,
- cipher->para.iv_len) < 0)) {
+ if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
+ nb_descs, vq_size) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -818,7 +861,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
if (unlikely(move_desc(vc_req->head, &desc,
- cipher->para.src_data_len) < 0)) {
+ cipher->para.src_data_len, nb_descs,
+ vq_size) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -835,8 +879,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
goto error_exit;
}
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
- vc_req, &desc, cipher->para.src_data_len)
- < 0)) {
+ vc_req, &desc, cipher->para.src_data_len,
+ nb_descs, vq_size) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -847,7 +891,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
/* dst */
- desc = find_write_desc(vc_req->head, desc);
+ desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
if (unlikely(!desc)) {
VC_LOG_ERR("Cannot find write location");
ret = VIRTIO_CRYPTO_BADMSG;
@@ -866,7 +910,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
if (unlikely(move_desc(vc_req->head, &desc,
- cipher->para.dst_data_len) < 0)) {
+ cipher->para.dst_data_len,
+ nb_descs, vq_size) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -877,7 +922,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
rte_pktmbuf_mtod(m_src, uint8_t *), 0,
- cipher->para.dst_data_len);
+ cipher->para.dst_data_len, nb_descs, vq_size);
if (unlikely(vc_req->wb == NULL)) {
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -919,7 +964,8 @@ static uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct vhost_crypto_data_req *vc_req,
struct virtio_crypto_alg_chain_data_req *chain,
- struct vring_desc *cur_desc)
+ struct vring_desc *cur_desc,
+ uint32_t *nb_descs, uint32_t vq_size)
{
struct vring_desc *desc = cur_desc, *digest_desc;
struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
@@ -932,7 +978,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
/* prepare */
/* iv */
if (unlikely(copy_data(iv_data, vc_req, &desc,
- chain->para.iv_len) < 0)) {
+ chain->para.iv_len, nb_descs, vq_size) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -953,7 +999,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
if (unlikely(move_desc(vc_req->head, &desc,
- chain->para.src_data_len) < 0)) {
+ chain->para.src_data_len,
+ nb_descs, vq_size) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -969,7 +1016,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
goto error_exit;
}
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
- vc_req, &desc, chain->para.src_data_len)) < 0) {
+ vc_req, &desc, chain->para.src_data_len,
+ nb_descs, vq_size)) < 0) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -981,7 +1029,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
/* dst */
- desc = find_write_desc(vc_req->head, desc);
+ desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
if (unlikely(!desc)) {
VC_LOG_ERR("Cannot find write location");
ret = VIRTIO_CRYPTO_BADMSG;
@@ -1000,7 +1048,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
if (unlikely(move_desc(vc_req->head, &desc,
- chain->para.dst_data_len) < 0)) {
+ chain->para.dst_data_len,
+ nb_descs, vq_size) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -1017,7 +1066,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
if (unlikely(move_desc(vc_req->head, &desc,
- chain->para.hash_result_len) < 0)) {
+ chain->para.hash_result_len,
+ nb_descs, vq_size) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -1029,7 +1079,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
rte_pktmbuf_mtod(m_src, uint8_t *),
chain->para.cipher_start_src_offset,
chain->para.dst_data_len -
- chain->para.cipher_start_src_offset);
+ chain->para.cipher_start_src_offset,
+ nb_descs, vq_size);
if (unlikely(vc_req->wb == NULL)) {
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -1042,14 +1093,16 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
/** create a wb_data for digest */
ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
- digest_addr, 0, chain->para.hash_result_len);
+ digest_addr, 0, chain->para.hash_result_len,
+ nb_descs, vq_size);
if (unlikely(ewb->next == NULL)) {
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
- chain->para.hash_result_len)) < 0) {
+ chain->para.hash_result_len,
+ nb_descs, vq_size)) < 0) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -1108,6 +1161,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
struct vring_desc *desc = NULL;
uint64_t session_id;
uint64_t dlen;
+ uint32_t nb_descs = vq->size;
int err = 0;
vc_req->desc_idx = desc_idx;
@@ -1116,6 +1170,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
dlen = head->len;
+ nb_descs = dlen / sizeof(struct vring_desc);
+ /* drop invalid descriptors */
+ if (unlikely(nb_descs > vq->size))
+ return -1;
desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
&dlen, VHOST_ACCESS_RO);
if (unlikely(!desc || dlen != head->len))
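
For an indirect chain the budget cannot come from vq->size alone: the chain lives in a separate guest-provided table whose length is head->len bytes, so the patch derives nb_descs from that length and rejects tables claiming more entries than the ring allows. Sketch of the derivation, using the names from the hunk above:

	uint32_t nb_descs = vq->size;	/* direct chain: budget is the ring size */

	if (head->flags & VRING_DESC_F_INDIRECT) {
		uint64_t dlen = head->len;	/* indirect table length in bytes */

		nb_descs = dlen / sizeof(struct vring_desc);
		if (nb_descs > vq->size)	/* table larger than the ring */
			return -1;
	}
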
@@ -1138,8 +1196,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
goto error_exit;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
req = &tmp_req;
- if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req))
- < 0)) {
+ if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
+ &nb_descs, vq->size) < 0)) {
err = VIRTIO_CRYPTO_BADMSG;
VC_LOG_ERR("Invalid descriptor");
goto error_exit;
@@ -1152,7 +1210,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
}
} else {
if (unlikely(move_desc(vc_req->head, &desc,
- sizeof(*req)) < 0)) {
+ sizeof(*req), &nb_descs, vq->size) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
goto error_exit;
}
@@ -1193,11 +1251,13 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
break;
case VIRTIO_CRYPTO_SYM_OP_CIPHER:
err = prepare_sym_cipher_op(vcrypto, op, vc_req,
- &req->u.sym_req.u.cipher, desc);
+ &req->u.sym_req.u.cipher, desc,
+ &nb_descs, vq->size);
break;
case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
err = prepare_sym_chain_op(vcrypto, op, vc_req,
- &req->u.sym_req.u.chain, desc);
+ &req->u.sym_req.u.chain, desc,
+ &nb_descs, vq->size);
break;
}
if (unlikely(err != 0)) {
@@ -1215,7 +1275,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
error_exit:
- inhdr = reach_inhdr(vc_req, desc);
+ inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size);
if (likely(inhdr != NULL))
inhdr->status = (uint8_t)err;
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 3ea64eba..19e04c95 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -489,6 +489,9 @@ qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
struct rte_vhost_mem_region *r;
uint32_t i;
+ if (unlikely(!dev || !dev->mem))
+ goto out_error;
+
/* Find the region where the address lives. */
for (i = 0; i < dev->mem->nregions; i++) {
r = &dev->mem->regions[i];
@@ -503,6 +506,7 @@ qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
r->host_user_addr;
}
}
+out_error:
*len = 0;
return 0;
@@ -537,7 +541,7 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
struct vhost_vring_addr *addr = &vq->ring_addrs;
- uint64_t len;
+ uint64_t len, expected_len;
if (vq_is_packed(dev)) {
len = sizeof(struct vring_packed_desc) * vq->size;
@@ -603,11 +607,12 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
addr = &vq->ring_addrs;
len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ len += sizeof(uint16_t);
+ expected_len = len;
vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->avail_user_addr, &len);
- if (vq->avail == 0 ||
- len != sizeof(struct vring_avail) +
- sizeof(uint16_t) * vq->size) {
+ if (vq->avail == 0 || len != expected_len) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map avail ring.\n",
dev->vid);
@@ -616,10 +621,12 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
len = sizeof(struct vring_used) +
sizeof(struct vring_used_elem) * vq->size;
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ len += sizeof(uint16_t);
+ expected_len = len;
vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->used_user_addr, &len);
- if (vq->used == 0 || len != sizeof(struct vring_used) +
- sizeof(struct vring_used_elem) * vq->size) {
+ if (vq->used == 0 || len != expected_len) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map used ring.\n",
dev->vid);
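
The extra uint16_t exists because, with VIRTIO_RING_F_EVENT_IDX negotiated, each split ring carries one trailing event field per the virtio spec: used_event after the avail ring and avail_event after the used ring. The old exact-length check therefore rejected correctly sized mappings. Expected-length sketch for the avail ring (the used ring is symmetric):

	/* avail ring: flags, idx, ring[vq->size], then used_event iff EVENT_IDX */
	len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		len += sizeof(uint16_t);	/* trailing used_event */
	expected_len = len;	/* ring_addr_to_vva() may return a shorter len */
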
@@ -726,13 +733,16 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
uint64_t host_phys_addr, uint64_t size)
{
struct guest_page *page, *last_page;
+ struct guest_page *old_pages;
if (dev->nr_guest_pages == dev->max_guest_pages) {
dev->max_guest_pages *= 2;
+ old_pages = dev->guest_pages;
dev->guest_pages = realloc(dev->guest_pages,
dev->max_guest_pages * sizeof(*page));
if (!dev->guest_pages) {
RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
+ free(old_pages);
return -1;
}
}
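
This is the standard realloc() leak: on failure realloc returns NULL but leaves the old block allocated, so overwriting the only pointer with the return value orphans it. The patch keeps the old pointer and frees it on failure, since device setup is abandoned anyway; the keep-and-continue variant of the safe pattern looks like this (a self-contained sketch, grow_buffer is hypothetical):

	#include <stdlib.h>

	/* Grow *buf to new_size bytes without leaking the old block on failure. */
	static int
	grow_buffer(void **buf, size_t new_size)
	{
		void *tmp = realloc(*buf, new_size);

		if (tmp == NULL)
			return -1;	/* *buf is still valid and still owned */
		*buf = tmp;
		return 0;
	}
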
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 5e1a1a72..15d682c3 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -335,13 +335,22 @@ fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t vec_id = *vec_idx;
uint32_t len = 0;
uint64_t dlen;
+ uint32_t nr_descs = vq->size;
+ uint32_t cnt = 0;
struct vring_desc *descs = vq->desc;
struct vring_desc *idesc = NULL;
+ if (unlikely(idx >= vq->size))
+ return -1;
+
*desc_chain_head = idx;
if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
dlen = vq->desc[idx].len;
+ nr_descs = dlen / sizeof(struct vring_desc);
+ if (unlikely(nr_descs > vq->size))
+ return -1;
+
descs = (struct vring_desc *)(uintptr_t)
vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
&dlen,
@@ -366,7 +375,7 @@ fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
while (1) {
- if (unlikely(idx >= vq->size)) {
+ if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
free_ind_table(idesc);
return -1;
}
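
The extra cnt counter matters independently of the index check: a chain whose next pointers all stay in range can still form a cycle, and only counting total hops catches that. The loop shape, as a sketch:

	uint32_t cnt = 0;

	while (1) {
		if (idx >= nr_descs || cnt++ >= nr_descs)
			return -1;	/* out of range, or more hops than
					 * entries exist: chain has a cycle */
		/* ... consume descs[idx], append to buf_vec ... */
		if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
			break;
		idx = descs[idx].next;
	}
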
@@ -520,6 +529,12 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
return -1;
+ /*
+ * The ordering between desc flags and desc
+ * content reads needs to be enforced.
+ */
+ rte_smp_rmb();
+
*desc_count = 0;
*len = 0;
@@ -527,6 +542,9 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (unlikely(vec_id >= BUF_VECTOR_MAX))
return -1;
+ if (unlikely(*desc_count >= vq->size))
+ return -1;
+
*desc_count += 1;
*buf_id = descs[avail_idx].id;
@@ -791,6 +809,12 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
avail_head = *((volatile uint16_t *)&vq->avail->idx);
+ /*
+ * The ordering between avail index and
+ * desc reads needs to be enforced.
+ */
+ rte_smp_rmb();
+
for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
uint16_t nr_vec = 0;
@@ -1373,6 +1397,12 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (free_entries == 0)
return 0;
+ /*
+ * The ordering between avail index and
+ * desc reads needs to be enforced.
+ */
+ rte_smp_rmb();
+
VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
count = RTE_MIN(count, MAX_PKT_BURST);
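
All three rte_smp_rmb() insertions enforce the same acquire-style ordering on the consumer side of the ring: first load the index or flags word that publishes the descriptors, then fence, then read descriptor contents. Without the fence, a weakly ordered CPU (or the compiler) may hoist the content loads above the index load and observe stale descriptors. The shape, as a sketch (rte_smp_rmb() is DPDK's read barrier; process_descriptors is hypothetical):

	/* 1: load the publication point (the guest wrote this last). */
	avail_head = *(volatile uint16_t *)&vq->avail->idx;

	/* 2: no descriptor read may be reordered before the load above. */
	rte_smp_rmb();

	/* 3: descriptor slots below avail_head are now safe to read. */
	process_descriptors(vq, avail_head);
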