commit    8a853e3f0275efc8b05cb195085d45946942744a (patch, tag: upstream/18.11-rc3)
tree      f7b349012231726c54fedd6c19f9cbf8234a4458 /lib
parent    88fab00d4402af240c1b7cc2566133aece115488 (diff)
author    Luca Boccassi <luca.boccassi@gmail.com>  2018-11-14 11:13:11 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>  2018-11-14 11:13:28 +0000

    New upstream version 18.11-rc3

    Change-Id: I958b9d019027ef049bd992b3968a667f3ae382ae
    Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>

Diffstat (limited to 'lib')
 lib/librte_bpf/bpf_jit_x86.c                          |  28
 lib/librte_eal/common/arch/x86/rte_memcpy.c           |  29
 lib/librte_eal/common/eal_common_dev.c                |   3
 lib/librte_eal/common/eal_common_devargs.c            |  36
 lib/librte_eal/common/eal_common_memory.c             |   7
 lib/librte_eal/common/eal_common_proc.c               |  31
 lib/librte_eal/common/include/arch/x86/rte_rtm.h      |  19
 lib/librte_eal/common/include/arch/x86/rte_spinlock.h |  21
 lib/librte_eal/common/include/rte_common.h            |  19
 lib/librte_eal/common/include/rte_devargs.h           |   4
 lib/librte_eal/common/include/rte_version.h           |   2
 lib/librte_eal/common/rte_reciprocal.c                |  17
 lib/librte_eal/linuxapp/eal/eal_alarm.c               |   2
 lib/librte_eal/linuxapp/eal/eal_memory.c              |   1
 lib/librte_ethdev/rte_ethdev.c                        |  93
 lib/librte_hash/rte_cmp_x86.h                         |   2
 lib/librte_hash/rte_cuckoo_hash.c                     | 307
 lib/librte_pci/rte_pci.c                              |   4
 lib/librte_pipeline/rte_table_action.c                |   5
 lib/librte_ring/rte_ring_c11_mem.h                    |  14
 lib/librte_vhost/vhost_crypto.c                       | 460
 lib/librte_vhost/vhost_user.c                         |   2
 lib/librte_vhost/virtio_net.c                         |   4
 23 files changed, 845 insertions(+), 265 deletions(-)
diff --git a/lib/librte_bpf/bpf_jit_x86.c b/lib/librte_bpf/bpf_jit_x86.c
index 68ea389f..f70cd6be 100644
--- a/lib/librte_bpf/bpf_jit_x86.c
+++ b/lib/librte_bpf/bpf_jit_x86.c
@@ -209,6 +209,19 @@ emit_sib(struct bpf_jit_state *st, uint32_t scale, uint32_t idx, uint32_t base)
}
/*
+ * emit OPCODE+REGIDX byte
+ */
+static void
+emit_opcode(struct bpf_jit_state *st, uint8_t ops, uint32_t reg)
+{
+ uint8_t v;
+
+ v = ops | (reg & 7);
+ emit_bytes(st, &v, sizeof(v));
+}
+
+
+/*
* emit xchg %<sreg>, %<dreg>
*/
static void
@@ -472,19 +485,18 @@ static void
emit_ld_imm64(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm0,
uint32_t imm1)
{
+ uint32_t op;
+
const uint8_t ops = 0xB8;
- if (imm1 == 0) {
- emit_mov_imm(st, EBPF_ALU64 | EBPF_MOV | BPF_K, dreg, imm0);
- return;
- }
+ op = (imm1 == 0) ? BPF_ALU : EBPF_ALU64;
- emit_rex(st, EBPF_ALU64, 0, dreg);
- emit_bytes(st, &ops, sizeof(ops));
- emit_modregrm(st, MOD_DIRECT, 0, dreg);
+ emit_rex(st, op, 0, dreg);
+ emit_opcode(st, ops, dreg);
emit_imm(st, imm0, sizeof(imm0));
- emit_imm(st, imm1, sizeof(imm1));
+ if (imm1 != 0)
+ emit_imm(st, imm1, sizeof(imm1));
}
/*
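
The rewrite above leans on how x86 encodes mov reg, imm: opcode 0xB8+rd carries the register in the low three opcode bits (hence the new emit_opcode() helper and no ModRM byte), and the immediate stays 32 bits unless a REX.W prefix widens it to 64, so a value with a zero upper word can use the shorter form. A minimal standalone sketch of that encoding choice, not part of this diff (the buffer handling is hypothetical):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static size_t
    encode_mov_imm(uint8_t *buf, uint32_t reg, uint64_t imm)
    {
        size_t n = 0;

        if (imm > UINT32_MAX) {
            buf[n++] = 0x48 | ((reg & 8) >> 3); /* REX.W (+ REX.B) */
            buf[n++] = 0xB8 | (reg & 7);        /* opcode + regidx */
            memcpy(buf + n, &imm, 8);           /* imm64 */
            return n + 8;
        }
        if (reg & 8)
            buf[n++] = 0x41;                    /* REX.B for r8-r15 */
        buf[n++] = 0xB8 | (reg & 7);
        uint32_t imm32 = (uint32_t)imm;
        memcpy(buf + n, &imm32, 4);             /* imm32, zero-extends */
        return n + 4;
    }

    int main(void)
    {
        uint8_t buf[16];
        size_t n = encode_mov_imm(buf, 0, 0x1234); /* mov eax, 0x1234 */

        for (size_t i = 0; i < n; i++)
            printf("%02x ", buf[i]);
        printf("\n"); /* prints: b8 34 12 00 00 */
        return 0;
    }
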
diff --git a/lib/librte_eal/common/arch/x86/rte_memcpy.c b/lib/librte_eal/common/arch/x86/rte_memcpy.c
deleted file mode 100644
index 648c8f68..00000000
--- a/lib/librte_eal/common/arch/x86/rte_memcpy.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2017 Intel Corporation
- */
-
-#include <rte_memcpy.h>
-#include <rte_cpuflags.h>
-#include <rte_log.h>
-
-void *(*rte_memcpy_ptr)(void *dst, const void *src, size_t n) = NULL;
-
-RTE_INIT(rte_memcpy_init)
-{
-#ifdef CC_SUPPORT_AVX512F
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) {
- rte_memcpy_ptr = rte_memcpy_avx512f;
- RTE_LOG(DEBUG, EAL, "AVX512 memcpy is using!\n");
- return;
- }
-#endif
-#ifdef CC_SUPPORT_AVX2
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
- rte_memcpy_ptr = rte_memcpy_avx2;
- RTE_LOG(DEBUG, EAL, "AVX2 memcpy is using!\n");
- return;
- }
-#endif
- rte_memcpy_ptr = rte_memcpy_sse;
- RTE_LOG(DEBUG, EAL, "Default SSE/AVX memcpy is using!\n");
-}
diff --git a/lib/librte_eal/common/eal_common_dev.c b/lib/librte_eal/common/eal_common_dev.c
index 5759ec2d..1fdc9ab1 100644
--- a/lib/librte_eal/common/eal_common_dev.c
+++ b/lib/librte_eal/common/eal_common_dev.c
@@ -150,10 +150,11 @@ local_dev_probe(const char *devargs, struct rte_device **new_dev)
goto err_devarg;
}
- ret = rte_devargs_insert(da);
+ ret = rte_devargs_insert(&da);
if (ret)
goto err_devarg;
+ /* the rte_devargs will be referenced in the matching rte_device */
ret = da->bus->scan();
if (ret)
goto err_devarg;
diff --git a/lib/librte_eal/common/eal_common_devargs.c b/lib/librte_eal/common/eal_common_devargs.c
index b7b9cb69..1ccf12dc 100644
--- a/lib/librte_eal/common/eal_common_devargs.c
+++ b/lib/librte_eal/common/eal_common_devargs.c
@@ -263,14 +263,38 @@ rte_devargs_parsef(struct rte_devargs *da, const char *format, ...)
}
int __rte_experimental
-rte_devargs_insert(struct rte_devargs *da)
+rte_devargs_insert(struct rte_devargs **da)
{
- int ret;
+ struct rte_devargs *listed_da;
+ void *tmp;
+
+ if (*da == NULL || (*da)->bus == NULL)
+ return -1;
- ret = rte_devargs_remove(da);
- if (ret < 0)
- return ret;
- TAILQ_INSERT_TAIL(&devargs_list, da, next);
+ TAILQ_FOREACH_SAFE(listed_da, &devargs_list, next, tmp) {
+ if (listed_da == *da)
+ /* devargs already in the list */
+ return 0;
+ if (strcmp(listed_da->bus->name, (*da)->bus->name) == 0 &&
+ strcmp(listed_da->name, (*da)->name) == 0) {
+ /* device already in devargs list, must be updated */
+ listed_da->type = (*da)->type;
+ listed_da->policy = (*da)->policy;
+ free(listed_da->args);
+ listed_da->args = (*da)->args;
+ listed_da->bus = (*da)->bus;
+ listed_da->cls = (*da)->cls;
+ listed_da->bus_str = (*da)->bus_str;
+ listed_da->cls_str = (*da)->cls_str;
+ listed_da->data = (*da)->data;
+ /* replace provided devargs with found one */
+ free(*da);
+ *da = listed_da;
+ return 0;
+ }
+ }
+ /* new device in the list */
+ TAILQ_INSERT_TAIL(&devargs_list, *da, next);
return 0;
}
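
Note the contract change this implements: rte_devargs_insert() now takes a struct rte_devargs ** because it may free the object passed in and re-point *da at the instance already stored in devargs_list, which is what local_dev_probe() above relies on. A hedged caller sketch (assuming the plain-string rte_devargs_parse() of this release):

    struct rte_devargs *da = calloc(1, sizeof(*da));

    if (da == NULL || rte_devargs_parse(da, "0000:08:00.0") < 0) {
        free(da);                        /* never inserted, still ours */
        return -1;
    }
    if (rte_devargs_insert(&da) < 0)     /* may free da and re-point *da */
        return -1;
    /* use only 'da' from here on; a copy taken before the insert may dangle */
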
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 87fd9921..d47ea493 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -439,11 +439,7 @@ check_iova(const struct rte_memseg_list *msl __rte_unused,
return 1;
}
-#if defined(RTE_ARCH_64)
#define MAX_DMA_MASK_BITS 63
-#else
-#define MAX_DMA_MASK_BITS 31
-#endif
/* check memseg iovas are within the required range based on dma mask */
static int __rte_experimental
@@ -453,7 +449,8 @@ check_dma_mask(uint8_t maskbits, bool thread_unsafe)
uint64_t mask;
int ret;
- /* sanity check */
+	/* Sanity check: only mask widths that fit in a 64-bit variable can
+	 * be handled here; any larger value is almost certainly a bug. */
if (maskbits > MAX_DMA_MASK_BITS) {
RTE_LOG(ERR, EAL, "wrong dma mask size %u (Max: %u)\n",
maskbits, MAX_DMA_MASK_BITS);
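
Collapsing MAX_DMA_MASK_BITS to a single 63 works because the check only needs the mask to be representable in 64-bit arithmetic; capping at 63 also keeps the shift strictly below the width of uint64_t, where it would otherwise be undefined behaviour. A standalone sketch of the arithmetic the check relies on:

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    iova_fits_mask(uint64_t iova, uint8_t maskbits)
    {
        uint64_t mask;

        if (maskbits == 0 || maskbits > 63)
            return false;                    /* outside supported range */
        mask = ~((1ULL << maskbits) - 1);    /* bits above the DMA window */
        return (iova & mask) == 0;
    }
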
diff --git a/lib/librte_eal/common/eal_common_proc.c b/lib/librte_eal/common/eal_common_proc.c
index 97663d3b..f65ef56c 100644
--- a/lib/librte_eal/common/eal_common_proc.c
+++ b/lib/librte_eal/common/eal_common_proc.c
@@ -800,7 +800,7 @@ mp_request_async(const char *dst, struct rte_mp_msg *req,
{
struct rte_mp_msg *reply_msg;
struct pending_request *pending_req, *exist;
- int ret;
+ int ret = -1;
pending_req = calloc(1, sizeof(*pending_req));
reply_msg = calloc(1, sizeof(*reply_msg));
@@ -827,6 +827,28 @@ mp_request_async(const char *dst, struct rte_mp_msg *req,
goto fail;
}
+ /*
+ * set the alarm before sending message. there are two possible error
+ * scenarios to consider here:
+ *
+ * - if the alarm set fails, we free the memory right there
+ * - if the alarm set succeeds but sending message fails, then the alarm
+ * will trigger and clean up the memory
+ *
+ * Even if the alarm triggers too early (i.e. immediately), we're still
+ * holding the lock to pending requests queue, so the interrupt thread
+ * will just spin until we release the lock, and either release the
+ * memory, or doesn't find any pending requests in the queue because we
+ * never added any due to send message failure.
+ */
+ if (rte_eal_alarm_set(ts->tv_sec * 1000000 + ts->tv_nsec / 1000,
+ async_reply_handle, pending_req) < 0) {
+ RTE_LOG(ERR, EAL, "Fail to set alarm for request %s:%s\n",
+ dst, req->name);
+ ret = -1;
+ goto fail;
+ }
+
ret = send_msg(dst, req, MP_REQ);
if (ret < 0) {
RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
@@ -841,13 +863,6 @@ mp_request_async(const char *dst, struct rte_mp_msg *req,
param->user_reply.nb_sent++;
- if (rte_eal_alarm_set(ts->tv_sec * 1000000 + ts->tv_nsec / 1000,
- async_reply_handle, pending_req) < 0) {
- RTE_LOG(ERR, EAL, "Fail to set alarm for request %s:%s\n",
- dst, req->name);
- rte_panic("Fix the above shit to properly free all memory\n");
- }
-
return 0;
fail:
free(pending_req);
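
The reordering above is a general ownership rule: arm the timeout that owns the cleanup before the action that can fail, so the allocation has exactly one owner on every path and the old rte_panic() becomes unreachable by construction. A generic sketch (arm_timeout(), send_request() and cleanup_cb are hypothetical, not the EAL API):

    struct request *req = calloc(1, sizeof(*req));

    if (req == NULL)
        return -1;
    if (arm_timeout(timeout_us, cleanup_cb, req) < 0) {
        free(req);          /* timer never armed: we still own req */
        return -1;
    }
    if (send_request(req) < 0)
        return -1;          /* cleanup_cb owns req and will free it */
    return 0;
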
diff --git a/lib/librte_eal/common/include/arch/x86/rte_rtm.h b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
index ab099952..eb0f8e81 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_rtm.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
@@ -1,21 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2012,2013 Intel Corporation
+ */
+
#ifndef _RTE_RTM_H_
#define _RTE_RTM_H_ 1
-/*
- * Copyright (c) 2012,2013 Intel Corporation
- * Author: Andi Kleen
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that: (1) source code distributions
- * retain the above copyright notice and this paragraph in its entirety, (2)
- * distributions including binary code include the above copyright notice and
- * this paragraph in its entirety in the documentation or other materials
- * provided with the distribution
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
/* Official RTM intrinsics interface matching gcc/icc, but works
on older gcc compatible compilers and binutils. */
diff --git a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
index 60321da0..e2e2b264 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
@@ -15,8 +15,9 @@ extern "C" {
#include "rte_branch_prediction.h"
#include "rte_common.h"
#include "rte_pause.h"
+#include "rte_cycles.h"
-#define RTE_RTM_MAX_RETRIES (10)
+#define RTE_RTM_MAX_RETRIES (20)
#define RTE_XABORT_LOCK_BUSY (0xff)
#ifndef RTE_FORCE_INTRINSICS
@@ -76,7 +77,7 @@ static inline int rte_tm_supported(void)
static inline int
rte_try_tm(volatile int *lock)
{
- int retries;
+ int i, retries;
if (!rte_rtm_supported)
return 0;
@@ -96,9 +97,21 @@ rte_try_tm(volatile int *lock)
while (*lock)
rte_pause();
- if ((status & RTE_XABORT_EXPLICIT) &&
- (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))
+ if ((status & RTE_XABORT_CONFLICT) ||
+ ((status & RTE_XABORT_EXPLICIT) &&
+ (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))) {
+			/* Add a small delay before retrying, scaling it with
+			 * the number of attempts so far to give exponential
+			 * back-off; the pause count is randomized with
+			 * low-order bits of the TSC.
+			 */
+ int try_count = RTE_RTM_MAX_RETRIES - retries;
+ int pause_count = (rte_rdtsc() & 0x7) | 1;
+ pause_count <<= try_count;
+ for (i = 0; i < pause_count; i++)
+ rte_pause();
continue;
+ }
if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
break;
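
For reference, the delay added above grows exponentially with the attempt number and is seeded with a small odd value from the TSC so that competing threads desynchronize instead of aborting each other's transactions in lockstep. A standalone sketch using the equivalent compiler intrinsics (assumed available on x86 GCC/clang):

    #include <stdint.h>
    #include <x86intrin.h>

    static void
    backoff_pause(int try_count) /* 0, 1, 2, ... per failed attempt */
    {
        int pause_count = (__rdtsc() & 0x7) | 1; /* odd seed, 1..7 */
        int i;

        pause_count <<= try_count;               /* exponential growth */
        for (i = 0; i < pause_count; i++)
            _mm_pause();                         /* as rte_pause() does */
    }
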
diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h
index cba7bbc1..87f0f630 100644
--- a/lib/librte_eal/common/include/rte_common.h
+++ b/lib/librte_eal/common/include/rte_common.h
@@ -473,6 +473,25 @@ rte_log2_u32(uint32_t v)
return rte_bsf32(v);
}
+
+/**
+ * Return the last (most-significant) bit set.
+ *
+ * @note The last (most significant) bit is at position 32.
+ * @note rte_fls_u32(0) = 0, rte_fls_u32(1) = 1, rte_fls_u32(0x80000000) = 32
+ *
+ * @param x
+ * The input parameter.
+ * @return
+ * The last (most-significant) bit set, or 0 if the input is 0.
+ */
+static inline int
+rte_fls_u32(uint32_t x)
+{
+ return (x == 0) ? 0 : 32 - __builtin_clz(x);
+}
+
+
#ifndef offsetof
/** Return the offset of a field in a structure. */
#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER)
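
The @note lines above pin down the convention: rte_fls_u32(x) equals floor(log2(x)) + 1 for x > 0 and 0 for x == 0, i.e. bit positions are 1-based, unlike the 0-based rte_bsf32() used by rte_log2_u32() above. A few sanity checks, assuming rte_common.h is included:

    #include <assert.h>

    static void
    fls_examples(void)
    {
        assert(rte_fls_u32(0) == 0);
        assert(rte_fls_u32(1) == 1);             /* bit 1 */
        assert(rte_fls_u32(6) == 3);             /* 0b110: highest is bit 3 */
        assert(rte_fls_u32(0x80000000u) == 32);  /* MSB */
    }
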
diff --git a/lib/librte_eal/common/include/rte_devargs.h b/lib/librte_eal/common/include/rte_devargs.h
index b1f121f8..29b3fb7c 100644
--- a/lib/librte_eal/common/include/rte_devargs.h
+++ b/lib/librte_eal/common/include/rte_devargs.h
@@ -146,6 +146,8 @@ __attribute__((format(printf, 2, 0)));
*
* @param da
* The devargs structure to insert.
+ * If a devargs for the same device is already inserted,
+ * it will be updated and returned. It means *da pointer can change.
*
* @return
* - 0 on success
@@ -153,7 +155,7 @@ __attribute__((format(printf, 2, 0)));
*/
__rte_experimental
int
-rte_devargs_insert(struct rte_devargs *da);
+rte_devargs_insert(struct rte_devargs **da);
/**
* Add a device to the user device list
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index 80c516d3..fc26e97a 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -49,7 +49,7 @@ extern "C" {
* 0-15 = release candidates
* 16 = release
*/
-#define RTE_VER_RELEASE 2
+#define RTE_VER_RELEASE 3
/**
* Macro to compute a version number usable for comparisons
diff --git a/lib/librte_eal/common/rte_reciprocal.c b/lib/librte_eal/common/rte_reciprocal.c
index d81b11db..f017d0c2 100644
--- a/lib/librte_eal/common/rte_reciprocal.c
+++ b/lib/librte_eal/common/rte_reciprocal.c
@@ -41,28 +41,13 @@
#include "rte_reciprocal.h"
-/* find largest set bit.
- * portable and slow but does not matter for this usage.
- */
-static inline int fls(uint32_t x)
-{
- int b;
-
- for (b = 31; b >= 0; --b) {
- if (x & (1u << b))
- return b + 1;
- }
-
- return 0;
-}
-
struct rte_reciprocal rte_reciprocal_value(uint32_t d)
{
struct rte_reciprocal R;
uint64_t m;
int l;
- l = fls(d - 1);
+ l = rte_fls_u32(d - 1);
m = ((1ULL << 32) * ((1ULL << l) - d));
m /= d;
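
Worked example of the two lines above for d = 10, using only what this hunk shows:

    l = rte_fls_u32(10 - 1) = 4               /* 2^4 = 16, smallest power of two >= 10 */
    m = (2^32 * (16 - 10)) / 10 = 2576980377  /* = 0x99999999 */

The rest of rte_reciprocal_value() (unchanged here) turns m into the multiply-and-shift constants that replace the runtime division by d.
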
diff --git a/lib/librte_eal/linuxapp/eal/eal_alarm.c b/lib/librte_eal/linuxapp/eal/eal_alarm.c
index 391d2a65..840ede78 100644
--- a/lib/librte_eal/linuxapp/eal/eal_alarm.c
+++ b/lib/librte_eal/linuxapp/eal/eal_alarm.c
@@ -30,7 +30,9 @@
#define NS_PER_US 1000
#define US_PER_MS 1000
#define MS_PER_S 1000
+#ifndef US_PER_S
#define US_PER_S (US_PER_MS * MS_PER_S)
+#endif
#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index c1b5e079..48b23ce1 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -1617,6 +1617,7 @@ eal_legacy_hugepage_init(void)
tmp_hp = NULL;
munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
+ hugepage = NULL;
/* we're not going to allocate more pages, so release VA space for
* unused memseg lists
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index 8eaa5fcc..5f858174 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -1092,8 +1092,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
- struct rte_eth_conf local_conf = *dev_conf;
+ struct rte_eth_conf orig_conf;
int diag;
+ int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
@@ -1102,6 +1103,22 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+ if (dev->data->dev_started) {
+ RTE_ETHDEV_LOG(ERR,
+ "Port %u must be stopped to allow configuration\n",
+ port_id);
+ return -EBUSY;
+ }
+
+ /* Store original config, as rollback required on failure */
+ memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
+
+ /*
+ * Copy the dev_conf parameter into the dev structure.
+ * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
+ */
+ memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+
rte_eth_dev_info_get(port_id, &dev_info);
/* If number of queues specified by application for both Rx and Tx is
@@ -1123,26 +1140,18 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
RTE_ETHDEV_LOG(ERR,
"Number of RX queues requested (%u) is greater than max supported(%d)\n",
nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
RTE_ETHDEV_LOG(ERR,
"Number of TX queues requested (%u) is greater than max supported(%d)\n",
nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
- return -EINVAL;
- }
-
- if (dev->data->dev_started) {
- RTE_ETHDEV_LOG(ERR,
- "Port %u must be stopped to allow configuration\n",
- port_id);
- return -EBUSY;
+ ret = -EINVAL;
+ goto rollback;
}
- /* Copy the dev_conf parameter into the dev structure */
- memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
-
/*
* Check that the numbers of RX and TX queues are not greater
* than the maximum number of RX and TX queues supported by the
@@ -1151,13 +1160,15 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
if (nb_rx_q > dev_info.max_rx_queues) {
RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
port_id, nb_rx_q, dev_info.max_rx_queues);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
if (nb_tx_q > dev_info.max_tx_queues) {
RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
port_id, nb_tx_q, dev_info.max_tx_queues);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
/* Check that the device supports requested interrupts */
@@ -1165,32 +1176,36 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
dev->device->driver->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
if ((dev_conf->intr_conf.rmv == 1) &&
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
dev->device->driver->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
/*
* If jumbo frames are enabled, check that the maximum RX packet
* length is supported by the configured device.
*/
- if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
port_id, dev_conf->rxmode.max_rx_pkt_len,
dev_info.max_rx_pktlen);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
port_id, dev_conf->rxmode.max_rx_pkt_len,
(unsigned)ETHER_MIN_LEN);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
} else {
if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
@@ -1201,25 +1216,27 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
}
/* Any requested offloading must be within its device capabilities */
- if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
- local_conf.rxmode.offloads) {
+ if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
+ dev_conf->rxmode.offloads) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
"capabilities 0x%"PRIx64" in %s()\n",
- port_id, local_conf.rxmode.offloads,
+ port_id, dev_conf->rxmode.offloads,
dev_info.rx_offload_capa,
__func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
- if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
- local_conf.txmode.offloads) {
+ if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
+ dev_conf->txmode.offloads) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
"capabilities 0x%"PRIx64" in %s()\n",
- port_id, local_conf.txmode.offloads,
+ port_id, dev_conf->txmode.offloads,
dev_info.tx_offload_capa,
__func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
/* Check that device supports requested rss hash functions. */
@@ -1230,7 +1247,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
dev_info.flow_type_rss_offloads);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
/*
@@ -1241,7 +1259,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
RTE_ETHDEV_LOG(ERR,
"Port%u rte_eth_dev_rx_queue_config = %d\n",
port_id, diag);
- return diag;
+ ret = diag;
+ goto rollback;
}
diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
@@ -1250,7 +1269,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
"Port%u rte_eth_dev_tx_queue_config = %d\n",
port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
- return diag;
+ ret = diag;
+ goto rollback;
}
diag = (*dev->dev_ops->dev_configure)(dev);
@@ -1259,7 +1279,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
rte_eth_dev_tx_queue_config(dev, 0);
- return eth_err(port_id, diag);
+ ret = eth_err(port_id, diag);
+ goto rollback;
}
/* Initialize Rx profiling if enabled at compilation time. */
@@ -1269,10 +1290,16 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
rte_eth_dev_tx_queue_config(dev, 0);
- return eth_err(port_id, diag);
+ ret = eth_err(port_id, diag);
+ goto rollback;
}
return 0;
+
+rollback:
+ memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
+
+ return ret;
}
void
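
The whole set of hunks above implements one pattern: snapshot the live configuration, install the caller's copy, then restore the snapshot on any failure, so a rejected rte_eth_dev_configure() leaves dev->data->dev_conf exactly as it was. Reduced to a sketch (validate_and_apply() is a hypothetical stand-in for the checks and driver callback):

    static int
    configure_with_rollback(struct rte_eth_dev *dev,
            const struct rte_eth_conf *dev_conf)
    {
        struct rte_eth_conf orig_conf;
        int ret;

        memcpy(&orig_conf, &dev->data->dev_conf, sizeof(orig_conf));
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        ret = validate_and_apply(dev);   /* hypothetical: the checks above */
        if (ret < 0)                     /* any failure: roll back */
            memcpy(&dev->data->dev_conf, &orig_conf, sizeof(orig_conf));
        return ret;
    }
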
diff --git a/lib/librte_hash/rte_cmp_x86.h b/lib/librte_hash/rte_cmp_x86.h
index e82b4c08..13a58363 100644
--- a/lib/librte_hash/rte_cmp_x86.h
+++ b/lib/librte_hash/rte_cmp_x86.h
@@ -2,6 +2,8 @@
* Copyright(c) 2015 Intel Corporation
*/
+#include <rte_vect.h>
+
/* Functions to compare multiple of 16 byte keys (up to 128 bytes) */
static int
rte_hash_k16_cmp_eq(const void *key1, const void *key2, size_t key_len __rte_unused)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 5ddcccd8..c55a4f26 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -13,7 +13,6 @@
#include <rte_common.h>
#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
-#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
@@ -982,7 +981,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
new_idx = (uint32_t)((uintptr_t) slot_id);
/* Copy key */
- rte_memcpy(new_k->key, key, h->key_len);
+ memcpy(new_k->key, key, h->key_len);
/* Key can be of arbitrary length, so it is not possible to store
* it atomically. Hence the new key element's memory stores
* (key as well as data) should be complete before it is referenced.
@@ -1129,9 +1128,38 @@ rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
return ret;
}
+/* Search one bucket to find the match key - uses rw lock */
+static inline int32_t
+search_one_bucket_l(const struct rte_hash *h, const void *key,
+ uint16_t sig, void **data,
+ const struct rte_hash_bucket *bkt)
+{
+ int i;
+ struct rte_hash_key *k, *keys = h->key_store;
+
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (bkt->sig_current[i] == sig &&
+ bkt->key_idx[i] != EMPTY_SLOT) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ bkt->key_idx[i] * h->key_entry_size);
+
+ if (rte_hash_cmp_eq(key, k->key, h) == 0) {
+ if (data != NULL)
+ *data = k->pdata;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return bkt->key_idx[i] - 1;
+ }
+ }
+ }
+ return -1;
+}
+
/* Search one bucket to find the match key */
static inline int32_t
-search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
+search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
void **data, const struct rte_hash_bucket *bkt)
{
int i;
@@ -1163,12 +1191,11 @@ search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
}
static inline int32_t
-__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
- hash_sig_t sig, void **data)
+__rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
{
uint32_t prim_bucket_idx, sec_bucket_idx;
struct rte_hash_bucket *bkt, *cur_bkt;
- uint32_t cnt_b, cnt_a;
int ret;
uint16_t short_sig;
@@ -1176,8 +1203,48 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
prim_bucket_idx = get_prim_bucket_index(h, sig);
sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
+ bkt = &h->buckets[prim_bucket_idx];
+
__hash_rw_reader_lock(h);
+ /* Check if key is in primary location */
+ ret = search_one_bucket_l(h, key, short_sig, data, bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
+ /* Calculate secondary hash */
+ bkt = &h->buckets[sec_bucket_idx];
+
+ /* Check if key is in secondary location */
+ FOR_EACH_BUCKET(cur_bkt, bkt) {
+ ret = search_one_bucket_l(h, key, short_sig,
+ data, cur_bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
+ }
+
+ __hash_rw_reader_unlock(h);
+
+ return -ENOENT;
+}
+
+static inline int32_t
+__rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
+{
+ uint32_t prim_bucket_idx, sec_bucket_idx;
+ struct rte_hash_bucket *bkt, *cur_bkt;
+ uint32_t cnt_b, cnt_a;
+ int ret;
+ uint16_t short_sig;
+
+ short_sig = get_short_sig(sig);
+ prim_bucket_idx = get_prim_bucket_index(h, sig);
+ sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
+
do {
/* Load the table change counter before the lookup
* starts. Acquire semantics will make sure that
@@ -1188,7 +1255,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
/* Check if key is in primary location */
bkt = &h->buckets[prim_bucket_idx];
- ret = search_one_bucket(h, key, short_sig, data, bkt);
+ ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
return ret;
@@ -1198,7 +1265,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
/* Check if key is in secondary location */
FOR_EACH_BUCKET(cur_bkt, bkt) {
- ret = search_one_bucket(h, key, short_sig,
+ ret = search_one_bucket_lf(h, key, short_sig,
data, cur_bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
@@ -1222,11 +1289,19 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
__ATOMIC_ACQUIRE);
} while (cnt_b != cnt_a);
- __hash_rw_reader_unlock(h);
-
return -ENOENT;
}
+static inline int32_t
+__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
+{
+ if (h->readwrite_concur_lf_support)
+ return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
+ else
+ return __rte_hash_lookup_with_hash_l(h, key, sig, data);
+}
+
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
const void *key, hash_sig_t sig)
@@ -1528,7 +1603,197 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
#define PREFETCH_OFFSET 4
static inline void
-__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint64_t hits = 0;
+ int32_t i;
+ int32_t ret;
+ uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ struct rte_hash_bucket *cur_bkt, *next_bkt;
+
+ /* Prefetch first keys */
+ for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
+ rte_prefetch0(keys[i]);
+
+ /*
+ * Prefetch rest of the keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(keys[i + PREFETCH_OFFSET]);
+
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ /* Calculate and prefetch rest of the buckets */
+ for (; i < num_keys; i++) {
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __hash_rw_reader_lock(h);
+
+ /* Compare signatures and prefetch key slot of first hit */
+ for (i = 0; i < num_keys; i++) {
+ compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+ primary_bkt[i], secondary_bkt[i],
+ sig[i], h->sig_cmp_fn);
+
+ if (prim_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ continue;
+ }
+
+ if (sec_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ }
+ }
+
+ /* Compare keys, first hits in primary first */
+ for (i = 0; i < num_keys; i++) {
+ positions[i] = -ENOENT;
+ while (prim_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+
+ while (sec_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+next_key:
+ continue;
+ }
+
+ /* all found, do not need to go through ext bkt */
+ if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+ __hash_rw_reader_unlock(h);
+ return;
+ }
+
+ /* need to check ext buckets for match */
+ for (i = 0; i < num_keys; i++) {
+ if ((hits & (1ULL << i)) != 0)
+ continue;
+ next_bkt = secondary_bkt[i]->next;
+ FOR_EACH_BUCKET(cur_bkt, next_bkt) {
+ if (data != NULL)
+ ret = search_one_bucket_l(h, keys[i],
+ sig[i], &data[i], cur_bkt);
+ else
+ ret = search_one_bucket_l(h, keys[i],
+ sig[i], NULL, cur_bkt);
+ if (ret != -1) {
+ positions[i] = ret;
+ hits |= 1ULL << i;
+ break;
+ }
+ }
+ }
+
+ __hash_rw_reader_unlock(h);
+
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+}
+
+static inline void
+__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
int32_t num_keys, int32_t *positions,
uint64_t *hit_mask, void *data[])
{
@@ -1586,7 +1851,6 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
rte_prefetch0(secondary_bkt[i]);
}
- __hash_rw_reader_lock(h);
do {
/* Load the table change counter before the lookup
* starts. Acquire semantics will make sure that
@@ -1735,10 +1999,10 @@ next_key:
next_bkt = secondary_bkt[i]->next;
FOR_EACH_BUCKET(cur_bkt, next_bkt) {
if (data != NULL)
- ret = search_one_bucket(h, keys[i],
+ ret = search_one_bucket_lf(h, keys[i],
sig[i], &data[i], cur_bkt);
else
- ret = search_one_bucket(h, keys[i],
+ ret = search_one_bucket_lf(h, keys[i],
sig[i], NULL, cur_bkt);
if (ret != -1) {
positions[i] = ret;
@@ -1748,12 +2012,23 @@ next_key:
}
}
- __hash_rw_reader_unlock(h);
-
if (hit_mask != NULL)
*hit_mask = hits;
}
+static inline void
+__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ if (h->readwrite_concur_lf_support)
+ return __rte_hash_lookup_bulk_lf(h, keys, num_keys,
+ positions, hit_mask, data);
+ else
+ return __rte_hash_lookup_bulk_l(h, keys, num_keys,
+ positions, hit_mask, data);
+}
+
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
uint32_t num_keys, int32_t *positions)
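
The _l/_lf split above puts two concurrency strategies behind one unchanged entry point: the _l paths take the reader lock, while the _lf paths re-run the lookup whenever the table change counter moved during the scan, a seqlock-style protocol that lets readers proceed while a writer relocates keys. The reader side, reduced to a sketch (lookup_once() is hypothetical; tbl_chng_cnt is the counter this file uses):

    uint32_t cnt_before, cnt_after;
    int ret;

    do {
        cnt_before = __atomic_load_n(h->tbl_chng_cnt, __ATOMIC_ACQUIRE);
        ret = lookup_once(h, key, data);   /* hypothetical single pass */
        if (ret != -1)
            return ret;                    /* hit: index is valid */
        cnt_after = __atomic_load_n(h->tbl_chng_cnt, __ATOMIC_ACQUIRE);
    } while (cnt_before != cnt_after);     /* writer interfered: retry */
    return -ENOENT;
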
diff --git a/lib/librte_pci/rte_pci.c b/lib/librte_pci/rte_pci.c
index 530738db..f400178b 100644
--- a/lib/librte_pci/rte_pci.c
+++ b/lib/librte_pci/rte_pci.c
@@ -30,6 +30,10 @@ get_u8_pciaddr_field(const char *in, void *_u8, char dlm)
uint8_t *u8 = _u8;
char *end;
+	/* an empty string is an error, even though strtoul() returns 0 */
+ if (*in == '\0')
+ return NULL;
+
errno = 0;
val = strtoul(in, &end, 16);
if (errno != 0 || end[0] != dlm || val > UINT8_MAX) {
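
The new guard is needed because strtoul() reports no error for an empty field: it returns 0, leaves errno alone (on glibc), and sets end to the start of the input, so an empty field (where the input begins directly at the delimiter, or at the terminating '\0' for the last field) would silently parse as 0. A standalone demonstration:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *in = "";
        char *end;
        unsigned long v;

        errno = 0;
        v = strtoul(in, &end, 16);
        printf("v=%lu errno=%d consumed=%td\n", v, errno, end - in);
        /* prints "v=0 errno=0 consumed=0": indistinguishable from a
         * legitimate "0" unless empty input is rejected up front */
        return 0;
    }
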
diff --git a/lib/librte_pipeline/rte_table_action.c b/lib/librte_pipeline/rte_table_action.c
index 537e6593..7c7c8dd8 100644
--- a/lib/librte_pipeline/rte_table_action.c
+++ b/lib/librte_pipeline/rte_table_action.c
@@ -1694,10 +1694,9 @@ get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
rte_cryptodev_info_get(cdev_id, &dev_info);
- for (i = 0;; i++) {
+ for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ i++) {
cap = &dev_info.capabilities[i];
- if (!cap)
- break;
if (cap->sym.xform_type != xform->type)
continue;
diff --git a/lib/librte_ring/rte_ring_c11_mem.h b/lib/librte_ring/rte_ring_c11_mem.h
index 7bc74a4c..0fb73a33 100644
--- a/lib/librte_ring/rte_ring_c11_mem.h
+++ b/lib/librte_ring/rte_ring_c11_mem.h
@@ -61,11 +61,14 @@ __rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
unsigned int max = n;
int success;
- *old_head = __atomic_load_n(&r->prod.head, __ATOMIC_ACQUIRE);
+ *old_head = __atomic_load_n(&r->prod.head, __ATOMIC_RELAXED);
do {
/* Reset n to the initial burst count */
n = max;
+ /* Ensure the head is read before tail */
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+
/* load-acquire synchronize with store-release of ht->tail
* in update_tail.
*/
@@ -94,7 +97,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
/* on failure, *old_head is updated */
success = __atomic_compare_exchange_n(&r->prod.head,
old_head, *new_head,
- 0, __ATOMIC_ACQUIRE,
+ 0, __ATOMIC_RELAXED,
__ATOMIC_RELAXED);
} while (unlikely(success == 0));
return n;
@@ -134,11 +137,14 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
int success;
/* move cons.head atomically */
- *old_head = __atomic_load_n(&r->cons.head, __ATOMIC_ACQUIRE);
+ *old_head = __atomic_load_n(&r->cons.head, __ATOMIC_RELAXED);
do {
/* Restore n as it may change every loop */
n = max;
+ /* Ensure the head is read before tail */
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+
/* this load-acquire synchronize with store-release of ht->tail
* in update_tail.
*/
@@ -166,7 +172,7 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
/* on failure, *old_head will be updated */
success = __atomic_compare_exchange_n(&r->cons.head,
old_head, *new_head,
- 0, __ATOMIC_ACQUIRE,
+ 0, __ATOMIC_RELAXED,
__ATOMIC_RELAXED);
} while (unlikely(success == 0));
return n;
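
Both hunks above make the initial head load relaxed and add an explicit acquire fence inside the retry loop, so the head value in hand is ordered before the opposing tail load on every iteration, including right after a failed CAS refreshes *old_head. The producer side, reduced to a compilable sketch (simplified: fixed burst size, no single-producer path):

    #include <stdint.h>

    struct headtail { uint32_t head, tail; };
    struct ring { struct headtail prod, cons; uint32_t capacity; };

    /* returns the old head on success, UINT32_MAX if no room */
    static uint32_t
    move_prod_head(struct ring *r, uint32_t n)
    {
        uint32_t head, tail;

        head = __atomic_load_n(&r->prod.head, __ATOMIC_RELAXED);
        do {
            /* ensure the head is read before the tail */
            __atomic_thread_fence(__ATOMIC_ACQUIRE);
            /* pairs with the store-release of cons.tail in update_tail */
            tail = __atomic_load_n(&r->cons.tail, __ATOMIC_ACQUIRE);
            if (r->capacity + tail - head < n)
                return UINT32_MAX;
        } while (!__atomic_compare_exchange_n(&r->prod.head, &head,
                head + n, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
        return head;
    }
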
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 5472bead..dd01afc0 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -198,6 +198,7 @@ struct vhost_crypto {
struct rte_hash *session_map;
struct rte_mempool *mbuf_pool;
struct rte_mempool *sess_pool;
+ struct rte_mempool *wb_pool;
/** DPDK cryptodev ID */
uint8_t cid;
@@ -215,13 +216,20 @@ struct vhost_crypto {
uint8_t option;
} __rte_cache_aligned;
+struct vhost_crypto_writeback_data {
+ uint8_t *src;
+ uint8_t *dst;
+ uint64_t len;
+ struct vhost_crypto_writeback_data *next;
+};
+
struct vhost_crypto_data_req {
struct vring_desc *head;
struct virtio_net *dev;
struct virtio_crypto_inhdr *inhdr;
struct vhost_virtqueue *vq;
- struct vring_desc *wb_desc;
- uint16_t wb_len;
+ struct vhost_crypto_writeback_data *wb;
+ struct rte_mempool *wb_pool;
uint16_t desc_idx;
uint16_t len;
uint16_t zero_copy;
@@ -506,15 +514,29 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
left -= desc->len;
}
- if (unlikely(left > 0)) {
- VC_LOG_ERR("Incorrect virtio descriptor");
+ if (unlikely(left > 0))
return -1;
- }
*cur_desc = &head[desc->next];
return 0;
}
+static __rte_always_inline void *
+get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
+ uint8_t perm)
+{
+ void *data;
+ uint64_t dlen = cur_desc->len;
+
+ data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
+ if (unlikely(!data || dlen != cur_desc->len)) {
+ VC_LOG_ERR("Failed to map object");
+ return NULL;
+ }
+
+ return data;
+}
+
static int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
struct vring_desc **cur_desc, uint32_t size)
@@ -531,10 +553,8 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
dlen = to_copy;
src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
VHOST_ACCESS_RO);
- if (unlikely(!src || !dlen)) {
- VC_LOG_ERR("Failed to map descriptor");
+ if (unlikely(!src || !dlen))
return -1;
- }
rte_memcpy((uint8_t *)data, src, dlen);
data += dlen;
@@ -609,73 +629,158 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
return 0;
}
-static __rte_always_inline void *
-get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc **cur_desc,
- uint32_t size, uint8_t perm)
+static void
+write_back_data(struct vhost_crypto_data_req *vc_req)
{
- void *data;
- uint64_t dlen = (*cur_desc)->len;
-
- data = IOVA_TO_VVA(void *, vc_req, (*cur_desc)->addr, &dlen, perm);
- if (unlikely(!data || dlen != (*cur_desc)->len)) {
- VC_LOG_ERR("Failed to map object");
- return NULL;
+ struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;
+
+ while (wb_data) {
+ rte_prefetch0(wb_data->next);
+ rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
+ wb_last = wb_data;
+ wb_data = wb_data->next;
+ rte_mempool_put(vc_req->wb_pool, wb_last);
}
+}
- if (unlikely(move_desc(vc_req->head, cur_desc, size) < 0))
- return NULL;
+static void
+free_wb_data(struct vhost_crypto_writeback_data *wb_data,
+ struct rte_mempool *mp)
+{
+	while (wb_data != NULL) {
+		struct vhost_crypto_writeback_data *next = wb_data->next;
-	return data;
+		rte_mempool_put(mp, wb_data);
+		wb_data = next;
+	}
}
-static int
-write_back_data(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req)
+/**
+ * The function allocates a vhost_crypto_writeback_data linked list
+ * containing the source and destination data pointers for the write-back
+ * operation performed after the op is dequeued from the cryptodev PMD queues.
+ *
+ * @param vc_req
+ * The vhost crypto data request pointer
+ * @param cur_desc
+ * Pointer to the descriptor pointer currently in use. *cur_desc is updated
+ * to the descriptor following the write-back area when the function returns.
+ * @param end_wb_data
+ * The last write back data element to be returned. It is used only in cipher
+ * and hash chain operations.
+ * @param src
+ * The source data pointer
+ * @param offset
+ * The offset applied to both source and destination data. For the source
+ * it is the number of bytes between src and the start of the cipher
+ * operation; for the destination it is the number of bytes from
+ * (*cur_desc)->addr to the point where src will be written.
+ * @param write_back_len
+ * The number of bytes to write back.
+ * @return
+ * The pointer to the start of the write back data linked list.
+ */
+static struct vhost_crypto_writeback_data *
+prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+ struct vring_desc **cur_desc,
+ struct vhost_crypto_writeback_data **end_wb_data,
+ uint8_t *src,
+ uint32_t offset,
+ uint64_t write_back_len)
{
- struct rte_mbuf *mbuf = op->sym->m_dst;
- struct vring_desc *head = vc_req->head;
- struct vring_desc *desc = vc_req->wb_desc;
- int left = vc_req->wb_len;
- uint32_t to_write;
- uint8_t *src_data = mbuf->buf_addr, *dst;
+ struct vhost_crypto_writeback_data *wb_data, *head;
+ struct vring_desc *desc = *cur_desc;
uint64_t dlen;
+ uint8_t *dst;
+ int ret;
- rte_prefetch0(&head[desc->next]);
- to_write = RTE_MIN(desc->len, (uint32_t)left);
- dlen = desc->len;
- dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
- VHOST_ACCESS_RW);
- if (unlikely(!dst || dlen != desc->len)) {
- VC_LOG_ERR("Failed to map descriptor");
- return -1;
+ ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
+ if (unlikely(ret < 0)) {
+ VC_LOG_ERR("no memory");
+ goto error_exit;
}
- rte_memcpy(dst, src_data, to_write);
- left -= to_write;
- src_data += to_write;
+ wb_data = head;
- while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
- desc = &head[desc->next];
- rte_prefetch0(&head[desc->next]);
- to_write = RTE_MIN(desc->len, (uint32_t)left);
+ if (likely(desc->len > offset)) {
+ wb_data->src = src + offset;
dlen = desc->len;
- dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
- VHOST_ACCESS_RW);
+ dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
+ &dlen, VHOST_ACCESS_RW) + offset;
if (unlikely(!dst || dlen != desc->len)) {
VC_LOG_ERR("Failed to map descriptor");
- return -1;
+ goto error_exit;
}
- rte_memcpy(dst, src_data, to_write);
- left -= to_write;
- src_data += to_write;
- }
+ wb_data->dst = dst;
+ wb_data->len = desc->len - offset;
+ write_back_len -= wb_data->len;
+ src += offset + wb_data->len;
+ offset = 0;
+
+ if (unlikely(write_back_len)) {
+ ret = rte_mempool_get(vc_req->wb_pool,
+ (void **)&(wb_data->next));
+ if (unlikely(ret < 0)) {
+ VC_LOG_ERR("no memory");
+ goto error_exit;
+ }
- if (unlikely(left < 0)) {
- VC_LOG_ERR("Incorrect virtio descriptor");
- return -1;
+ wb_data = wb_data->next;
+ } else
+ wb_data->next = NULL;
+ } else
+ offset -= desc->len;
+
+ while (write_back_len) {
+ desc = &vc_req->head[desc->next];
+ if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
+ VC_LOG_ERR("incorrect descriptor");
+ goto error_exit;
+ }
+
+ if (desc->len <= offset) {
+ offset -= desc->len;
+ continue;
+ }
+
+ dlen = desc->len;
+ dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
+ VHOST_ACCESS_RW) + offset;
+ if (unlikely(dst == NULL || dlen != desc->len)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ goto error_exit;
+ }
+
+ wb_data->src = src;
+ wb_data->dst = dst;
+ wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
+ write_back_len -= wb_data->len;
+ src += wb_data->len;
+ offset = 0;
+
+ if (write_back_len) {
+ ret = rte_mempool_get(vc_req->wb_pool,
+ (void **)&(wb_data->next));
+ if (unlikely(ret < 0)) {
+ VC_LOG_ERR("no memory");
+ goto error_exit;
+ }
+
+ wb_data = wb_data->next;
+ } else
+ wb_data->next = NULL;
}
- return 0;
+ *cur_desc = &vc_req->head[desc->next];
+
+ *end_wb_data = wb_data;
+
+ return head;
+
+error_exit:
+ if (head)
+ free_wb_data(head, vc_req->wb_pool);
+
+ return NULL;
}
static uint8_t
@@ -685,6 +790,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct vring_desc *cur_desc)
{
struct vring_desc *desc = cur_desc;
+ struct vhost_crypto_writeback_data *ewb = NULL;
struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
uint8_t ret = 0;
@@ -703,16 +809,25 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
cipher->para.src_data_len);
- m_src->buf_addr = get_data_ptr(vc_req, &desc,
- cipher->para.src_data_len, VHOST_ACCESS_RO);
+ m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
if (unlikely(m_src->buf_iova == 0 ||
m_src->buf_addr == NULL)) {
VC_LOG_ERR("zero_copy may fail due to cross page data");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
+
+ if (unlikely(move_desc(vc_req->head, &desc,
+ cipher->para.src_data_len) < 0)) {
+ VC_LOG_ERR("Incorrect descriptor");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+
break;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+ vc_req->wb_pool = vcrypto->wb_pool;
+
if (unlikely(cipher->para.src_data_len >
RTE_MBUF_DEFAULT_BUF_SIZE)) {
VC_LOG_ERR("Not enough space to do data copy");
@@ -743,24 +858,31 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
desc->addr, cipher->para.dst_data_len);
- m_dst->buf_addr = get_data_ptr(vc_req, &desc,
- cipher->para.dst_data_len, VHOST_ACCESS_RW);
+ m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
VC_LOG_ERR("zero_copy may fail due to cross page data");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
+ if (unlikely(move_desc(vc_req->head, &desc,
+ cipher->para.dst_data_len) < 0)) {
+ VC_LOG_ERR("Incorrect descriptor");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+
m_dst->data_len = cipher->para.dst_data_len;
break;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
- vc_req->wb_desc = desc;
- vc_req->wb_len = cipher->para.dst_data_len;
- if (unlikely(move_desc(vc_req->head, &desc,
- vc_req->wb_len) < 0)) {
+ vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
+ rte_pktmbuf_mtod(m_src, uint8_t *), 0,
+ cipher->para.dst_data_len);
+ if (unlikely(vc_req->wb == NULL)) {
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
+
break;
default:
ret = VIRTIO_CRYPTO_BADMSG;
@@ -774,7 +896,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
op->sym->cipher.data.offset = 0;
op->sym->cipher.data.length = cipher->para.src_data_len;
- vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);
+ vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
if (unlikely(vc_req->inhdr == NULL)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
@@ -786,6 +908,9 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
return 0;
error_exit:
+ if (vc_req->wb)
+ free_wb_data(vc_req->wb, vc_req->wb_pool);
+
vc_req->len = INHDR_LEN;
return ret;
}
@@ -796,7 +921,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct virtio_crypto_alg_chain_data_req *chain,
struct vring_desc *cur_desc)
{
- struct vring_desc *desc = cur_desc;
+ struct vring_desc *desc = cur_desc, *digest_desc;
+ struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
uint32_t digest_offset;
@@ -812,21 +938,30 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
m_src->data_len = chain->para.src_data_len;
- m_dst->data_len = chain->para.dst_data_len;
switch (vcrypto->option) {
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+ m_dst->data_len = chain->para.dst_data_len;
+
m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
chain->para.src_data_len);
- m_src->buf_addr = get_data_ptr(vc_req, &desc,
- chain->para.src_data_len, VHOST_ACCESS_RO);
+ m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
VC_LOG_ERR("zero_copy may fail due to cross page data");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
+
+ if (unlikely(move_desc(vc_req->head, &desc,
+ chain->para.src_data_len) < 0)) {
+ VC_LOG_ERR("Incorrect descriptor");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
break;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+ vc_req->wb_pool = vcrypto->wb_pool;
+
if (unlikely(chain->para.src_data_len >
RTE_MBUF_DEFAULT_BUF_SIZE)) {
VC_LOG_ERR("Not enough space to do data copy");
@@ -838,6 +973,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
+
break;
default:
ret = VIRTIO_CRYPTO_BADMSG;
@@ -856,46 +992,70 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
desc->addr, chain->para.dst_data_len);
- m_dst->buf_addr = get_data_ptr(vc_req, &desc,
- chain->para.dst_data_len, VHOST_ACCESS_RW);
+ m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
VC_LOG_ERR("zero_copy may fail due to cross page data");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
+ if (unlikely(move_desc(vc_req->head, &desc,
+ chain->para.dst_data_len) < 0)) {
+ VC_LOG_ERR("Incorrect descriptor");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+
op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
desc->addr, chain->para.hash_result_len);
- op->sym->auth.digest.data = get_data_ptr(vc_req, &desc,
- chain->para.hash_result_len, VHOST_ACCESS_RW);
+ op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
+ VHOST_ACCESS_RW);
if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
VC_LOG_ERR("zero_copy may fail due to cross page data");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
+
+ if (unlikely(move_desc(vc_req->head, &desc,
+ chain->para.hash_result_len) < 0)) {
+ VC_LOG_ERR("Incorrect descriptor");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+
break;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
- digest_offset = m_dst->data_len;
- digest_addr = rte_pktmbuf_mtod_offset(m_dst, void *,
- digest_offset);
+ vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
+ rte_pktmbuf_mtod(m_src, uint8_t *),
+ chain->para.cipher_start_src_offset,
+ chain->para.dst_data_len -
+ chain->para.cipher_start_src_offset);
+ if (unlikely(vc_req->wb == NULL)) {
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
- vc_req->wb_desc = desc;
- vc_req->wb_len = m_dst->data_len + chain->para.hash_result_len;
+ digest_offset = m_src->data_len;
+ digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
+ digest_offset);
+ digest_desc = desc;
- if (unlikely(move_desc(vc_req->head, &desc,
- chain->para.dst_data_len) < 0)) {
- ret = VIRTIO_CRYPTO_BADMSG;
+ /** create a wb_data for digest */
+ ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
+ digest_addr, 0, chain->para.hash_result_len);
+ if (unlikely(ewb->next == NULL)) {
+ ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
- if (unlikely(copy_data(digest_addr, vc_req, &desc,
+ if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
chain->para.hash_result_len)) < 0) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
op->sym->auth.digest.data = digest_addr;
- op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_dst,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
digest_offset);
break;
default:
@@ -904,7 +1064,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
/* record inhdr */
- vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);
+ vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
if (unlikely(vc_req->inhdr == NULL)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
@@ -927,6 +1087,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
return 0;
error_exit:
+ if (vc_req->wb)
+ free_wb_data(vc_req->wb, vc_req->wb_pool);
vc_req->len = INHDR_LEN;
return ret;
}
@@ -967,7 +1129,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
vc_req->head = head;
vc_req->zero_copy = vcrypto->option;
- req = get_data_ptr(vc_req, &desc, sizeof(*req), VHOST_ACCESS_RO);
+ req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
if (unlikely(req == NULL)) {
switch (vcrypto->option) {
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
@@ -988,6 +1150,12 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
VC_LOG_ERR("Invalid option");
goto error_exit;
}
+ } else {
+ if (unlikely(move_desc(vc_req->head, &desc,
+ sizeof(*req)) < 0)) {
+ VC_LOG_ERR("Incorrect descriptor");
+ goto error_exit;
+ }
}
switch (req->header.opcode) {
@@ -1062,7 +1230,6 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
struct rte_mbuf *m_dst = op->sym->m_dst;
struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
uint16_t desc_idx;
- int ret = 0;
if (unlikely(!vc_req)) {
VC_LOG_ERR("Failed to retrieve vc_req");
@@ -1077,19 +1244,18 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
else {
- if (vc_req->zero_copy == 0) {
- ret = write_back_data(op, vc_req);
- if (unlikely(ret != 0))
- vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
- }
+ if (vc_req->zero_copy == 0)
+ write_back_data(vc_req);
}
vc_req->vq->used->ring[desc_idx].id = desc_idx;
vc_req->vq->used->ring[desc_idx].len = vc_req->len;
- rte_mempool_put(m_dst->pool, (void *)m_dst);
rte_mempool_put(m_src->pool, (void *)m_src);
+ if (m_dst)
+ rte_mempool_put(m_dst->pool, (void *)m_dst);
+
return vc_req->vq;
}
@@ -1186,6 +1352,18 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
goto error_exit;
}
+ snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
+ vcrypto->wb_pool = rte_mempool_create(name,
+ VHOST_CRYPTO_MBUF_POOL_SIZE,
+ sizeof(struct vhost_crypto_writeback_data),
+ 128, 0, NULL, NULL, NULL, NULL,
+ rte_socket_id(), 0);
+ if (!vcrypto->wb_pool) {
+		VC_LOG_ERR("Failed to create mempool");
+ ret = -ENOMEM;
+ goto error_exit;
+ }
+
dev->extern_data = vcrypto;
dev->extern_ops.pre_msg_handle = NULL;
dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;
@@ -1222,6 +1400,7 @@ rte_vhost_crypto_free(int vid)
rte_hash_free(vcrypto->session_map);
rte_mempool_free(vcrypto->mbuf_pool);
+ rte_mempool_free(vcrypto->wb_pool);
rte_free(vcrypto);
dev->extern_data = NULL;
@@ -1257,11 +1436,30 @@ rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
if (vcrypto->option == (uint8_t)option)
return 0;
- if (!(rte_mempool_full(vcrypto->mbuf_pool))) {
+ if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
+ !(rte_mempool_full(vcrypto->wb_pool))) {
VC_LOG_ERR("Cannot update zero copy as mempool is not full");
return -EINVAL;
}
+ if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
+ char name[128];
+
+ snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
+ vcrypto->wb_pool = rte_mempool_create(name,
+ VHOST_CRYPTO_MBUF_POOL_SIZE,
+ sizeof(struct vhost_crypto_writeback_data),
+ 128, 0, NULL, NULL, NULL, NULL,
+ rte_socket_id(), 0);
+ if (!vcrypto->wb_pool) {
+			VC_LOG_ERR("Failed to create mbuf pool");
+ return -ENOMEM;
+ }
+ } else {
+ rte_mempool_free(vcrypto->wb_pool);
+ vcrypto->wb_pool = NULL;
+ }
+
vcrypto->option = (uint8_t)option;
return 0;
@@ -1277,9 +1475,8 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
struct vhost_virtqueue *vq;
uint16_t avail_idx;
uint16_t start_idx;
- uint16_t required;
uint16_t count;
- uint16_t i;
+ uint16_t i = 0;
if (unlikely(dev == NULL)) {
VC_LOG_ERR("Invalid vid %i", vid);
@@ -1311,27 +1508,66 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
/* for zero copy, we need 2 empty mbufs for src and dst, otherwise
* we need only 1 mbuf as src and dst
*/
- required = count * 2;
- if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs,
- required) < 0)) {
- VC_LOG_ERR("Insufficient memory");
- return -ENOMEM;
- }
+ switch (vcrypto->option) {
+ case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+ if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
+ (void **)mbufs, count * 2) < 0)) {
+ VC_LOG_ERR("Insufficient memory");
+ return -ENOMEM;
+ }
- for (i = 0; i < count; i++) {
- uint16_t used_idx = (start_idx + i) & (vq->size - 1);
- uint16_t desc_idx = vq->avail->ring[used_idx];
- struct vring_desc *head = &vq->desc[desc_idx];
- struct rte_crypto_op *op = ops[i];
+ for (i = 0; i < count; i++) {
+ uint16_t used_idx = (start_idx + i) & (vq->size - 1);
+ uint16_t desc_idx = vq->avail->ring[used_idx];
+ struct vring_desc *head = &vq->desc[desc_idx];
+ struct rte_crypto_op *op = ops[i];
- op->sym->m_src = mbufs[i * 2];
- op->sym->m_dst = mbufs[i * 2 + 1];
- op->sym->m_src->data_off = 0;
- op->sym->m_dst->data_off = 0;
+ op->sym->m_src = mbufs[i * 2];
+ op->sym->m_dst = mbufs[i * 2 + 1];
+ op->sym->m_src->data_off = 0;
+ op->sym->m_dst->data_off = 0;
+
+ if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
+ op, head, desc_idx)) < 0)
+ break;
+ }
+
+ if (unlikely(i < count))
+ rte_mempool_put_bulk(vcrypto->mbuf_pool,
+ (void **)&mbufs[i * 2],
+ (count - i) * 2);
+
+ break;
+
+ case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+ if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
+ (void **)mbufs, count) < 0)) {
+ VC_LOG_ERR("Insufficient memory");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ uint16_t used_idx = (start_idx + i) & (vq->size - 1);
+ uint16_t desc_idx = vq->avail->ring[used_idx];
+ struct vring_desc *head = &vq->desc[desc_idx];
+ struct rte_crypto_op *op = ops[i];
+
+ op->sym->m_src = mbufs[i];
+ op->sym->m_dst = NULL;
+ op->sym->m_src->data_off = 0;
+
+ if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
+ op, head, desc_idx)) < 0)
+ break;
+ }
+
+ if (unlikely(i < count))
+ rte_mempool_put_bulk(vcrypto->mbuf_pool,
+ (void **)&mbufs[i],
+ count - i);
+
+ break;
- if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, op, head,
- desc_idx)) < 0)
- break;
}
vq->last_used_idx += i;
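
To summarize the data path this file's changes introduce for the zero-copy-disabled mode: prepare_write_back_data() builds a mempool-backed chain of (src, dst, len) segments while parsing the request, and only after the crypto op is dequeued does write_back_data() walk the chain, copy the results into the guest descriptors, and recycle the nodes. The consume side, reduced to a sketch mirroring the code above:

    static void
    consume_wb_chain(struct vhost_crypto_writeback_data *wb,
            struct rte_mempool *mp)
    {
        while (wb != NULL) {
            struct vhost_crypto_writeback_data *done = wb;

            rte_prefetch0(wb->next);
            rte_memcpy(wb->dst, wb->src, wb->len); /* guest-visible copy */
            wb = wb->next;
            rte_mempool_put(mp, done);             /* recycle the node */
        }
    }
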
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index cc154f31..3ea64eba 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -1732,7 +1732,7 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg)
if (ret <= 0)
return ret;
- if (msg && msg->size) {
+ if (msg->size) {
if (msg->size > sizeof(msg->payload)) {
RTE_LOG(ERR, VHOST_CONFIG,
"invalid msg size: %d\n", msg->size);
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 8ad30c94..5e1a1a72 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -598,7 +598,7 @@ reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
avail_idx, &desc_count,
buf_vec, &vec_idx,
&buf_id, &len,
- VHOST_ACCESS_RO) < 0))
+ VHOST_ACCESS_RW) < 0))
return -1;
len = RTE_MIN(len, size);
@@ -1503,7 +1503,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
vq->last_avail_idx, &desc_count,
buf_vec, &nr_vec,
&buf_id, &dummy_len,
- VHOST_ACCESS_RW) < 0))
+ VHOST_ACCESS_RO) < 0))
break;
if (likely(dev->dequeue_zero_copy == 0))