aboutsummaryrefslogtreecommitdiffstats
path: root/test
diff options
context:
space:
mode:
authorLuca Boccassi <luca.boccassi@gmail.com>2018-08-14 18:52:30 +0100
committerLuca Boccassi <luca.boccassi@gmail.com>2018-08-14 18:53:17 +0100
commitb63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree83114aac64286fe616506c0b3dfaec2ab86ef835 /test
parentca33590b6af032bff57d9cc70455660466a654b2 (diff)
New upstream version 18.08upstream/18.08
Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'test')
-rw-r--r--test/bpf/dummy.c20
-rw-r--r--test/bpf/mbuf.h578
-rw-r--r--test/bpf/t1.c52
-rw-r--r--test/bpf/t2.c31
-rw-r--r--test/bpf/t3.c36
-rw-r--r--test/test-pipeline/init.c6
-rw-r--r--test/test-pipeline/main.h4
-rw-r--r--test/test-pipeline/pipeline_hash.c26
-rw-r--r--test/test/Makefile19
-rw-r--r--test/test/autotest.py13
-rw-r--r--test/test/autotest_data.py1087
-rw-r--r--test/test/autotest_runner.py519
-rw-r--r--test/test/commands.c5
-rw-r--r--test/test/meson.build22
-rw-r--r--test/test/process.h29
-rw-r--r--test/test/resource.c33
-rw-r--r--test/test/resource.h33
-rw-r--r--test/test/test_bpf.c1926
-rw-r--r--test/test/test_cmdline_cirbuf.c2
-rw-r--r--test/test/test_cmdline_ipaddr.c2
-rw-r--r--test/test/test_common.c38
-rw-r--r--test/test/test_compressdev.c1486
-rw-r--r--test/test/test_compressdev_test_buffer.h295
-rw-r--r--test/test/test_cryptodev.c472
-rw-r--r--test/test/test_cryptodev.h5
-rw-r--r--test/test/test_cryptodev_aes_test_vectors.h109
-rw-r--r--test/test/test_cryptodev_asym.c1369
-rw-r--r--test/test/test_cryptodev_asym_util.h42
-rw-r--r--test/test/test_cryptodev_blockcipher.c103
-rw-r--r--test/test/test_cryptodev_blockcipher.h4
-rw-r--r--test/test/test_cryptodev_des_test_vectors.h90
-rw-r--r--test/test/test_cryptodev_dh_test_vectors.h80
-rw-r--r--test/test/test_cryptodev_dsa_test_vectors.h117
-rw-r--r--test/test/test_cryptodev_hash_test_vectors.h158
-rw-r--r--test/test/test_cryptodev_mod_test_vectors.h103
-rw-r--r--test/test/test_cryptodev_rsa_test_vectors.h88
-rw-r--r--test/test/test_devargs.c103
-rw-r--r--test/test/test_distributor_perf.c3
-rw-r--r--test/test/test_eal_flags.c92
-rw-r--r--test/test/test_event_crypto_adapter.c928
-rw-r--r--test/test/test_event_eth_rx_adapter.c344
-rw-r--r--test/test/test_event_timer_adapter.c1830
-rw-r--r--test/test/test_fbarray.c576
-rw-r--r--test/test/test_flow_classify.c20
-rw-r--r--test/test/test_hash.c12
-rw-r--r--test/test/test_hash_multiwriter.c58
-rw-r--r--test/test/test_hash_perf.c36
-rw-r--r--test/test/test_hash_readwrite.c637
-rw-r--r--test/test/test_interrupts.c39
-rw-r--r--test/test/test_kni.c43
-rw-r--r--test/test/test_link_bonding.c31
-rw-r--r--test/test/test_link_bonding_mode4.c8
-rw-r--r--test/test/test_link_bonding_rssconf.c12
-rw-r--r--test/test/test_malloc.c32
-rw-r--r--test/test/test_memory.c27
-rw-r--r--test/test/test_mempool.c46
-rw-r--r--test/test/test_memzone.c227
-rw-r--r--test/test/test_meter.c194
-rw-r--r--test/test/test_mp_secondary.c26
-rw-r--r--test/test/test_pmd_perf.c35
-rw-r--r--test/test/test_pmd_ring.c6
-rw-r--r--test/test/test_power_acpi_cpufreq.c46
-rw-r--r--test/test/test_power_kvm_vm.c21
-rw-r--r--test/test/test_reorder.c47
-rw-r--r--test/test/test_resource.c33
-rw-r--r--test/test/test_service_cores.c116
-rw-r--r--test/test/test_table.c11
-rw-r--r--test/test/test_table.h6
-rw-r--r--test/test/test_table_combined.c4
-rw-r--r--test/test/test_table_pipeline.c14
-rw-r--r--test/test/test_table_tables.c6
-rw-r--r--test/test/virtual_pmd.c6
72 files changed, 13125 insertions, 1552 deletions
diff --git a/test/bpf/dummy.c b/test/bpf/dummy.c
new file mode 100644
index 00000000..5851469e
--- /dev/null
+++ b/test/bpf/dummy.c
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+/*
+ * eBPF program sample.
+ * does nothing and always returns success.
+ * used to measure BPF infrastructure overhead.
+ * To compile:
+ * clang -O2 -target bpf -c dummy.c
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+
+uint64_t
+entry(void *arg)
+{
+ return 1;
+}
diff --git a/test/bpf/mbuf.h b/test/bpf/mbuf.h
new file mode 100644
index 00000000..f24f908d
--- /dev/null
+++ b/test/bpf/mbuf.h
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright 2014 6WIND S.A.
+ */
+
+/*
+ * Snippet from dpdk.org rte_mbuf.h.
+ * used to provide BPF programs information about rte_mbuf layout.
+ */
+
+#ifndef _MBUF_H_
+#define _MBUF_H_
+
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Packet Offload Features Flags. It also carry packet type information.
+ * Critical resources. Both rx/tx shared these bits. Be cautious on any change
+ *
+ * - RX flags start at bit position zero, and get added to the left of previous
+ * flags.
+ * - The most-significant 3 bits are reserved for generic mbuf flags
+ * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
+ * added to the right of the previously defined flags i.e. they should count
+ * downwards, not upwards.
+ *
+ * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
+ * rte_get_tx_ol_flag_name().
+ */
+
+/**
+ * RX packet is a 802.1q VLAN packet. This flag was set by PMDs when
+ * the packet is recognized as a VLAN, but the behavior between PMDs
+ * was not the same. This flag is kept for some time to avoid breaking
+ * applications and should be replaced by PKT_RX_VLAN_STRIPPED.
+ */
+#define PKT_RX_VLAN_PKT (1ULL << 0)
+
+#define PKT_RX_RSS_HASH (1ULL << 1)
+/**< RX packet with RSS hash result. */
+#define PKT_RX_FDIR (1ULL << 2)
+/**< RX packet with FDIR match indicate. */
+
+/**
+ * Deprecated.
+ * Checking this flag alone is deprecated: check the 2 bits of
+ * PKT_RX_L4_CKSUM_MASK.
+ * This flag was set when the L4 checksum of a packet was detected as
+ * wrong by the hardware.
+ */
+#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
+
+/**
+ * Deprecated.
+ * Checking this flag alone is deprecated: check the 2 bits of
+ * PKT_RX_IP_CKSUM_MASK.
+ * This flag was set when the IP checksum of a packet was detected as
+ * wrong by the hardware.
+ */
+#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
+
+#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
+/**< External IP header checksum error. */
+
+/**
+ * A vlan has been stripped by the hardware and its tci is saved in
+ * mbuf->vlan_tci. This can only happen if vlan stripping is enabled
+ * in the RX configuration of the PMD.
+ */
+#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
+
+/**
+ * Mask of bits used to determine the status of RX IP checksum.
+ * - PKT_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
+ * - PKT_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
+ * - PKT_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
+ * - PKT_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
+ * data, but the integrity of the IP header is verified.
+ */
+#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
+
+#define PKT_RX_IP_CKSUM_UNKNOWN 0
+#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
+#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
+#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
+
+/**
+ * Mask of bits used to determine the status of RX L4 checksum.
+ * - PKT_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
+ * - PKT_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
+ * - PKT_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
+ * - PKT_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
+ * data, but the integrity of the L4 data is verified.
+ */
+#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
+
+#define PKT_RX_L4_CKSUM_UNKNOWN 0
+#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
+#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
+#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
+
+#define PKT_RX_IEEE1588_PTP (1ULL << 9)
+/**< RX IEEE1588 L2 Ethernet PT Packet. */
+#define PKT_RX_IEEE1588_TMST (1ULL << 10)
+/**< RX IEEE1588 L2/L4 timestamped packet.*/
+#define PKT_RX_FDIR_ID (1ULL << 13)
+/**< FD id reported if FDIR match. */
+#define PKT_RX_FDIR_FLX (1ULL << 14)
+/**< Flexible bytes reported if FDIR match. */
+
+/**
+ * The 2 vlans have been stripped by the hardware and their tci are
+ * saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
+ * This can only happen if vlan stripping is enabled in the RX
+ * configuration of the PMD. If this flag is set, PKT_RX_VLAN_STRIPPED
+ * must also be set.
+ */
+#define PKT_RX_QINQ_STRIPPED (1ULL << 15)
+
+/**
+ * Deprecated.
+ * RX packet with double VLAN stripped.
+ * This flag is replaced by PKT_RX_QINQ_STRIPPED.
+ */
+#define PKT_RX_QINQ_PKT PKT_RX_QINQ_STRIPPED
+
+/**
+ * When packets are coalesced by a hardware or virtual driver, this flag
+ * can be set in the RX mbuf, meaning that the m->tso_segsz field is
+ * valid and is set to the segment size of original packets.
+ */
+#define PKT_RX_LRO (1ULL << 16)
+
+/**
+ * Indicate that the timestamp field in the mbuf is valid.
+ */
+#define PKT_RX_TIMESTAMP (1ULL << 17)
+
+/* add new RX flags here */
+
+/* add new TX flags here */
+
+/**
+ * Offload the MACsec. This flag must be set by the application to enable
+ * this offload feature for a packet to be transmitted.
+ */
+#define PKT_TX_MACSEC (1ULL << 44)
+
+/**
+ * Bits 45:48 used for the tunnel type.
+ * When doing Tx offload like TSO or checksum, the HW needs to configure the
+ * tunnel type into the HW descriptors.
+ */
+#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
+#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
+#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
+#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
+/**< TX packet with MPLS-in-UDP RFC 7510 header. */
+#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
+/* add new TX TUNNEL type here */
+#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
+
+/**
+ * Second VLAN insertion (QinQ) flag.
+ */
+#define PKT_TX_QINQ_PKT (1ULL << 49)
+/**< TX packet with double VLAN inserted. */
+
+/**
+ * TCP segmentation offload. To enable this offload feature for a
+ * packet to be transmitted on hardware supporting TSO:
+ * - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
+ * PKT_TX_TCP_CKSUM)
+ * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
+ * - if it's IPv4, set the PKT_TX_IP_CKSUM flag and write the IP checksum
+ * to 0 in the packet
+ * - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
+ * - calculate the pseudo header checksum without taking ip_len in account,
+ * and set it in the TCP header. Refer to rte_ipv4_phdr_cksum() and
+ * rte_ipv6_phdr_cksum() that can be used as helpers.
+ */
+#define PKT_TX_TCP_SEG (1ULL << 50)
+
+#define PKT_TX_IEEE1588_TMST (1ULL << 51)
+/**< TX IEEE1588 packet to timestamp. */
+
+/**
+ * Bits 52+53 used for L4 packet type with checksum enabled: 00: Reserved,
+ * 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum. To use hardware
+ * L4 checksum offload, the user needs to:
+ * - fill l2_len and l3_len in mbuf
+ * - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM
+ * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
+ * - calculate the pseudo header checksum and set it in the L4 header (only
+ * for TCP or UDP). See rte_ipv4_phdr_cksum() and rte_ipv6_phdr_cksum().
+ * For SCTP, set the crc field to 0.
+ */
+#define PKT_TX_L4_NO_CKSUM (0ULL << 52)
+/**< Disable L4 cksum of TX pkt. */
+#define PKT_TX_TCP_CKSUM (1ULL << 52)
+/**< TCP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_SCTP_CKSUM (2ULL << 52)
+/**< SCTP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_UDP_CKSUM (3ULL << 52)
+/**< UDP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_L4_MASK (3ULL << 52)
+/**< Mask for L4 cksum offload request. */
+
+/**
+ * Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should
+ * also be set by the application, although a PMD will only check
+ * PKT_TX_IP_CKSUM.
+ * - set the IP checksum field in the packet to 0
+ * - fill the mbuf offload information: l2_len, l3_len
+ */
+#define PKT_TX_IP_CKSUM (1ULL << 54)
+
+/**
+ * Packet is IPv4. This flag must be set when using any offload feature
+ * (TSO, L3 or L4 checksum) to tell the NIC that the packet is an IPv4
+ * packet. If the packet is a tunneled packet, this flag is related to
+ * the inner headers.
+ */
+#define PKT_TX_IPV4 (1ULL << 55)
+
+/**
+ * Packet is IPv6. This flag must be set when using an offload feature
+ * (TSO or L4 checksum) to tell the NIC that the packet is an IPv6
+ * packet. If the packet is a tunneled packet, this flag is related to
+ * the inner headers.
+ */
+#define PKT_TX_IPV6 (1ULL << 56)
+
+#define PKT_TX_VLAN_PKT (1ULL << 57)
+/**< TX packet is a 802.1q VLAN packet. */
+
+/**
+ * Offload the IP checksum of an external header in the hardware. The
+ * flag PKT_TX_OUTER_IPV4 should also be set by the application, although
+ * a PMD will only check PKT_TX_IP_CKSUM. The IP checksum field in the
+ * packet must be set to 0.
+ * - set the outer IP checksum field in the packet to 0
+ * - fill the mbuf offload information: outer_l2_len, outer_l3_len
+ */
+#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
+
+/**
+ * Packet outer header is IPv4. This flag must be set when using any
+ * outer offload feature (L3 or L4 checksum) to tell the NIC that the
+ * outer header of the tunneled packet is an IPv4 packet.
+ */
+#define PKT_TX_OUTER_IPV4 (1ULL << 59)
+
+/**
+ * Packet outer header is IPv6. This flag must be set when using any
+ * outer offload feature (L4 checksum) to tell the NIC that the outer
+ * header of the tunneled packet is an IPv6 packet.
+ */
+#define PKT_TX_OUTER_IPV6 (1ULL << 60)
+
+/**
+ * Bitmask of all supported packet Tx offload features flags,
+ * which can be set for packet.
+ */
+#define PKT_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_IEEE1588_TMST | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_MASK | \
+ PKT_TX_MACSEC)
+
+#define __RESERVED (1ULL << 61) /**< reserved for future mbuf use */
+
+#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
+
+/* Use final bit of flags to indicate a control mbuf */
+#define CTRL_MBUF_FLAG (1ULL << 63) /**< Mbuf contains control data */
+
+/** Alignment constraint of mbuf private area. */
+#define RTE_MBUF_PRIV_ALIGN 8
+
+/**
+ * Get the name of a RX offload flag
+ *
+ * @param mask
+ * The mask describing the flag.
+ * @return
+ * The name of this flag, or NULL if it's not a valid RX flag.
+ */
+const char *rte_get_rx_ol_flag_name(uint64_t mask);
+
+/**
+ * Dump the list of RX offload flags in a buffer
+ *
+ * @param mask
+ * The mask describing the RX flags.
+ * @param buf
+ * The output buffer.
+ * @param buflen
+ * The length of the buffer.
+ * @return
+ * 0 on success, (-1) on error.
+ */
+int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
+
+/**
+ * Get the name of a TX offload flag
+ *
+ * @param mask
+ * The mask describing the flag. Usually only one bit must be set.
+ * Several bits can be given if they belong to the same mask.
+ * Ex: PKT_TX_L4_MASK.
+ * @return
+ * The name of this flag, or NULL if it's not a valid TX flag.
+ */
+const char *rte_get_tx_ol_flag_name(uint64_t mask);
+
+/**
+ * Dump the list of TX offload flags in a buffer
+ *
+ * @param mask
+ * The mask describing the TX flags.
+ * @param buf
+ * The output buffer.
+ * @param buflen
+ * The length of the buffer.
+ * @return
+ * 0 on success, (-1) on error.
+ */
+int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
+
+/**
+ * Some NICs need at least 2KB buffer to RX standard Ethernet frame without
+ * splitting it into multiple segments.
+ * So, for mbufs that planned to be involved into RX/TX, the recommended
+ * minimal buffer length is 2KB + RTE_PKTMBUF_HEADROOM.
+ */
+#define RTE_MBUF_DEFAULT_DATAROOM 2048
+#define RTE_MBUF_DEFAULT_BUF_SIZE \
+ (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
+
+/* define a set of marker types that can be used to refer to set points in the
+ * mbuf.
+ */
+__extension__
+typedef void *MARKER[0]; /**< generic marker for a point in a structure */
+__extension__
+typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
+__extension__
+typedef uint64_t MARKER64[0];
+/**< marker that allows us to overwrite 8 bytes with a single assignment */
+
+typedef struct {
+ volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * The generic rte_mbuf, containing a packet mbuf.
+ */
+struct rte_mbuf {
+ MARKER cacheline0;
+
+ void *buf_addr; /**< Virtual address of segment buffer. */
+ /**
+ * Physical address of segment buffer.
+ * Force alignment to 8-bytes, so as to ensure we have the exact
+ * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
+ * working on vector drivers easier.
+ */
+ phys_addr_t buf_physaddr __rte_aligned(sizeof(phys_addr_t));
+
+ /* next 8 bytes are initialised on RX descriptor rearm */
+ MARKER64 rearm_data;
+ uint16_t data_off;
+
+ /**
+ * Reference counter. Its size should at least equal to the size
+ * of port field (16 bits), to support zero-copy broadcast.
+ * It should only be accessed using the following functions:
+ * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
+ * rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
+ * or non-atomic) is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC
+ * config option.
+ */
+ RTE_STD_C11
+ union {
+ rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
+ uint16_t refcnt;
+ /**< Non-atomically accessed refcnt */
+ };
+ uint16_t nb_segs; /**< Number of segments. */
+
+ /** Input port (16 bits to support more than 256 virtual ports). */
+ uint16_t port;
+
+ uint64_t ol_flags; /**< Offload features. */
+
+ /* remaining bytes are set on RX when pulling packet from descriptor */
+ MARKER rx_descriptor_fields1;
+
+ /*
+ * The packet type, which is the combination of outer/inner L2, L3, L4
+ * and tunnel types. The packet_type is about data really present in the
+ * mbuf. Example: if vlan stripping is enabled, a received vlan packet
+ * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
+ * vlan is stripped from the data.
+ */
+ RTE_STD_C11
+ union {
+ uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
+ struct {
+ uint32_t l2_type:4; /**< (Outer) L2 type. */
+ uint32_t l3_type:4; /**< (Outer) L3 type. */
+ uint32_t l4_type:4; /**< (Outer) L4 type. */
+ uint32_t tun_type:4; /**< Tunnel type. */
+ uint32_t inner_l2_type:4; /**< Inner L2 type. */
+ uint32_t inner_l3_type:4; /**< Inner L3 type. */
+ uint32_t inner_l4_type:4; /**< Inner L4 type. */
+ };
+ };
+
+ uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+ /** VLAN TCI (CPU order), valid if PKT_RX_VLAN_STRIPPED is set. */
+ uint16_t vlan_tci;
+
+ union {
+ uint32_t rss; /**< RSS hash result if RSS enabled */
+ struct {
+ RTE_STD_C11
+ union {
+ struct {
+ uint16_t hash;
+ uint16_t id;
+ };
+ uint32_t lo;
+ /**< Second 4 flexible bytes */
+ };
+ uint32_t hi;
+ /**< First 4 flexible bytes or FD ID, dependent on
+ * PKT_RX_FDIR_* flag in ol_flags.
+ */
+ } fdir; /**< Filter identifier if FDIR enabled */
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } sched; /**< Hierarchical scheduler */
+ uint32_t usr;
+ /**< User defined tags. See rte_distributor_process() */
+ } hash; /**< hash information */
+
+ /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */
+ uint16_t vlan_tci_outer;
+
+ uint16_t buf_len; /**< Length of segment buffer. */
+
+ /** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference
+ * are not normalized but are always the same for a given port.
+ */
+ uint64_t timestamp;
+
+ /* second cache line - fields only used in slow path or on TX */
+ MARKER cacheline1 __rte_cache_min_aligned;
+
+ RTE_STD_C11
+ union {
+ void *userdata; /**< Can be used for external metadata */
+ uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
+ };
+
+ struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
+
+ /* fields to support TX offloads */
+ RTE_STD_C11
+ union {
+ uint64_t tx_offload; /**< combined for easy fetch */
+ __extension__
+ struct {
+ uint64_t l2_len:7;
+ /**< L2 (MAC) Header Length for non-tunneling pkt.
+ * Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
+ */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+
+ /* fields for TX offloading of tunnels */
+ uint64_t outer_l3_len:9;
+ /**< Outer L3 (IP) Hdr Length. */
+ uint64_t outer_l2_len:7;
+ /**< Outer L2 (MAC) Hdr Length. */
+
+ /* uint64_t unused:8; */
+ };
+ };
+
+ /** Size of the application private data. In case of an indirect
+ * mbuf, it stores the direct mbuf private data size.
+ */
+ uint16_t priv_size;
+
+ /** Timesync flags for use with IEEE1588. */
+ uint16_t timesync;
+
+ /** Sequence number. See also rte_reorder_insert(). */
+ uint32_t seqn;
+
+} __rte_cache_aligned;
+
+
+/**
+ * Returns TRUE if given mbuf is indirect, or FALSE otherwise.
+ */
+#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
+
+/**
+ * Returns TRUE if given mbuf is direct, or FALSE otherwise.
+ */
+#define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))
+
+/**
+ * Private data in case of pktmbuf pool.
+ *
+ * A structure that contains some pktmbuf_pool-specific data that are
+ * appended after the mempool structure (in private data).
+ */
+struct rte_pktmbuf_pool_private {
+ uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
+ uint16_t mbuf_priv_size; /**< Size of private area in each mbuf. */
+};
+
+/**
+ * A macro that points to an offset into the data in the mbuf.
+ *
+ * The returned pointer is cast to type t. Before using this
+ * function, the user must ensure that the first segment is large
+ * enough to accommodate its data.
+ *
+ * @param m
+ * The packet mbuf.
+ * @param o
+ * The offset into the mbuf data.
+ * @param t
+ * The type to cast the result into.
+ */
+#define rte_pktmbuf_mtod_offset(m, t, o) \
+ ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
+
+/**
+ * A macro that points to the start of the data in the mbuf.
+ *
+ * The returned pointer is cast to type t. Before using this
+ * function, the user must ensure that the first segment is large
+ * enough to accommodate its data.
+ *
+ * @param m
+ * The packet mbuf.
+ * @param t
+ * The type to cast the result into.
+ */
+#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MBUF_H_ */
diff --git a/test/bpf/t1.c b/test/bpf/t1.c
new file mode 100644
index 00000000..60f9434a
--- /dev/null
+++ b/test/bpf/t1.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+/*
+ * eBPF program sample.
+ * Accepts pointer to first segment packet data as an input parameter.
+ * analog of tcpdump -s 1 -d 'dst 1.2.3.4 && udp && dst port 5000'
+ * (000) ldh [12]
+ * (001) jeq #0x800 jt 2 jf 12
+ * (002) ld [30]
+ * (003) jeq #0x1020304 jt 4 jf 12
+ * (004) ldb [23]
+ * (005) jeq #0x11 jt 6 jf 12
+ * (006) ldh [20]
+ * (007) jset #0x1fff jt 12 jf 8
+ * (008) ldxb 4*([14]&0xf)
+ * (009) ldh [x + 16]
+ * (010) jeq #0x1388 jt 11 jf 12
+ * (011) ret #1
+ * (012) ret #0
+ *
+ * To compile:
+ * clang -O2 -target bpf -c t1.c
+ */
+
+#include <stdint.h>
+#include <net/ethernet.h>
+#include <netinet/ip.h>
+#include <netinet/udp.h>
+
+uint64_t
+entry(void *pkt)
+{
+ struct ether_header *ether_header = (void *)pkt;
+
+ if (ether_header->ether_type != __builtin_bswap16(0x0800))
+ return 0;
+
+ struct iphdr *iphdr = (void *)(ether_header + 1);
+ if (iphdr->protocol != 17 || (iphdr->frag_off & 0x1ffff) != 0 ||
+ iphdr->daddr != __builtin_bswap32(0x1020304))
+ return 0;
+
+ int hlen = iphdr->ihl * 4;
+ struct udphdr *udphdr = (void *)iphdr + hlen;
+
+ if (udphdr->dest != __builtin_bswap16(5000))
+ return 0;
+
+ return 1;
+}
diff --git a/test/bpf/t2.c b/test/bpf/t2.c
new file mode 100644
index 00000000..69d7a4fe
--- /dev/null
+++ b/test/bpf/t2.c
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+/*
+ * eBPF program sample.
+ * Accepts pointer to struct rte_mbuf as an input parameter.
+ * clean up mbuf's vlan_tci and all related RX flags
+ * (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED).
+ * Doesn't touch contents of packet data.
+ * To compile:
+ * clang -O2 -I${RTE_SDK}/${RTE_TARGET}/include \
+ * -target bpf -Wno-int-to-void-pointer-cast -c t2.c
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <rte_config.h>
+#include "mbuf.h"
+
+uint64_t
+entry(void *pkt)
+{
+ struct rte_mbuf *mb;
+
+ mb = pkt;
+ mb->vlan_tci = 0;
+ mb->ol_flags &= ~(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+
+ return 1;
+}
diff --git a/test/bpf/t3.c b/test/bpf/t3.c
new file mode 100644
index 00000000..531b9cb8
--- /dev/null
+++ b/test/bpf/t3.c
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+/*
+ * eBPF program sample.
+ * Accepts pointer to struct rte_mbuf as an input parameter.
+ * Dump the mbuf into stdout if it is an ARP packet (aka tcpdump 'arp').
+ * To compile:
+ * clang -O2 -I${RTE_SDK}/${RTE_TARGET}/include \
+ * -target bpf -Wno-int-to-void-pointer-cast -c t3.c
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <net/ethernet.h>
+#include <rte_config.h>
+#include "mbuf.h"
+
+extern void rte_pktmbuf_dump(FILE *, const struct rte_mbuf *, unsigned int);
+
+uint64_t
+entry(const void *pkt)
+{
+ const struct rte_mbuf *mb;
+ const struct ether_header *eth;
+
+ mb = pkt;
+ eth = rte_pktmbuf_mtod(mb, const struct ether_header *);
+
+ if (eth->ether_type == __builtin_bswap16(ETHERTYPE_ARP))
+ rte_pktmbuf_dump(stdout, mb, 64);
+
+ return 1;
+}
diff --git a/test/test-pipeline/init.c b/test/test-pipeline/init.c
index 19cf04a6..f33216c9 100644
--- a/test/test-pipeline/init.c
+++ b/test/test-pipeline/init.c
@@ -70,11 +70,7 @@ struct app_params app = {
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .header_split = 0, /* Header Split disabled */
- .hw_ip_checksum = 1, /* IP checksum offload enabled */
- .hw_vlan_filter = 0, /* VLAN filtering disabled */
- .jumbo_frame = 0, /* Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /* CRC stripped by hardware */
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_CRC_STRIP,
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/test/test-pipeline/main.h b/test/test-pipeline/main.h
index f844e941..59dcfddb 100644
--- a/test/test-pipeline/main.h
+++ b/test/test-pipeline/main.h
@@ -107,6 +107,10 @@ uint64_t test_hash(void *key,
uint32_t key_size,
uint64_t seed);
+uint32_t test_hash_cuckoo(const void *key,
+ uint32_t key_size,
+ uint32_t seed);
+
void app_main_loop_worker(void);
void app_main_loop_worker_pipeline_stub(void);
void app_main_loop_worker_pipeline_hash(void);
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 11e2402d..c2014723 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -15,6 +15,7 @@
#include <rte_port_ring.h>
#include <rte_table_hash.h>
#include <rte_hash.h>
+#include <rte_table_hash_cuckoo.h>
#include <rte_pipeline.h>
#include "main.h"
@@ -151,6 +152,17 @@ app_main_loop_worker_pipeline_hash(void) {
.seed = 0,
};
+ struct rte_table_hash_cuckoo_params table_hash_cuckoo_params = {
+ .name = "TABLE",
+ .key_size = key_size,
+ .key_offset = APP_METADATA_OFFSET(32),
+ .key_mask = NULL,
+ .n_keys = 1 << 24,
+ .n_buckets = 1 << 22,
+ .f_hash = test_hash_cuckoo,
+ .seed = 0,
+ };
+
/* Table configuration */
switch (app.pipeline_type) {
case e_APP_PIPELINE_HASH_KEY8_EXT:
@@ -298,7 +310,7 @@ app_main_loop_worker_pipeline_hash(void) {
{
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_cuckoo_ops,
- .arg_create = &table_hash_params,
+ .arg_create = &table_hash_cuckoo_params,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
@@ -379,6 +391,18 @@ uint64_t test_hash(
return signature;
}
+uint32_t test_hash_cuckoo(
+ const void *key,
+ __attribute__((unused)) uint32_t key_size,
+ __attribute__((unused)) uint32_t seed)
+{
+ const uint32_t *k32 = key;
+ uint32_t ip_dst = rte_be_to_cpu_32(k32[0]);
+ uint32_t signature = (ip_dst >> 2) | ((ip_dst & 0x3) << 30);
+
+ return signature;
+}
+
void
app_main_loop_rx_metadata(void) {
uint32_t i, j;
diff --git a/test/test/Makefile b/test/test/Makefile
index a88cc38b..e6967bab 100644
--- a/test/test/Makefile
+++ b/test/test/Makefile
@@ -70,6 +70,7 @@ SRCS-y += test_memzone.c
SRCS-y += test_bitmap.c
SRCS-y += test_reciprocal_division.c
SRCS-y += test_reciprocal_division_perf.c
+SRCS-y += test_fbarray.c
SRCS-y += test_ring.c
SRCS-y += test_ring_perf.c
@@ -113,6 +114,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_functions.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_scaling.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_multiwriter.c
+SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_readwrite.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm_perf.c
@@ -161,7 +163,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += test_distributor_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_REORDER) += test_reorder.c
-SRCS-y += test_devargs.c
SRCS-y += virtual_pmd.c
SRCS-y += packet_burst_generator.c
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += test_acl.c
@@ -180,11 +181,18 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_blockcipher.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev.c
+SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_asym.c
+
+ifeq ($(CONFIG_RTE_COMPRESSDEV_TEST),y)
+SRCS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += test_compressdev.c
+endif
ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
SRCS-y += test_eventdev.c
SRCS-y += test_event_ring.c
SRCS-y += test_event_eth_rx_adapter.c
+SRCS-y += test_event_timer_adapter.c
+SRCS-y += test_event_crypto_adapter.c
endif
ifeq ($(CONFIG_RTE_LIBRTE_RAWDEV),y)
@@ -193,6 +201,8 @@ endif
SRCS-$(CONFIG_RTE_LIBRTE_KVARGS) += test_kvargs.c
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += test_bpf.c
+
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
@@ -201,6 +211,11 @@ CFLAGS += $(WERROR_FLAGS)
CFLAGS += -D_GNU_SOURCE
LDLIBS += -lm
+ifeq ($(CONFIG_RTE_COMPRESSDEV_TEST),y)
+ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y)
+LDLIBS += -lz
+endif
+endif
# Disable VTA for memcpy test
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
@@ -211,6 +226,8 @@ CFLAGS_test_memcpy_perf.o += -fno-var-tracking-assignments
# designated initializers.
ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1)
CFLAGS_test_eventdev_sw.o += -Wno-missing-field-initializers
+CFLAGS_test_event_timer_adapter.o += -Wno-missing-field-initializers
+CFLAGS_test_event_crypto_adapter.o += -Wno-missing-field-initializers
endif
endif
endif
diff --git a/test/test/autotest.py b/test/test/autotest.py
index 1cfd8cf2..12997fdf 100644
--- a/test/test/autotest.py
+++ b/test/test/autotest.py
@@ -36,14 +36,15 @@ cmdline = "%s -c f -n 4" % (sys.argv[1])
print(cmdline)
-runner = autotest_runner.AutotestRunner(cmdline, target, test_blacklist,
- test_whitelist)
+# how many workers to run tests with. FreeBSD doesn't support multiple primary
+# processes, so make it 1, otherwise make it 4. ignored for non-parallel tests
+n_processes = 1 if "bsdapp" in target else 4
-for test_group in autotest_data.parallel_test_group_list:
- runner.add_parallel_test_group(test_group)
+runner = autotest_runner.AutotestRunner(cmdline, target, test_blacklist,
+ test_whitelist, n_processes)
-for test_group in autotest_data.non_parallel_test_group_list:
- runner.add_non_parallel_test_group(test_group)
+runner.parallel_tests = autotest_data.parallel_test_list[:]
+runner.non_parallel_tests = autotest_data.non_parallel_test_list[:]
num_fails = runner.run_all_tests()
diff --git a/test/test/autotest_data.py b/test/test/autotest_data.py
index aacfe0a6..f68d9b11 100644
--- a/test/test/autotest_data.py
+++ b/test/test/autotest_data.py
@@ -3,465 +3,662 @@
# Test data for autotests
-from glob import glob
from autotest_test_funcs import *
-
-# quick and dirty function to find out number of sockets
-def num_sockets():
- result = len(glob("/sys/devices/system/node/node*"))
- if result == 0:
- return 1
- return result
-
-
-# Assign given number to each socket
-# e.g. 32 becomes 32,32 or 32,32,32,32
-def per_sockets(num):
- return ",".join([str(num)] * num_sockets())
-
# groups of tests that can be run in parallel
# the grouping has been found largely empirically
-parallel_test_group_list = [
- {
- "Prefix": "group_1",
- "Memory": per_sockets(8),
- "Tests":
- [
- {
- "Name": "Cycles autotest",
- "Command": "cycles_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Timer autotest",
- "Command": "timer_autotest",
- "Func": timer_autotest,
- "Report": None,
- },
- {
- "Name": "Debug autotest",
- "Command": "debug_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Errno autotest",
- "Command": "errno_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Meter autotest",
- "Command": "meter_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Common autotest",
- "Command": "common_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Resource autotest",
- "Command": "resource_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "group_2",
- "Memory": "16",
- "Tests":
- [
- {
- "Name": "Memory autotest",
- "Command": "memory_autotest",
- "Func": memory_autotest,
- "Report": None,
- },
- {
- "Name": "Read/write lock autotest",
- "Command": "rwlock_autotest",
- "Func": rwlock_autotest,
- "Report": None,
- },
- {
- "Name": "Logs autotest",
- "Command": "logs_autotest",
- "Func": logs_autotest,
- "Report": None,
- },
- {
- "Name": "CPU flags autotest",
- "Command": "cpuflags_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Version autotest",
- "Command": "version_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "EAL filesystem autotest",
- "Command": "eal_fs_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "EAL flags autotest",
- "Command": "eal_flags_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Hash autotest",
- "Command": "hash_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ],
- },
- {
- "Prefix": "group_3",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "LPM autotest",
- "Command": "lpm_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "LPM6 autotest",
- "Command": "lpm6_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Memcpy autotest",
- "Command": "memcpy_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Memzone autotest",
- "Command": "memzone_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "String autotest",
- "Command": "string_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Alarm autotest",
- "Command": "alarm_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "group_4",
- "Memory": per_sockets(128),
- "Tests":
- [
- {
- "Name": "PCI autotest",
- "Command": "pci_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Malloc autotest",
- "Command": "malloc_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Multi-process autotest",
- "Command": "multiprocess_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Mbuf autotest",
- "Command": "mbuf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Per-lcore autotest",
- "Command": "per_lcore_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Ring autotest",
- "Command": "ring_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "group_5",
- "Memory": "32",
- "Tests":
- [
- {
- "Name": "Spinlock autotest",
- "Command": "spinlock_autotest",
- "Func": spinlock_autotest,
- "Report": None,
- },
- {
- "Name": "Byte order autotest",
- "Command": "byteorder_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "TAILQ autotest",
- "Command": "tailq_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Command-line autotest",
- "Command": "cmdline_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Interrupts autotest",
- "Command": "interrupt_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "group_6",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "Function reentrancy autotest",
- "Command": "func_reentrancy_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Mempool autotest",
- "Command": "mempool_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Atomics autotest",
- "Command": "atomic_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Prefetch autotest",
- "Command": "prefetch_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Red autotest",
- "Command": "red_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "group_7",
- "Memory": "64",
- "Tests":
- [
- {
- "Name": "PMD ring autotest",
- "Command": "ring_pmd_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Access list control autotest",
- "Command": "acl_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Sched autotest",
- "Command": "sched_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
+parallel_test_list = [
+ {
+ "Name": "Cycles autotest",
+ "Command": "cycles_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Timer autotest",
+ "Command": "timer_autotest",
+ "Func": timer_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Debug autotest",
+ "Command": "debug_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Errno autotest",
+ "Command": "errno_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Meter autotest",
+ "Command": "meter_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Common autotest",
+ "Command": "common_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Resource autotest",
+ "Command": "resource_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memory autotest",
+ "Command": "memory_autotest",
+ "Func": memory_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Read/write lock autotest",
+ "Command": "rwlock_autotest",
+ "Func": rwlock_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Logs autotest",
+ "Command": "logs_autotest",
+ "Func": logs_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "CPU flags autotest",
+ "Command": "cpuflags_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Version autotest",
+ "Command": "version_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "EAL filesystem autotest",
+ "Command": "eal_fs_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "EAL flags autotest",
+ "Command": "eal_flags_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash autotest",
+ "Command": "hash_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "LPM autotest",
+ "Command": "lpm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "LPM6 autotest",
+ "Command": "lpm6_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memcpy autotest",
+ "Command": "memcpy_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memzone autotest",
+ "Command": "memzone_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "String autotest",
+ "Command": "string_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Alarm autotest",
+ "Command": "alarm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Malloc autotest",
+ "Command": "malloc_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Multi-process autotest",
+ "Command": "multiprocess_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Mbuf autotest",
+ "Command": "mbuf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Per-lcore autotest",
+ "Command": "per_lcore_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Ring autotest",
+ "Command": "ring_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Spinlock autotest",
+ "Command": "spinlock_autotest",
+ "Func": spinlock_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Byte order autotest",
+ "Command": "byteorder_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "TAILQ autotest",
+ "Command": "tailq_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Command-line autotest",
+ "Command": "cmdline_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Interrupts autotest",
+ "Command": "interrupt_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Function reentrancy autotest",
+ "Command": "func_reentrancy_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Mempool autotest",
+ "Command": "mempool_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Atomics autotest",
+ "Command": "atomic_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Prefetch autotest",
+ "Command": "prefetch_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Red autotest",
+ "Command": "red_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "PMD ring autotest",
+ "Command": "ring_pmd_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Access list control autotest",
+ "Command": "acl_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Sched autotest",
+ "Command": "sched_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Eventdev selftest octeontx",
+ "Command": "eventdev_selftest_octeontx",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Event ring autotest",
+ "Command": "event_ring_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Table autotest",
+ "Command": "table_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Flow classify autotest",
+ "Command": "flow_classify_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Event eth rx adapter autotest",
+ "Command": "event_eth_rx_adapter_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "User delay",
+ "Command": "user_delay_us",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Rawdev autotest",
+ "Command": "rawdev_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Kvargs autotest",
+ "Command": "kvargs_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Devargs autotest",
+ "Command": "devargs_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Link bonding autotest",
+ "Command": "link_bonding_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Link bonding mode4 autotest",
+ "Command": "link_bonding_mode4_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Link bonding rssconf autotest",
+ "Command": "link_bonding_rssconf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Crc autotest",
+ "Command": "crc_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Distributor autotest",
+ "Command": "distributor_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Reorder autotest",
+ "Command": "reorder_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Barrier autotest",
+ "Command": "barrier_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Bitmap test",
+ "Command": "bitmap_test",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash scaling autotest",
+ "Command": "hash_scaling_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash multiwriter autotest",
+ "Command": "hash_multiwriter_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Service autotest",
+ "Command": "service_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Timer racecond autotest",
+ "Command": "timer_racecond_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Member autotest",
+ "Command": "member_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Efd_autotest",
+ "Command": "efd_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Thash autotest",
+ "Command": "thash_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash function autotest",
+ "Command": "hash_functions_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev sw mrvl autotest",
+ "Command": "cryptodev_sw_mrvl_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev dpaa2 sec autotest",
+ "Command": "cryptodev_dpaa2_sec_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev dpaa sec autotest",
+ "Command": "cryptodev_dpaa_sec_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev qat autotest",
+ "Command": "cryptodev_qat_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev aesni mb autotest",
+ "Command": "cryptodev_aesni_mb_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev openssl autotest",
+ "Command": "cryptodev_openssl_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev scheduler autotest",
+ "Command": "cryptodev_scheduler_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev aesni gcm autotest",
+ "Command": "cryptodev_aesni_gcm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev null autotest",
+ "Command": "cryptodev_null_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev sw snow3g autotest",
+ "Command": "cryptodev_sw_snow3g_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev sw kasumi autotest",
+ "Command": "cryptodev_sw_kasumi_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev_sw_zuc_autotest",
+ "Command": "cryptodev_sw_zuc_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Reciprocal division",
+ "Command": "reciprocal_division",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Red all",
+ "Command": "red_all",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Fbarray autotest",
+ "Command": "fbarray_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ #
+ #Please always keep all dump tests at the end and together!
+ #
+ {
+ "Name": "Dump physmem",
+ "Command": "dump_physmem",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump memzone",
+ "Command": "dump_memzone",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump struct sizes",
+ "Command": "dump_struct_sizes",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump mempool",
+ "Command": "dump_mempool",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump malloc stats",
+ "Command": "dump_malloc_stats",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump devargs",
+ "Command": "dump_devargs",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump log types",
+ "Command": "dump_log_types",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump_ring",
+ "Command": "dump_ring",
+ "Func": dump_autotest,
+ "Report": None,
},
]
# tests that should not be run when any other tests are running
-non_parallel_test_group_list = [
-
+non_parallel_test_list = [
+ {
+ "Name": "Eventdev common autotest",
+ "Command": "eventdev_common_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Eventdev selftest sw",
+ "Command": "eventdev_selftest_sw",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "KNI autotest",
+ "Command": "kni_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Mempool performance autotest",
+ "Command": "mempool_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memcpy performance autotest",
+ "Command": "memcpy_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash performance autotest",
+ "Command": "hash_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Power autotest",
+ "Command": "power_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Power ACPI cpufreq autotest",
+ "Command": "power_acpi_cpufreq_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
{
- "Prefix": "eventdev",
- "Memory": "512",
- "Tests":
- [
- {
- "Name": "Eventdev common autotest",
- "Command": "eventdev_common_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "eventdev_sw",
- "Memory": "512",
- "Tests":
- [
- {
- "Name": "Eventdev sw autotest",
- "Command": "eventdev_sw_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "kni",
- "Memory": "512",
- "Tests":
- [
- {
- "Name": "KNI autotest",
- "Command": "kni_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "mempool_perf",
- "Memory": per_sockets(256),
- "Tests":
- [
- {
- "Name": "Mempool performance autotest",
- "Command": "mempool_perf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "memcpy_perf",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "Memcpy performance autotest",
- "Command": "memcpy_perf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "hash_perf",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "Hash performance autotest",
- "Command": "hash_perf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "power",
- "Memory": "16",
- "Tests":
- [
- {
- "Name": "Power autotest",
- "Command": "power_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "power_acpi_cpufreq",
- "Memory": "16",
- "Tests":
- [
- {
- "Name": "Power ACPI cpufreq autotest",
- "Command": "power_acpi_cpufreq_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "power_kvm_vm",
- "Memory": "16",
- "Tests":
- [
- {
- "Name": "Power KVM VM autotest",
- "Command": "power_kvm_vm_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "timer_perf",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "Timer performance autotest",
- "Command": "timer_perf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
+ "Name": "Power KVM VM autotest",
+ "Command": "power_kvm_vm_autotest",
+ "Func": default_autotest,
+ "Report": None,
},
+ {
+ "Name": "Timer performance autotest",
+ "Command": "timer_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Pmd perf autotest",
+ "Command": "pmd_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Ring pmd perf autotest",
+ "Command": "ring_pmd_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Distributor perf autotest",
+ "Command": "distributor_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Red_perf",
+ "Command": "red_perf",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Lpm6 perf autotest",
+ "Command": "lpm6_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Lpm perf autotest",
+ "Command": "lpm_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Efd perf autotest",
+ "Command": "efd_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Member perf autotest",
+ "Command": "member_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Reciprocal division perf",
+ "Command": "reciprocal_division_perf",
+ "Func": default_autotest,
+ "Report": None,
+ },
#
# Please always make sure that ring_perf is the last test!
#
{
- "Prefix": "ring_perf",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "Ring performance autotest",
- "Command": "ring_perf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
+ "Name": "Ring performance autotest",
+ "Command": "ring_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
},
]
diff --git a/test/test/autotest_runner.py b/test/test/autotest_runner.py
index a692f069..36941a40 100644
--- a/test/test/autotest_runner.py
+++ b/test/test/autotest_runner.py
@@ -3,18 +3,19 @@
# The main logic behind running autotests in parallel
+from __future__ import print_function
import StringIO
import csv
-import multiprocessing
+from multiprocessing import Pool, Queue
import pexpect
import re
import subprocess
import sys
import time
+import glob
+import os
# wait for prompt
-
-
def wait_prompt(child):
try:
child.sendline()
@@ -27,143 +28,150 @@ def wait_prompt(child):
else:
return False
-# run a test group
-# each result tuple in results list consists of:
-# result value (0 or -1)
-# result string
-# test name
-# total test run time (double)
-# raw test log
-# test report (if not available, should be None)
-#
-# this function needs to be outside AutotestRunner class
-# because otherwise Pool won't work (or rather it will require
-# quite a bit of effort to make it work).
+# get all valid NUMA nodes
+def get_numa_nodes():
+ return [
+ int(
+ re.match(r"node(\d+)", os.path.basename(node))
+ .group(1)
+ )
+ for node in glob.glob("/sys/devices/system/node/node*")
+ ]
-def run_test_group(cmdline, test_group):
- results = []
- child = None
+
+# find first (or any, really) CPU on a particular node, will be used to spread
+# processes around NUMA nodes to avoid exhausting memory on particular node
+def first_cpu_on_node(node_nr):
+ cpu_path = glob.glob("/sys/devices/system/node/node%d/cpu*" % node_nr)[0]
+ cpu_name = os.path.basename(cpu_path)
+ m = re.match(r"cpu(\d+)", cpu_name)
+ return int(m.group(1))
+
+
+pool_child = None # per-process child
+
+
+# we initialize each worker with a queue because we need per-pool unique
+# command-line arguments, but we cannot do different arguments in an initializer
+# because the API doesn't allow per-worker initializer arguments. so, instead,
+# we will initialize with a shared queue, and dequeue command-line arguments
+# from this queue
+def pool_init(queue, result_queue):
+ global pool_child
+
+ cmdline, prefix = queue.get()
start_time = time.time()
- startuplog = None
+ name = ("Start %s" % prefix) if prefix != "" else "Start"
+
+ # use default prefix if no prefix was specified
+ prefix_cmdline = "--file-prefix=%s" % prefix if prefix != "" else ""
+
+ # append prefix to cmdline
+ cmdline = "%s %s" % (cmdline, prefix_cmdline)
+
+ # prepare logging of init
+ startuplog = StringIO.StringIO()
# run test app
try:
- # prepare logging of init
- startuplog = StringIO.StringIO()
- print >>startuplog, "\n%s %s\n" % ("=" * 20, test_group["Prefix"])
- print >>startuplog, "\ncmdline=%s" % cmdline
+ print("\n%s %s\n" % ("=" * 20, prefix), file=startuplog)
+ print("\ncmdline=%s" % cmdline, file=startuplog)
- child = pexpect.spawn(cmdline, logfile=startuplog)
+ pool_child = pexpect.spawn(cmdline, logfile=startuplog)
# wait for target to boot
- if not wait_prompt(child):
- child.close()
+ if not wait_prompt(pool_child):
+ pool_child.close()
- results.append((-1,
+ result = tuple((-1,
"Fail [No prompt]",
- "Start %s" % test_group["Prefix"],
+ name,
+ time.time() - start_time,
+ startuplog.getvalue(),
+ None))
+ pool_child = None
+ else:
+ result = tuple((0,
+ "Success",
+ name,
time.time() - start_time,
startuplog.getvalue(),
None))
-
- # mark all tests as failed
- for test in test_group["Tests"]:
- results.append((-1, "Fail [No prompt]", test["Name"],
- time.time() - start_time, "", None))
- # exit test
- return results
-
except:
- results.append((-1,
+ result = tuple((-1,
"Fail [Can't run]",
- "Start %s" % test_group["Prefix"],
+ name,
time.time() - start_time,
startuplog.getvalue(),
None))
+ pool_child = None
- # mark all tests as failed
- for t in test_group["Tests"]:
- results.append((-1, "Fail [Can't run]", t["Name"],
- time.time() - start_time, "", None))
- # exit test
- return results
-
- # startup was successful
- results.append((0, "Success", "Start %s" % test_group["Prefix"],
- time.time() - start_time, startuplog.getvalue(), None))
+ result_queue.put(result)
- # parse the binary for available test commands
- binary = cmdline.split()[0]
- stripped = 'not stripped' not in subprocess.check_output(['file', binary])
- if not stripped:
- symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
- avail_cmds = re.findall('test_register_(\w+)', symbols)
- # run all tests in test group
- for test in test_group["Tests"]:
+# run a test
+# each result tuple in results list consists of:
+# result value (0 or -1)
+# result string
+# test name
+# total test run time (double)
+# raw test log
+# test report (if not available, should be None)
+#
+# this function needs to be outside AutotestRunner class because otherwise Pool
+# won't work (or rather it will require quite a bit of effort to make it work).
+def run_test(target, test):
+ global pool_child
- # create log buffer for each test
- # in multiprocessing environment, the logging would be
- # interleaved and will create a mess, hence the buffering
- logfile = StringIO.StringIO()
- child.logfile = logfile
+ if pool_child is None:
+ return -1, "Fail [No test process]", test["Name"], 0, "", None
- result = ()
+ # create log buffer for each test
+ # in multiprocessing environment, the logging would be
+ # interleaved and will create a mess, hence the buffering
+ logfile = StringIO.StringIO()
+ pool_child.logfile = logfile
- # make a note when the test started
- start_time = time.time()
+ # make a note when the test started
+ start_time = time.time()
- try:
- # print test name to log buffer
- print >>logfile, "\n%s %s\n" % ("-" * 20, test["Name"])
+ try:
+ # print test name to log buffer
+ print("\n%s %s\n" % ("-" * 20, test["Name"]), file=logfile)
- # run test function associated with the test
- if stripped or test["Command"] in avail_cmds:
- result = test["Func"](child, test["Command"])
- else:
- result = (0, "Skipped [Not Available]")
+ # run test function associated with the test
+ result = test["Func"](pool_child, test["Command"])
- # make a note when the test was finished
- end_time = time.time()
+ # make a note when the test was finished
+ end_time = time.time()
- # append test data to the result tuple
- result += (test["Name"], end_time - start_time,
- logfile.getvalue())
+ log = logfile.getvalue()
- # call report function, if any defined, and supply it with
- # target and complete log for test run
- if test["Report"]:
- report = test["Report"](self.target, log)
+ # append test data to the result tuple
+ result += (test["Name"], end_time - start_time, log)
- # append report to results tuple
- result += (report,)
- else:
- # report is None
- result += (None,)
- except:
- # make a note when the test crashed
- end_time = time.time()
-
- # mark test as failed
- result = (-1, "Fail [Crash]", test["Name"],
- end_time - start_time, logfile.getvalue(), None)
- finally:
- # append the results to the results list
- results.append(result)
+ # call report function, if any defined, and supply it with
+ # target and complete log for test run
+ if test["Report"]:
+ report = test["Report"](target, log)
- # regardless of whether test has crashed, try quitting it
- try:
- child.sendline("quit")
- child.close()
- # if the test crashed, just do nothing instead
+ # append report to results tuple
+ result += (report,)
+ else:
+ # report is None
+ result += (None,)
except:
- # nop
- pass
+ # make a note when the test crashed
+ end_time = time.time()
+
+ # mark test as failed
+ result = (-1, "Fail [Crash]", test["Name"],
+ end_time - start_time, logfile.getvalue(), None)
# return test results
- return results
+ return result
# class representing an instance of autotests run
@@ -181,11 +189,17 @@ class AutotestRunner:
blacklist = []
whitelist = []
- def __init__(self, cmdline, target, blacklist, whitelist):
+ def __init__(self, cmdline, target, blacklist, whitelist, n_processes):
self.cmdline = cmdline
self.target = target
+ self.binary = cmdline.split()[0]
self.blacklist = blacklist
self.whitelist = whitelist
+ self.skipped = []
+ self.parallel_tests = []
+ self.non_parallel_tests = []
+ self.n_processes = n_processes
+ self.active_processes = 0
# log file filename
logfile = "%s.log" % target
@@ -199,177 +213,196 @@ class AutotestRunner:
self.csvwriter.writerow(["test_name", "test_result", "result_str"])
# set up cmdline string
- def __get_cmdline(self, test):
- cmdline = self.cmdline
-
- # append memory limitations for each test
- # otherwise tests won't run in parallel
- if "i686" not in self.target:
- cmdline += " --socket-mem=%s" % test["Memory"]
- else:
- # affinitize startup so that tests don't fail on i686
- cmdline = "taskset 1 " + cmdline
- cmdline += " -m " + str(sum(map(int, test["Memory"].split(","))))
-
- # set group prefix for autotest group
- # otherwise they won't run in parallel
- cmdline += " --file-prefix=%s" % test["Prefix"]
+ def __get_cmdline(self, cpu_nr):
+ cmdline = ("taskset -c %i " % cpu_nr) + self.cmdline
return cmdline
- def add_parallel_test_group(self, test_group):
- self.parallel_test_groups.append(test_group)
+ def __process_result(self, result):
- def add_non_parallel_test_group(self, test_group):
- self.non_parallel_test_groups.append(test_group)
+ # unpack result tuple
+ test_result, result_str, test_name, \
+ test_time, log, report = result
- def __process_results(self, results):
- # this iterates over individual test results
- for i, result in enumerate(results):
+ # get total run time
+ cur_time = time.time()
+ total_time = int(cur_time - self.start)
- # increase total number of tests that were run
- # do not include "start" test
- if i > 0:
- self.n_tests += 1
+ # print results, test run time and total time since start
+ result = ("%s:" % test_name).ljust(30)
+ result += result_str.ljust(29)
+ result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)
- # unpack result tuple
- test_result, result_str, test_name, \
- test_time, log, report = result
+ # don't print out total time every line, it's the same anyway
+ print(result + "[%02dm %02ds]" % (total_time / 60, total_time % 60))
- # get total run time
- cur_time = time.time()
- total_time = int(cur_time - self.start)
+ # if test failed and it wasn't a "start" test
+ if test_result < 0:
+ self.fails += 1
- # print results, test run time and total time since start
- result = ("%s:" % test_name).ljust(30)
- result += result_str.ljust(29)
- result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)
+ # collect logs
+ self.log_buffers.append(log)
- # don't print out total time every line, it's the same anyway
- if i == len(results) - 1:
- print(result,
- "[%02dm %02ds]" % (total_time / 60, total_time % 60))
+ # create report if it exists
+ if report:
+ try:
+ f = open("%s_%s_report.rst" %
+ (self.target, test_name), "w")
+ except IOError:
+ print("Report for %s could not be created!" % test_name)
else:
- print(result)
-
- # if test failed and it wasn't a "start" test
- if test_result < 0 and not i == 0:
- self.fails += 1
-
- # collect logs
- self.log_buffers.append(log)
-
- # create report if it exists
- if report:
- try:
- f = open("%s_%s_report.rst" %
- (self.target, test_name), "w")
- except IOError:
- print("Report for %s could not be created!" % test_name)
- else:
- with f:
- f.write(report)
-
- # write test result to CSV file
- if i != 0:
- self.csvwriter.writerow([test_name, test_result, result_str])
+ with f:
+ f.write(report)
+
+ # write test result to CSV file
+ self.csvwriter.writerow([test_name, test_result, result_str])
+
+ # this function checks individual test and decides if this test should be in
+ # the group by comparing it against whitelist/blacklist. it also checks if
+ # the test is compiled into the binary, and marks it as skipped if necessary
+ def __filter_test(self, test):
+ test_cmd = test["Command"]
+ test_id = test_cmd
+
+ # dump tests are specified in full e.g. "Dump_mempool"
+ if "_autotest" in test_id:
+ test_id = test_id[:-len("_autotest")]
+
+ # filter out blacklisted/whitelisted tests
+ if self.blacklist and test_id in self.blacklist:
+ return False
+ if self.whitelist and test_id not in self.whitelist:
+ return False
+
+ # if test wasn't compiled in, remove it as well
+
+ # parse the binary for available test commands
+ stripped = 'not stripped' not in \
+ subprocess.check_output(['file', self.binary])
+ if not stripped:
+ symbols = subprocess.check_output(['nm',
+ self.binary]).decode('utf-8')
+ avail_cmds = re.findall('test_register_(\w+)', symbols)
+
+ if test_cmd not in avail_cmds:
+ # notify user
+ result = 0, "Skipped [Not compiled]", test_id, 0, "", None
+ self.skipped.append(tuple(result))
+ return False
- # this function iterates over test groups and removes each
- # test that is not in whitelist/blacklist
- def __filter_groups(self, test_groups):
- groups_to_remove = []
+ return True
- # filter out tests from parallel test groups
- for i, test_group in enumerate(test_groups):
+ def __run_test_group(self, test_group, worker_cmdlines):
+ group_queue = Queue()
+ init_result_queue = Queue()
+ for proc, cmdline in enumerate(worker_cmdlines):
+ prefix = "test%i" % proc if len(worker_cmdlines) > 1 else ""
+ group_queue.put(tuple((cmdline, prefix)))
- # iterate over a copy so that we could safely delete individual
- # tests
- for test in test_group["Tests"][:]:
- test_id = test["Command"]
+ # create a pool of worker threads
+ # we will initialize child in the initializer, and we don't need to
+ # close the child because when the pool worker gets destroyed, child
+ # closes the process
+ pool = Pool(processes=len(worker_cmdlines),
+ initializer=pool_init,
+ initargs=(group_queue, init_result_queue))
+
+ results = []
- # dump tests are specified in full e.g. "Dump_mempool"
- if "_autotest" in test_id:
- test_id = test_id[:-len("_autotest")]
+ # process all initialization results
+ for _ in range(len(worker_cmdlines)):
+ self.__process_result(init_result_queue.get())
- # filter out blacklisted/whitelisted tests
- if self.blacklist and test_id in self.blacklist:
- test_group["Tests"].remove(test)
- continue
- if self.whitelist and test_id not in self.whitelist:
- test_group["Tests"].remove(test)
+ # run all tests asynchronously
+ for test in test_group:
+ result = pool.apply_async(run_test, (self.target, test))
+ results.append(result)
+
+ # tell the pool to stop all processes once done
+ pool.close()
+
+ # iterate while we have group execution results to get
+ while len(results) > 0:
+ # iterate over a copy to be able to safely delete results
+ # this iterates over a list of group results
+ for async_result in results[:]:
+ # if the thread hasn't finished yet, continue
+ if not async_result.ready():
continue
- # modify or remove original group
- if len(test_group["Tests"]) > 0:
- test_groups[i] = test_group
- else:
- # remember which groups should be deleted
- # put the numbers backwards so that we start
- # deleting from the end, not from the beginning
- groups_to_remove.insert(0, i)
+ res = async_result.get()
- # remove test groups that need to be removed
- for i in groups_to_remove:
- del test_groups[i]
+ self.__process_result(res)
- return test_groups
+ # remove result from results list once we're done with it
+ results.remove(async_result)
# iterate over test groups and run tests associated with them
def run_all_tests(self):
# filter groups
- self.parallel_test_groups = \
- self.__filter_groups(self.parallel_test_groups)
- self.non_parallel_test_groups = \
- self.__filter_groups(self.non_parallel_test_groups)
-
- # create a pool of worker threads
- pool = multiprocessing.Pool(processes=1)
-
- results = []
-
- # whatever happens, try to save as much logs as possible
- try:
-
- # create table header
- print("")
- print("Test name".ljust(30), "Test result".ljust(29),
- "Test".center(9), "Total".center(9))
- print("=" * 80)
-
- # make a note of tests start time
- self.start = time.time()
+ self.parallel_tests = list(
+ filter(self.__filter_test,
+ self.parallel_tests)
+ )
+ self.non_parallel_tests = list(
+ filter(self.__filter_test,
+ self.non_parallel_tests)
+ )
+
+ parallel_cmdlines = []
+ # FreeBSD doesn't have NUMA support
+ numa_nodes = get_numa_nodes()
+ if len(numa_nodes) > 0:
+ for proc in range(self.n_processes):
+ # spread cpu affinity between NUMA nodes to have less chance of
+ # running out of memory while running multiple test apps in
+ # parallel. to do that, alternate between NUMA nodes in a round
+ # robin fashion, and pick an arbitrary CPU from that node to
+ # taskset our execution to
+ numa_node = numa_nodes[self.active_processes % len(numa_nodes)]
+ cpu_nr = first_cpu_on_node(numa_node)
+ parallel_cmdlines += [self.__get_cmdline(cpu_nr)]
+ # increase number of active processes so that the next cmdline
+ # gets a different NUMA node
+ self.active_processes += 1
+ else:
+ parallel_cmdlines = [self.cmdline] * self.n_processes
- # assign worker threads to run test groups
- for test_group in self.parallel_test_groups:
- result = pool.apply_async(run_test_group,
- [self.__get_cmdline(test_group),
- test_group])
- results.append(result)
+ print("Running tests with %d workers" % self.n_processes)
- # iterate while we have group execution results to get
- while len(results) > 0:
+ # create table header
+ print("")
+ print("Test name".ljust(30) + "Test result".ljust(29) +
+ "Test".center(9) + "Total".center(9))
+ print("=" * 80)
- # iterate over a copy to be able to safely delete results
- # this iterates over a list of group results
- for group_result in results[:]:
+ if len(self.skipped):
+ print("Skipped autotests:")
- # if the thread hasn't finished yet, continue
- if not group_result.ready():
- continue
+ # print out any skipped tests
+ for result in self.skipped:
+ # unpack result tuple
+ test_result, result_str, test_name, _, _, _ = result
+ self.csvwriter.writerow([test_name, test_result, result_str])
- res = group_result.get()
+ t = ("%s:" % test_name).ljust(30)
+ t += result_str.ljust(29)
+ t += "[00m 00s]"
- self.__process_results(res)
+ print(t)
- # remove result from results list once we're done with it
- results.remove(group_result)
+ # make a note of tests start time
+ self.start = time.time()
- # run non_parallel tests. they are run one by one, synchronously
- for test_group in self.non_parallel_test_groups:
- group_result = run_test_group(
- self.__get_cmdline(test_group), test_group)
+ # whatever happens, try to save as much logs as possible
+ try:
+ if len(self.parallel_tests) > 0:
+ print("Parallel autotests:")
+ self.__run_test_group(self.parallel_tests, parallel_cmdlines)
- self.__process_results(group_result)
+ if len(self.non_parallel_tests) > 0:
+ print("Non-parallel autotests:")
+ self.__run_test_group(self.non_parallel_tests, [self.cmdline])
# get total run time
cur_time = time.time()
diff --git a/test/test/commands.c b/test/test/commands.c
index cf0b726b..94fbc310 100644
--- a/test/test/commands.c
+++ b/test/test/commands.c
@@ -132,11 +132,13 @@ static void cmd_dump_parsed(void *parsed_result,
else if (!strcmp(res->dump, "dump_mempool"))
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
- rte_eal_devargs_dump(stdout);
+ rte_devargs_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
else if (!strcmp(res->dump, "dump_malloc_stats"))
rte_malloc_dump_stats(stdout, NULL);
+ else if (!strcmp(res->dump, "dump_malloc_heaps"))
+ rte_malloc_dump_heaps(stdout);
}
cmdline_parse_token_string_t cmd_dump_dump =
@@ -147,6 +149,7 @@ cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_malloc_stats#"
+ "dump_malloc_heaps#"
"dump_devargs#"
"dump_log_types");
diff --git a/test/test/meson.build b/test/test/meson.build
index eb3d87a4..b1dd6eca 100644
--- a/test/test/meson.build
+++ b/test/test/meson.build
@@ -8,6 +8,7 @@ test_sources = files('commands.c',
'test_alarm.c',
'test_atomic.c',
'test_barrier.c',
+ 'test_bpf.c',
'test_byteorder.c',
'test_cmdline.c',
'test_cmdline_cirbuf.c',
@@ -21,10 +22,10 @@ test_sources = files('commands.c',
'test_cpuflags.c',
'test_crc.c',
'test_cryptodev.c',
+ 'test_cryptodev_asym.c',
'test_cryptodev_blockcipher.c',
'test_cycles.c',
'test_debug.c',
- 'test_devargs.c',
'test_distributor.c',
'test_distributor_perf.c',
'test_eal_flags.c',
@@ -98,6 +99,7 @@ test_sources = files('commands.c',
)
test_deps = ['acl',
+ 'bpf',
'cfgfile',
'cmdline',
'cryptodev',
@@ -129,13 +131,14 @@ test_names = [
'cryptodev_qat_autotest',
'cryptodev_aesni_mb_autotest',
'cryptodev_openssl_autotest',
+ 'cryptodev_openssl_asym_autotest',
'cryptodev_aesni_gcm_autotest',
'cryptodev_null_autotest',
'cryptodev_sw_snow3g_autotest',
'cryptodev_sw_kasumi_autotest',
'cryptodev_sw_zuc_autotest',
'cryptodev_sw_armv8_autotest',
- 'cryptodev_sw_mrvl_autotest',
+ 'cryptodev_sw_mvsam_autotest',
'cryptodev_dpaa2_sec_autotest',
'cryptodev_dpaa_sec_autotest',
'cycles_autotest',
@@ -234,7 +237,20 @@ if dpdk_conf.has('RTE_LIBRTE_KNI')
test_deps += 'kni'
endif
+cflags = machine_args
+if cc.has_argument('-Wno-format-truncation')
+ cflags += '-Wno-format-truncation'
+endif
+
test_dep_objs = []
+compress_test_dep = dependency('zlib', required: false)
+if compress_test_dep.found()
+ test_dep_objs += compress_test_dep
+ test_sources += 'test_compressdev.c'
+ test_deps += 'compressdev'
+ test_names += 'compressdev_autotest'
+endif
+
foreach d:test_deps
def_lib = get_option('default_library')
test_dep_objs += get_variable(def_lib + '_rte_' + d)
@@ -251,7 +267,7 @@ if get_option('tests')
test_sources,
link_whole: link_libs,
dependencies: test_dep_objs,
- c_args: [machine_args, '-DALLOW_EXPERIMENTAL_API'],
+ c_args: [cflags, '-DALLOW_EXPERIMENTAL_API'],
install_rpath: driver_install_path,
install: false)
diff --git a/test/test/process.h b/test/test/process.h
index 11986d5c..ba3a1850 100644
--- a/test/test/process.h
+++ b/test/test/process.h
@@ -5,6 +5,11 @@
#ifndef _PROCESS_H_
#define _PROCESS_H_
+#include <limits.h> /* PATH_MAX */
+#include <libgen.h> /* basename et al */
+#include <stdlib.h> /* NULL */
+#include <unistd.h> /* readlink */
+
#ifdef RTE_EXEC_ENV_BSDAPP
#define self "curproc"
#define exe "file"
@@ -61,4 +66,28 @@ process_dup(const char *const argv[], int numargs, const char *env_value)
return status;
}
+/* FreeBSD doesn't support file prefixes, so force compile failures for any
+ * tests attempting to use this function on FreeBSD.
+ */
+#ifdef RTE_EXEC_ENV_LINUXAPP
+static char *
+get_current_prefix(char *prefix, int size)
+{
+ char path[PATH_MAX] = {0};
+ char buf[PATH_MAX] = {0};
+
+ /* get file for config (fd is always 3) */
+ snprintf(path, sizeof(path), "/proc/self/fd/%d", 3);
+
+ /* return NULL on error */
+ if (readlink(path, buf, sizeof(buf)) == -1)
+ return NULL;
+
+ /* get the prefix */
+ snprintf(prefix, size, "%s", basename(dirname(buf)));
+
+ return prefix;
+}
+#endif
+
#endif /* _PROCESS_H_ */
diff --git a/test/test/resource.c b/test/test/resource.c
index 0e2b62cd..34465f16 100644
--- a/test/test/resource.c
+++ b/test/test/resource.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
*/
#include <stdio.h>
diff --git a/test/test/resource.h b/test/test/resource.h
index 1e961221..223fa22a 100644
--- a/test/test/resource.h
+++ b/test/test/resource.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
*/
#ifndef _RESOURCE_H_
diff --git a/test/test/test_bpf.c b/test/test/test_bpf.c
new file mode 100644
index 00000000..fa17c4f7
--- /dev/null
+++ b/test/test/test_bpf.c
@@ -0,0 +1,1926 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_memory.h>
+#include <rte_debug.h>
+#include <rte_hexdump.h>
+#include <rte_random.h>
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_bpf.h>
+
+#include "test.h"
+
+/*
+ * Basic functional tests for librte_bpf.
+ * The main procedure - load eBPF program, execute it and
+ * compare restuls with expected values.
+ */
+
+struct dummy_offset {
+ uint64_t u64;
+ uint32_t u32;
+ uint16_t u16;
+ uint8_t u8;
+};
+
+struct dummy_vect8 {
+ struct dummy_offset in[8];
+ struct dummy_offset out[8];
+};
+
+#define TEST_FILL_1 0xDEADBEEF
+
+#define TEST_MUL_1 21
+#define TEST_MUL_2 -100
+
+#define TEST_SHIFT_1 15
+#define TEST_SHIFT_2 33
+
+#define TEST_JCC_1 0
+#define TEST_JCC_2 -123
+#define TEST_JCC_3 5678
+#define TEST_JCC_4 TEST_FILL_1
+
+struct bpf_test {
+ const char *name;
+ size_t arg_sz;
+ struct rte_bpf_prm prm;
+ void (*prepare)(void *);
+ int (*check_result)(uint64_t, const void *);
+ uint32_t allow_fail;
+};
+
+/*
+ * Compare return value and result data with expected ones.
+ * Report a failure if they don't match.
+ */
+static int
+cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
+ const void *exp_res, const void *ret_res, size_t res_sz)
+{
+ int32_t ret;
+
+ ret = 0;
+ if (exp_rc != ret_rc) {
+ printf("%s@%d: invalid return value, expected: 0x%" PRIx64
+ ",result: 0x%" PRIx64 "\n",
+ func, __LINE__, exp_rc, ret_rc);
+ ret |= -1;
+ }
+
+ if (memcmp(exp_res, ret_res, res_sz) != 0) {
+ printf("%s: invalid value\n", func);
+ rte_memdump(stdout, "expected", exp_res, res_sz);
+ rte_memdump(stdout, "result", ret_res, res_sz);
+ ret |= -1;
+ }
+
+ return ret;
+}
+
+/* store immediate test-cases */
+static const struct ebpf_insn test_store1_prog[] = {
+ {
+ .code = (BPF_ST | BPF_MEM | BPF_B),
+ .dst_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u8),
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_ST | BPF_MEM | BPF_H),
+ .dst_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u16),
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_ST | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u32),
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_ST | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u64),
+ .imm = TEST_FILL_1,
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+static void
+test_store1_prepare(void *arg)
+{
+ struct dummy_offset *df;
+
+ df = arg;
+ memset(df, 0, sizeof(*df));
+}
+
+static int
+test_store1_check(uint64_t rc, const void *arg)
+{
+ const struct dummy_offset *dft;
+ struct dummy_offset dfe;
+
+ dft = arg;
+
+ memset(&dfe, 0, sizeof(dfe));
+ dfe.u64 = (int32_t)TEST_FILL_1;
+ dfe.u32 = dfe.u64;
+ dfe.u16 = dfe.u64;
+ dfe.u8 = dfe.u64;
+
+ return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
+}
+
+/* store register test-cases */
+static const struct ebpf_insn test_store2_prog[] = {
+
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | BPF_B),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_offset, u8),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | BPF_H),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_offset, u16),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+/* load test-cases */
+static const struct ebpf_insn test_load1_prog[] = {
+
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_B),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u8),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_H),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u16),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ /* return sum */
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_4,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_3,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+static void
+test_load1_prepare(void *arg)
+{
+ struct dummy_offset *df;
+
+ df = arg;
+
+ memset(df, 0, sizeof(*df));
+ df->u64 = (int32_t)TEST_FILL_1;
+ df->u32 = df->u64;
+ df->u16 = df->u64;
+ df->u8 = df->u64;
+}
+
+static int
+test_load1_check(uint64_t rc, const void *arg)
+{
+ uint64_t v;
+ const struct dummy_offset *dft;
+
+ dft = arg;
+ v = dft->u64;
+ v += dft->u32;
+ v += dft->u16;
+ v += dft->u8;
+
+ return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
+}
+
+/* alu mul test-cases */
+static const struct ebpf_insn test_mul1_prog[] = {
+
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[2].u32),
+ },
+ {
+ .code = (BPF_ALU | BPF_MUL | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = TEST_MUL_1,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_MUL | BPF_K),
+ .dst_reg = EBPF_REG_3,
+ .imm = TEST_MUL_2,
+ },
+ {
+ .code = (BPF_ALU | BPF_MUL | BPF_X),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_MUL | BPF_X),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_3,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[0].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[1].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_vect8, out[2].u64),
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+static void
+test_mul1_prepare(void *arg)
+{
+ struct dummy_vect8 *dv;
+ uint64_t v;
+
+ dv = arg;
+
+ v = rte_rand();
+
+ memset(dv, 0, sizeof(*dv));
+ dv->in[0].u32 = v;
+ dv->in[1].u64 = v << 12 | v >> 6;
+ dv->in[2].u32 = -v;
+}
+
+static int
+test_mul1_check(uint64_t rc, const void *arg)
+{
+ uint64_t r2, r3, r4;
+ const struct dummy_vect8 *dvt;
+ struct dummy_vect8 dve;
+
+ dvt = arg;
+ memset(&dve, 0, sizeof(dve));
+
+ r2 = dvt->in[0].u32;
+ r3 = dvt->in[1].u64;
+ r4 = dvt->in[2].u32;
+
+ r2 = (uint32_t)r2 * TEST_MUL_1;
+ r3 *= TEST_MUL_2;
+ r4 = (uint32_t)(r4 * r2);
+ r4 *= r3;
+
+ dve.out[0].u64 = r2;
+ dve.out[1].u64 = r3;
+ dve.out[2].u64 = r4;
+
+ return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
+}
+
+/* alu shift test-cases */
+static const struct ebpf_insn test_shift1_prog[] = {
+
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[2].u32),
+ },
+ {
+ .code = (BPF_ALU | BPF_LSH | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = TEST_SHIFT_1,
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
+ .dst_reg = EBPF_REG_3,
+ .imm = TEST_SHIFT_2,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[0].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[1].u64),
+ },
+ {
+ .code = (BPF_ALU | BPF_RSH | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_4,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_LSH | BPF_X),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_4,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[2].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[3].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[2].u32),
+ },
+ {
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = sizeof(uint64_t) * CHAR_BIT - 1,
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = sizeof(uint32_t) * CHAR_BIT - 1,
+ },
+ {
+ .code = (BPF_ALU | BPF_LSH | BPF_X),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_vect8, out[4].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[5].u64),
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+static void
+test_shift1_prepare(void *arg)
+{
+ struct dummy_vect8 *dv;
+ uint64_t v;
+
+ dv = arg;
+
+ v = rte_rand();
+
+ memset(dv, 0, sizeof(*dv));
+ dv->in[0].u32 = v;
+ dv->in[1].u64 = v << 12 | v >> 6;
+ dv->in[2].u32 = (-v ^ 5);
+}
+
+static int
+test_shift1_check(uint64_t rc, const void *arg)
+{
+ uint64_t r2, r3, r4;
+ const struct dummy_vect8 *dvt;
+ struct dummy_vect8 dve;
+
+ dvt = arg;
+ memset(&dve, 0, sizeof(dve));
+
+ r2 = dvt->in[0].u32;
+ r3 = dvt->in[1].u64;
+ r4 = dvt->in[2].u32;
+
+ r2 = (uint32_t)r2 << TEST_SHIFT_1;
+ r3 = (int64_t)r3 >> TEST_SHIFT_2;
+
+ dve.out[0].u64 = r2;
+ dve.out[1].u64 = r3;
+
+ r2 = (uint32_t)r2 >> r4;
+ r3 <<= r4;
+
+ dve.out[2].u64 = r2;
+ dve.out[3].u64 = r3;
+
+ r2 = dvt->in[0].u32;
+ r3 = dvt->in[1].u64;
+ r4 = dvt->in[2].u32;
+
+ r2 &= sizeof(uint64_t) * CHAR_BIT - 1;
+ r3 = (int64_t)r3 >> r2;
+ r2 &= sizeof(uint32_t) * CHAR_BIT - 1;
+ r4 = (uint32_t)r4 << r2;
+
+ dve.out[4].u64 = r4;
+ dve.out[5].u64 = r3;
+
+ return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
+}
+
+/* jmp test-cases */
+static const struct ebpf_insn test_jump1_prog[] = {
+
+	/* r0 = 0; r0 collects one bit per branch that is taken */
+	[0] = {
+		.code = (BPF_ALU | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0,
+	},
+	/* load the four input values the comparisons below operate on */
+	[1] = {
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u32),
+	},
+	[2] = {
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u64),
+	},
+	[3] = {
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_4,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[1].u32),
+	},
+	[4] = {
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_5,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[1].u64),
+	},
+	/*
+	 * conditional jumps: a taken branch lands on one of the
+	 * "OR bit into r0" instructions below ([14], [16], ...),
+	 * which then jumps back to the next comparison in this list.
+	 */
+	[5] = {
+		.code = (BPF_JMP | BPF_JEQ | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = TEST_JCC_1,
+		.off = 8,
+	},
+	[6] = {
+		.code = (BPF_JMP | EBPF_JSLE | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.imm = TEST_JCC_2,
+		.off = 9,
+	},
+	[7] = {
+		.code = (BPF_JMP | BPF_JGT | BPF_K),
+		.dst_reg = EBPF_REG_4,
+		.imm = TEST_JCC_3,
+		.off = 10,
+	},
+	[8] = {
+		.code = (BPF_JMP | BPF_JSET | BPF_K),
+		.dst_reg = EBPF_REG_5,
+		.imm = TEST_JCC_4,
+		.off = 11,
+	},
+	[9] = {
+		.code = (BPF_JMP | EBPF_JNE | BPF_X),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_3,
+		.off = 12,
+	},
+	[10] = {
+		.code = (BPF_JMP | EBPF_JSGT | BPF_X),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_4,
+		.off = 13,
+	},
+	[11] = {
+		.code = (BPF_JMP | EBPF_JLE | BPF_X),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_5,
+		.off = 14,
+	},
+	[12] = {
+		.code = (BPF_JMP | BPF_JSET | BPF_X),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_5,
+		.off = 15,
+	},
+	/* fall-through target of the last comparison: return r0 */
+	[13] = {
+		.code = (BPF_JMP | EBPF_EXIT),
+	},
+	/*
+	 * branch targets: each records its branch bit in r0, then the
+	 * following JA with a negative offset jumps back to the next
+	 * comparison (the last pair, [28]/[29], jumps to the exit at [13]).
+	 */
+	[14] = {
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0x1,
+	},
+	[15] = {
+		.code = (BPF_JMP | BPF_JA),
+		.off = -10,
+	},
+	[16] = {
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0x2,
+	},
+	[17] = {
+		.code = (BPF_JMP | BPF_JA),
+		.off = -11,
+	},
+	[18] = {
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0x4,
+	},
+	[19] = {
+		.code = (BPF_JMP | BPF_JA),
+		.off = -12,
+	},
+	[20] = {
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0x8,
+	},
+	[21] = {
+		.code = (BPF_JMP | BPF_JA),
+		.off = -13,
+	},
+	[22] = {
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0x10,
+	},
+	[23] = {
+		.code = (BPF_JMP | BPF_JA),
+		.off = -14,
+	},
+	[24] = {
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0x20,
+	},
+	[25] = {
+		.code = (BPF_JMP | BPF_JA),
+		.off = -15,
+	},
+	[26] = {
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0x40,
+	},
+	[27] = {
+		.code = (BPF_JMP | BPF_JA),
+		.off = -16,
+	},
+	[28] = {
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0x80,
+	},
+	[29] = {
+		.code = (BPF_JMP | BPF_JA),
+		.off = -17,
+	},
+};
+
+/*
+ * Fill the input area for the jump test: two random 64-bit values plus
+ * two derived 32-bit values, so every comparison in test_jump1_prog sees
+ * unpredictable but reproducible-per-run data.
+ */
+static void
+test_jump1_prepare(void *arg)
+{
+	uint64_t a, b;
+	struct dummy_vect8 *vect = arg;
+
+	a = rte_rand();
+	b = rte_rand();
+
+	memset(vect, 0, sizeof(*vect));
+	vect->in[0].u64 = a;
+	vect->in[1].u64 = b;
+	vect->in[0].u32 = (a << 12) + (b >> 6);
+	vect->in[1].u32 = (b << 12) - (a >> 6);
+}
+
+/*
+ * Recompute natively the branch bitmask that test_jump1_prog builds in r0:
+ * one bit per conditional jump that is expected to be taken, then compare
+ * it against the program's actual return value.
+ */
+static int
+test_jump1_check(uint64_t rc, const void *arg)
+{
+	uint64_t r2, r3, r4, r5, rv;
+	const struct dummy_vect8 *dvt;
+
+	dvt = arg;
+
+	rv = 0;
+	r2 = dvt->in[0].u32;
+	r3 = dvt->in[0].u64;
+	r4 = dvt->in[1].u32;
+	r5 = dvt->in[1].u64;
+
+	/* each test mirrors one jump instruction in test_jump1_prog */
+	if (r2 == TEST_JCC_1)		/* BPF_JEQ | BPF_K */
+		rv |= 0x1;
+	if ((int64_t)r3 <= TEST_JCC_2)	/* EBPF_JSLE | BPF_K (signed) */
+		rv |= 0x2;
+	if (r4 > TEST_JCC_3)		/* BPF_JGT | BPF_K */
+		rv |= 0x4;
+	if (r5 & TEST_JCC_4)		/* BPF_JSET | BPF_K */
+		rv |= 0x8;
+	if (r2 != r3)			/* EBPF_JNE | BPF_X */
+		rv |= 0x10;
+	if ((int64_t)r2 > (int64_t)r4)	/* EBPF_JSGT | BPF_X (signed) */
+		rv |= 0x20;
+	if (r2 <= r5)			/* EBPF_JLE | BPF_X */
+		rv |= 0x40;
+	if (r3 & r5)			/* BPF_JSET | BPF_X */
+		rv |= 0x80;
+
+	return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
+}
+
+/* alu (add, sub, and, or, xor, neg) test-cases */
+static const struct ebpf_insn test_alu1_prog[] = {
+
+	/* load inputs: r2=in[0].u32, r3=in[0].u64, r4=in[1].u32, r5=in[1].u64 */
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u32),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u64),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_4,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[1].u32),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_5,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[1].u64),
+	},
+	/* immediate ALU ops, mixing 32-bit (BPF_ALU) and 64-bit (EBPF_ALU64) */
+	{
+		.code = (BPF_ALU | BPF_AND | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = TEST_FILL_1,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.imm = TEST_FILL_1,
+	},
+	{
+		.code = (BPF_ALU | BPF_XOR | BPF_K),
+		.dst_reg = EBPF_REG_4,
+		.imm = TEST_FILL_1,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+		.dst_reg = EBPF_REG_5,
+		.imm = TEST_FILL_1,
+	},
+	/* store intermediate results to out[0..3] */
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_2,
+		.off = offsetof(struct dummy_vect8, out[0].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_3,
+		.off = offsetof(struct dummy_vect8, out[1].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_4,
+		.off = offsetof(struct dummy_vect8, out[2].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_5,
+		.off = offsetof(struct dummy_vect8, out[3].u64),
+	},
+	/* register-to-register ALU ops, again mixing 32/64-bit variants */
+	{
+		.code = (BPF_ALU | BPF_OR | BPF_X),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_3,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_4,
+	},
+	{
+		.code = (BPF_ALU | BPF_SUB | BPF_X),
+		.dst_reg = EBPF_REG_4,
+		.src_reg = EBPF_REG_5,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_AND | BPF_X),
+		.dst_reg = EBPF_REG_5,
+		.src_reg = EBPF_REG_2,
+	},
+	/* store second-stage results to out[4..7] */
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_2,
+		.off = offsetof(struct dummy_vect8, out[4].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_3,
+		.off = offsetof(struct dummy_vect8, out[5].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_4,
+		.off = offsetof(struct dummy_vect8, out[6].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_5,
+		.off = offsetof(struct dummy_vect8, out[7].u64),
+	},
+	/* return (-r2 + (-r3)) */
+	{
+		.code = (BPF_ALU | BPF_NEG),
+		.dst_reg = EBPF_REG_2,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_NEG),
+		.dst_reg = EBPF_REG_3,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_3,
+	},
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_2,
+	},
+	{
+		.code = (BPF_JMP | EBPF_EXIT),
+	},
+};
+
+/*
+ * Natively replay every ALU operation of test_alu1_prog, carefully
+ * mirroring the 32-bit (truncating) vs 64-bit semantics of each insn,
+ * then compare both the return value and the out[] area.
+ */
+static int
+test_alu1_check(uint64_t rc, const void *arg)
+{
+	uint64_t r2, r3, r4, r5, rv;
+	const struct dummy_vect8 *dvt;
+	struct dummy_vect8 dve;
+
+	dvt = arg;
+	memset(&dve, 0, sizeof(dve));
+
+	r2 = dvt->in[0].u32;
+	r3 = dvt->in[0].u64;
+	r4 = dvt->in[1].u32;
+	r5 = dvt->in[1].u64;
+
+	/* immediate ops; uint32_t casts model the 32-bit BPF_ALU variants */
+	r2 = (uint32_t)r2 & TEST_FILL_1;
+	r3 |= (int32_t) TEST_FILL_1;
+	r4 = (uint32_t)r4 ^ TEST_FILL_1;
+	r5 += (int32_t)TEST_FILL_1;
+
+	dve.out[0].u64 = r2;
+	dve.out[1].u64 = r3;
+	dve.out[2].u64 = r4;
+	dve.out[3].u64 = r5;
+
+	/* register-to-register ops */
+	r2 = (uint32_t)r2 | (uint32_t)r3;
+	r3 ^= r4;
+	r4 = (uint32_t)r4 - (uint32_t)r5;
+	r5 &= r2;
+
+	dve.out[4].u64 = r2;
+	dve.out[5].u64 = r3;
+	dve.out[6].u64 = r4;
+	dve.out[7].u64 = r5;
+
+	/* expected return value: (-r2 32-bit) + (-r3 64-bit) */
+	r2 = -(int32_t)r2;
+	rv = (uint32_t)r2;
+	r3 = -r3;
+	rv += r3;
+
+	return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
+}
+
+/* endianness conversions (BE->LE/LE->BE) test-cases */
+static const struct ebpf_insn test_bele1_prog[] = {
+
+	/* load the same input at 16/32/64-bit width into r2/r3/r4 */
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_H),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u16),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u32),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_4,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u64),
+	},
+	/* convert to big-endian at each width (imm = width in bits) */
+	{
+		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
+		.dst_reg = EBPF_REG_2,
+		.imm = sizeof(uint16_t) * CHAR_BIT,
+	},
+	{
+		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
+		.dst_reg = EBPF_REG_3,
+		.imm = sizeof(uint32_t) * CHAR_BIT,
+	},
+	{
+		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
+		.dst_reg = EBPF_REG_4,
+		.imm = sizeof(uint64_t) * CHAR_BIT,
+	},
+	/* store BE results to out[0..2] */
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_2,
+		.off = offsetof(struct dummy_vect8, out[0].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_3,
+		.off = offsetof(struct dummy_vect8, out[1].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_4,
+		.off = offsetof(struct dummy_vect8, out[2].u64),
+	},
+	/* reload the untouched inputs for the little-endian pass */
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_H),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u16),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u32),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_4,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u64),
+	},
+	/* convert to little-endian at each width */
+	{
+		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
+		.dst_reg = EBPF_REG_2,
+		.imm = sizeof(uint16_t) * CHAR_BIT,
+	},
+	{
+		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
+		.dst_reg = EBPF_REG_3,
+		.imm = sizeof(uint32_t) * CHAR_BIT,
+	},
+	{
+		.code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
+		.dst_reg = EBPF_REG_4,
+		.imm = sizeof(uint64_t) * CHAR_BIT,
+	},
+	/* store LE results to out[3..5] */
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_2,
+		.off = offsetof(struct dummy_vect8, out[3].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_3,
+		.off = offsetof(struct dummy_vect8, out[4].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_4,
+		.off = offsetof(struct dummy_vect8, out[5].u64),
+	},
+	/* return 1 */
+	{
+		.code = (BPF_ALU | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 1,
+	},
+	{
+		.code = (BPF_JMP | EBPF_EXIT),
+	},
+};
+
+/*
+ * Fill the input area for the endianness test: one random 64-bit value,
+ * with its truncations stored into the narrower u32/u16 fields.
+ */
+static void
+test_bele1_prepare(void *arg)
+{
+	uint64_t seed;
+	struct dummy_vect8 *vect = arg;
+
+	memset(vect, 0, sizeof(*vect));
+	seed = rte_rand();
+	vect->in[0].u64 = seed;
+	vect->in[0].u32 = (uint32_t)seed;
+	vect->in[0].u16 = (uint16_t)seed;
+}
+
+/*
+ * Recompute the expected out[] contents of test_bele1_prog using the
+ * rte_cpu_to_be/le byteswap helpers, then compare against the program's
+ * stored results; the expected return value is the constant 1.
+ */
+static int
+test_bele1_check(uint64_t rc, const void *arg)
+{
+	uint64_t r2, r3, r4;
+	const struct dummy_vect8 *dvt;
+	struct dummy_vect8 dve;
+
+	dvt = arg;
+	memset(&dve, 0, sizeof(dve));
+
+	/* big-endian pass -> out[0..2] */
+	r2 = dvt->in[0].u16;
+	r3 = dvt->in[0].u32;
+	r4 = dvt->in[0].u64;
+
+	r2 = rte_cpu_to_be_16(r2);
+	r3 = rte_cpu_to_be_32(r3);
+	r4 = rte_cpu_to_be_64(r4);
+
+	dve.out[0].u64 = r2;
+	dve.out[1].u64 = r3;
+	dve.out[2].u64 = r4;
+
+	/* little-endian pass -> out[3..5] */
+	r2 = dvt->in[0].u16;
+	r3 = dvt->in[0].u32;
+	r4 = dvt->in[0].u64;
+
+	r2 = rte_cpu_to_le_16(r2);
+	r3 = rte_cpu_to_le_32(r3);
+	r4 = rte_cpu_to_le_64(r4);
+
+	dve.out[3].u64 = r2;
+	dve.out[4].u64 = r3;
+	dve.out[5].u64 = r4;
+
+	return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
+}
+
+/* atomic add test-cases */
+static const struct ebpf_insn test_xadd1_prog[] = {
+
+	/*
+	 * Each of the seven sections below loads one constant into a
+	 * register, then atomically adds it to both the u32 and the u64
+	 * field of the dummy_offset pointed to by r1 (BPF_STX | EBPF_XADD).
+	 */
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = 1,
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | BPF_W),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_2,
+		.off = offsetof(struct dummy_offset, u32),
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_2,
+		.off = offsetof(struct dummy_offset, u64),
+	},
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.imm = -1,
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | BPF_W),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_3,
+		.off = offsetof(struct dummy_offset, u32),
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_3,
+		.off = offsetof(struct dummy_offset, u64),
+	},
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_4,
+		.imm = TEST_FILL_1,
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | BPF_W),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_4,
+		.off = offsetof(struct dummy_offset, u32),
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_4,
+		.off = offsetof(struct dummy_offset, u64),
+	},
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_5,
+		.imm = TEST_MUL_1,
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | BPF_W),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_5,
+		.off = offsetof(struct dummy_offset, u32),
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_5,
+		.off = offsetof(struct dummy_offset, u64),
+	},
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_6,
+		.imm = TEST_MUL_2,
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | BPF_W),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_6,
+		.off = offsetof(struct dummy_offset, u32),
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_6,
+		.off = offsetof(struct dummy_offset, u64),
+	},
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_7,
+		.imm = TEST_JCC_2,
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | BPF_W),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_7,
+		.off = offsetof(struct dummy_offset, u32),
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_7,
+		.off = offsetof(struct dummy_offset, u64),
+	},
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_8,
+		.imm = TEST_JCC_3,
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | BPF_W),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_8,
+		.off = offsetof(struct dummy_offset, u32),
+	},
+	{
+		.code = (BPF_STX | EBPF_XADD | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_8,
+		.off = offsetof(struct dummy_offset, u64),
+	},
+	/* return 1 */
+	{
+		.code = (BPF_ALU | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 1,
+	},
+	{
+		.code = (BPF_JMP | EBPF_EXIT),
+	},
+};
+
+/*
+ * Replay the atomic adds of test_xadd1_prog on a zeroed local
+ * dummy_offset (mirroring test_store1_prepare's zero fill) using the
+ * rte_atomic helpers, then compare the whole struct byte-for-byte.
+ */
+static int
+test_xadd1_check(uint64_t rc, const void *arg)
+{
+	uint64_t rv;
+	const struct dummy_offset *dft;
+	struct dummy_offset dfe;
+
+	dft = arg;
+	memset(&dfe, 0, sizeof(dfe));
+
+	/* same constants, in the same order as the BPF program */
+	rv = 1;
+	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+	rv = -1;
+	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+	rv = (int32_t)TEST_FILL_1;
+	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+	rv = TEST_MUL_1;
+	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+	rv = TEST_MUL_2;
+	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+	rv = TEST_JCC_2;
+	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+	rv = TEST_JCC_3;
+	rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+	rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
+}
+
+/* alu div test-cases */
+static const struct ebpf_insn test_div1_prog[] = {
+
+	/* load inputs: r2=in[0].u32, r3=in[1].u64, r4=in[2].u32 */
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[0].u32),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[1].u64),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_4,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[2].u32),
+	},
+	/* immediate div/mod, 32 and 64 bit */
+	{
+		.code = (BPF_ALU | BPF_DIV | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = TEST_MUL_1,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_MOD | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.imm = TEST_MUL_2,
+	},
+	/* force r2/r3 non-zero so the register div/mod below is defined */
+	{
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = 1,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_OR | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.imm = 1,
+	},
+	/* register div/mod */
+	{
+		.code = (BPF_ALU | BPF_MOD | BPF_X),
+		.dst_reg = EBPF_REG_4,
+		.src_reg = EBPF_REG_2,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_DIV | BPF_X),
+		.dst_reg = EBPF_REG_4,
+		.src_reg = EBPF_REG_3,
+	},
+	/* store results to out[0..2] */
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_2,
+		.off = offsetof(struct dummy_vect8, out[0].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_3,
+		.off = offsetof(struct dummy_vect8, out[1].u64),
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_4,
+		.off = offsetof(struct dummy_vect8, out[2].u64),
+	},
+	/* check that we can handle division by zero gracefully. */
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_vect8, in[3].u32),
+	},
+	{
+		.code = (BPF_ALU | BPF_DIV | BPF_X),
+		.dst_reg = EBPF_REG_4,
+		.src_reg = EBPF_REG_2,
+	},
+	/* return 1 */
+	{
+		.code = (BPF_ALU | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 1,
+	},
+	{
+		.code = (BPF_JMP | EBPF_EXIT),
+	},
+};
+
+/*
+ * Recompute natively the div/mod results of test_div1_prog
+ * (32-bit ops modelled with uint32_t casts) and compare the out[] area.
+ */
+static int
+test_div1_check(uint64_t rc, const void *arg)
+{
+	uint64_t r2, r3, r4;
+	const struct dummy_vect8 *dvt;
+	struct dummy_vect8 dve;
+
+	dvt = arg;
+	memset(&dve, 0, sizeof(dve));
+
+	r2 = dvt->in[0].u32;
+	r3 = dvt->in[1].u64;
+	r4 = dvt->in[2].u32;
+
+	r2 = (uint32_t)r2 / TEST_MUL_1;
+	r3 %= TEST_MUL_2;
+	r2 |= 1;
+	r3 |= 1;
+	r4 = (uint32_t)(r4 % r2);
+	r4 /= r3;
+
+	dve.out[0].u64 = r2;
+	dve.out[1].u64 = r3;
+	dve.out[2].u64 = r4;
+
+	/*
+	 * The test program deliberately divides by zero at the end
+	 * (in[3].u32 is zeroed by the prepare routine), so the expected
+	 * return value is 0, not the 1 set before the faulting insn.
+	 */
+	return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
+}
+
+/* call test-cases */
+static const struct ebpf_insn test_call1_prog[] = {
+
+	/* load u32 and u64 from the input dummy_offset */
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_offset, u32),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_offset, u64),
+	},
+	/* spill both values to the stack (r10 is the frame pointer) */
+	{
+		.code = (BPF_STX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_10,
+		.src_reg = EBPF_REG_2,
+		.off = -4,
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_10,
+		.src_reg = EBPF_REG_3,
+		.off = -16,
+	},
+	/* r2 = &spilled u32, r3 = &spilled u64 (args 2 and 3 of the call) */
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_10,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = 4,
+	},
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_10,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.imm = 16,
+	},
+	/* call dummy_func1; imm is resolved against test_call1_xsym at load */
+	{
+		.code = (BPF_JMP | EBPF_CALL),
+		.imm = 0,
+	},
+	/* reload the (updated) spills and return their sum */
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_10,
+		.off = -4,
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_10,
+		.off = -16
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_2,
+	},
+	{
+		.code = (BPF_JMP | EBPF_EXIT),
+	},
+};
+
+/*
+ * External helper invoked from test_call1_prog: bumps the 32-bit spill
+ * by the input's u16 field and the 64-bit spill by its u8 field.
+ */
+static void
+dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
+{
+	const struct dummy_offset *off = p;
+
+	*v32 += off->u16;
+	*v64 += off->u8;
+}
+
+/*
+ * Verify test_call1: the expected result is in->u64 + in->u32 plus the
+ * increments dummy_func1 applies (u16 to the 32-bit spill, u8 to the
+ * 64-bit spill), folded into one 64-bit sum.
+ * Returns 0 on match, -1 otherwise.
+ */
+static int
+test_call1_check(uint64_t rc, const void *arg)
+{
+	uint32_t v32;
+	uint64_t v64;
+	const struct dummy_offset *dv;
+
+	dv = arg;
+
+	v32 = dv->u32;
+	v64 = dv->u64;
+	dummy_func1(arg, &v32, &v64);
+	v64 += v32;
+
+	if (v64 != rc) {
+		printf("%s@%d: invalid return value "
+			"expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
+			__func__, __LINE__, v64, rc);
+		return -1;
+	}
+	/* note: removed unreachable cmp_res() call that followed return 0 */
+	return 0;
+}
+
+/*
+ * External symbol table for test_call1: exposes dummy_func1 (three
+ * pointer arguments with explicit sizes) to the BPF load-time resolver.
+ */
+static const struct rte_bpf_xsym test_call1_xsym[] = {
+	{
+		.name = RTE_STR(dummy_func1),
+		.type = RTE_BPF_XTYPE_FUNC,
+		.func = {
+			.val = (void *)dummy_func1,
+			.nb_args = 3,
+			.args = {
+				[0] = {
+					.type = RTE_BPF_ARG_PTR,
+					.size = sizeof(struct dummy_offset),
+				},
+				[1] = {
+					.type = RTE_BPF_ARG_PTR,
+					.size = sizeof(uint32_t),
+				},
+				[2] = {
+					.type = RTE_BPF_ARG_PTR,
+					.size = sizeof(uint64_t),
+				},
+			},
+		},
+	},
+};
+
+static const struct ebpf_insn test_call2_prog[] = {
+
+	/* r1 = &stack struct 'a', r2 = &stack struct 'b' (two dummy_offset) */
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_10,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.imm = -(int32_t)sizeof(struct dummy_offset),
+	},
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_10,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = -2 * (int32_t)sizeof(struct dummy_offset),
+	},
+	/* call dummy_func2(&a, &b); imm resolved via test_call2_xsym */
+	{
+		.code = (BPF_JMP | EBPF_CALL),
+		.imm = 0,
+	},
+	/* r0 = a.u64 + a.u32 + b.u16 + b.u8, read back from the stack */
+	{
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_10,
+		.off = -(int32_t)(sizeof(struct dummy_offset) -
+			offsetof(struct dummy_offset, u64)),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_10,
+		.off = -(int32_t)(sizeof(struct dummy_offset) -
+			offsetof(struct dummy_offset, u32)),
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_1,
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_H),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_10,
+		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
+			offsetof(struct dummy_offset, u16)),
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_1,
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_B),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_10,
+		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
+			offsetof(struct dummy_offset, u8)),
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_1,
+	},
+	{
+		.code = (BPF_JMP | EBPF_EXIT),
+	},
+
+};
+
+/*
+ * External helper invoked from test_call2_prog: fills the fields of the
+ * two structs with the consecutive values 0..7, in declaration order
+ * u64, u32, u16, u8 for 'a' and then for 'b'.
+ */
+static void
+dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
+{
+	a->u64 = 0;
+	a->u32 = 1;
+	a->u16 = 2;
+	a->u8 = 3;
+	b->u64 = 4;
+	b->u32 = 5;
+	b->u16 = 6;
+	b->u8 = 7;
+}
+
+/*
+ * Verify test_call2: recompute a.u64 + a.u32 + b.u16 + b.u8 from what
+ * dummy_func2 stores and compare against the program's return value.
+ * Returns 0 on match, -1 otherwise.
+ */
+static int
+test_call2_check(uint64_t rc, const void *arg)
+{
+	struct dummy_offset a, b;
+	uint64_t want;
+
+	RTE_SET_USED(arg);
+
+	dummy_func2(&a, &b);
+	want = a.u64 + a.u32 + b.u16 + b.u8;
+
+	if (want == rc)
+		return 0;
+
+	printf("%s@%d: invalid return value "
+		"expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
+		__func__, __LINE__, want, rc);
+	return -1;
+}
+
+/*
+ * External symbol table for test_call2: exposes dummy_func2 (two
+ * dummy_offset pointer arguments) to the BPF load-time resolver.
+ */
+static const struct rte_bpf_xsym test_call2_xsym[] = {
+	{
+		.name = RTE_STR(dummy_func2),
+		.type = RTE_BPF_XTYPE_FUNC,
+		.func = {
+			.val = (void *)dummy_func2,
+			.nb_args = 2,
+			.args = {
+				[0] = {
+					.type = RTE_BPF_ARG_PTR,
+					.size = sizeof(struct dummy_offset),
+				},
+				[1] = {
+					.type = RTE_BPF_ARG_PTR,
+					.size = sizeof(struct dummy_offset),
+				},
+			},
+		},
+	},
+};
+
+/*
+ * Table of all BPF test cases. Each entry couples a program (prm) with a
+ * prepare() routine that fills the input buffer and a check_result()
+ * routine that recomputes the expected outcome natively. Several entries
+ * reuse another test's prepare() when the input layout is the same.
+ */
+static const struct bpf_test tests[] = {
+	{
+		.name = "test_store1",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_store1_prog,
+			.nb_ins = RTE_DIM(test_store1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+		},
+		.prepare = test_store1_prepare,
+		.check_result = test_store1_check,
+	},
+	{
+		.name = "test_store2",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_store2_prog,
+			.nb_ins = RTE_DIM(test_store2_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+		},
+		.prepare = test_store1_prepare,
+		.check_result = test_store1_check,
+	},
+	{
+		.name = "test_load1",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_load1_prog,
+			.nb_ins = RTE_DIM(test_load1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+		},
+		.prepare = test_load1_prepare,
+		.check_result = test_load1_check,
+	},
+	{
+		.name = "test_mul1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_mul1_prog,
+			.nb_ins = RTE_DIM(test_mul1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_mul1_prepare,
+		.check_result = test_mul1_check,
+	},
+	{
+		.name = "test_shift1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_shift1_prog,
+			.nb_ins = RTE_DIM(test_shift1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_shift1_prepare,
+		.check_result = test_shift1_check,
+	},
+	{
+		.name = "test_jump1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_jump1_prog,
+			.nb_ins = RTE_DIM(test_jump1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_jump1_prepare,
+		.check_result = test_jump1_check,
+	},
+	{
+		.name = "test_alu1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_alu1_prog,
+			.nb_ins = RTE_DIM(test_alu1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		/* same input layout as test_jump1, so reuse its prepare */
+		.prepare = test_jump1_prepare,
+		.check_result = test_alu1_check,
+	},
+	{
+		.name = "test_bele1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_bele1_prog,
+			.nb_ins = RTE_DIM(test_bele1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_bele1_prepare,
+		.check_result = test_bele1_check,
+	},
+	{
+		.name = "test_xadd1",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_xadd1_prog,
+			.nb_ins = RTE_DIM(test_xadd1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+		},
+		.prepare = test_store1_prepare,
+		.check_result = test_xadd1_check,
+	},
+	{
+		.name = "test_div1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_div1_prog,
+			.nb_ins = RTE_DIM(test_div1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_mul1_prepare,
+		.check_result = test_div1_check,
+	},
+	{
+		.name = "test_call1",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_call1_prog,
+			.nb_ins = RTE_DIM(test_call1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+			.xsym = test_call1_xsym,
+			.nb_xsym = RTE_DIM(test_call1_xsym),
+		},
+		.prepare = test_load1_prepare,
+		.check_result = test_call1_check,
+		/* for now don't support function calls on 32 bit platform */
+		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+	},
+	{
+		.name = "test_call2",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_call2_prog,
+			.nb_ins = RTE_DIM(test_call2_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+			.xsym = test_call2_xsym,
+			.nb_xsym = RTE_DIM(test_call2_xsym),
+		},
+		.prepare = test_store1_prepare,
+		.check_result = test_call2_check,
+		/* for now don't support function calls on 32 bit platform */
+		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+	},
+};
+
+/*
+ * Run one test case: load the program, execute it via the interpreter
+ * and (when available) via the JIT, validating the result each time.
+ * Returns 0 on success, non-zero if any run failed.
+ */
+static int
+run_test(const struct bpf_test *tst)
+{
+	int32_t ret, rv;
+	int64_t rc;
+	struct rte_bpf *bpf;
+	struct rte_bpf_jit jit;
+	uint8_t tbuf[tst->arg_sz];
+
+	printf("%s(%s) start\n", __func__, tst->name);
+
+	bpf = rte_bpf_load(&tst->prm);
+	if (bpf == NULL) {
+		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
+			__func__, __LINE__, rte_errno, strerror(rte_errno));
+		return -1;
+	}
+
+	/* interpreter run */
+	tst->prepare(tbuf);
+	rc = rte_bpf_exec(bpf, tbuf);
+	ret = tst->check_result(rc, tbuf);
+	if (ret != 0) {
+		printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
+			__func__, __LINE__, tst->name, ret, strerror(ret));
+	}
+
+	/* JIT run, if a jitted function is available */
+	rte_bpf_get_jit(bpf, &jit);
+	if (jit.func != NULL) {
+		tst->prepare(tbuf);
+		rc = jit.func(tbuf);
+		rv = tst->check_result(rc, tbuf);
+		ret |= rv;
+		if (rv != 0) {
+			/* fix: report strerror(rv), not strerror(ret) */
+			printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
+				__func__, __LINE__, tst->name, rv,
+				strerror(rv));
+		}
+	}
+
+	/*
+	 * fix: always destroy the bpf object; the original returned early
+	 * when no JIT was available, leaking it and discarding the
+	 * interpreter result.
+	 */
+	rte_bpf_destroy(bpf);
+	return ret;
+}
+
+/*
+ * Entry point for the bpf_autotest command: run every case in tests[],
+ * accumulating failures except for cases flagged allow_fail.
+ */
+static int
+test_bpf(void)
+{
+	uint32_t i;
+	int32_t rc;
+
+	rc = 0;
+	for (i = 0; i != RTE_DIM(tests); i++) {
+		int32_t rv = run_test(tests + i);
+
+		if (tests[i].allow_fail == 0)
+			rc |= rv;
+	}
+
+	return rc;
+}
+
+REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
diff --git a/test/test/test_cmdline_cirbuf.c b/test/test/test_cmdline_cirbuf.c
index e9193f66..8ac326cb 100644
--- a/test/test/test_cmdline_cirbuf.c
+++ b/test/test/test_cmdline_cirbuf.c
@@ -483,7 +483,7 @@ test_cirbuf_string_get_del_partial(void)
memset(tmp, 0, sizeof(tmp));
memset(tmp2, 0, sizeof(tmp));
- snprintf(tmp2, sizeof(tmp2), "%s", CIRBUF_STR_HEAD);
+ strlcpy(tmp2, CIRBUF_STR_HEAD, sizeof(tmp2));
/*
* initialize circular buffer
diff --git a/test/test/test_cmdline_ipaddr.c b/test/test/test_cmdline_ipaddr.c
index 2eb5a774..8ee7f628 100644
--- a/test/test/test_cmdline_ipaddr.c
+++ b/test/test/test_cmdline_ipaddr.c
@@ -87,8 +87,6 @@ const struct ipaddr_str ipaddr_valid_strs[] = {
CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK},
{"192.168.1.0/24", {AF_INET, {IP4(192,168,1,0)}, 24},
CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK},
- {"012.34.56.78/24", {AF_INET, {IP4(12,34,56,78)}, 24},
- CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK},
{"34.56.78.90/1", {AF_INET, {IP4(34,56,78,90)}, 1},
CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK},
{"::", {AF_INET6, {IP6(0,0,0,0,0,0,0,0)}, 0},
diff --git a/test/test/test_common.c b/test/test/test_common.c
index d0342430..7a67e458 100644
--- a/test/test/test_common.c
+++ b/test/test/test_common.c
@@ -3,6 +3,7 @@
*/
#include <stdio.h>
+#include <inttypes.h>
#include <string.h>
#include <math.h>
#include <rte_common.h>
@@ -70,6 +71,9 @@ test_align(void)
#define FAIL_ALIGN(x, i, p)\
{printf(x "() test failed: %u %u\n", i, p);\
return -1;}
+#define FAIL_ALIGN64(x, j, q)\
+ {printf(x "() test failed: %"PRIu64" %"PRIu64"\n", j, q);\
+ return -1; }
#define ERROR_FLOOR(res, i, pow) \
(res % pow) || /* check if not aligned */ \
((res / pow) != (i / pow)) /* check if correct alignment */
@@ -80,6 +84,7 @@ test_align(void)
val / pow != (i / pow) + 1) /* if not aligned, hence +1 */
uint32_t i, p, val;
+ uint64_t j, q;
for (i = 1, p = 1; i <= MAX_NUM; i ++) {
if (rte_align32pow2(i) != p)
@@ -88,6 +93,27 @@ test_align(void)
p <<= 1;
}
+ for (i = 1, p = 1; i <= MAX_NUM; i++) {
+ if (rte_align32prevpow2(i) != p)
+ FAIL_ALIGN("rte_align32prevpow2", i, p);
+ if (rte_is_power_of_2(i + 1))
+ p = i + 1;
+ }
+
+ for (j = 1, q = 1; j <= MAX_NUM ; j++) {
+ if (rte_align64pow2(j) != q)
+ FAIL_ALIGN64("rte_align64pow2", j, q);
+ if (j == q)
+ q <<= 1;
+ }
+
+ for (j = 1, q = 1; j <= MAX_NUM ; j++) {
+ if (rte_align64prevpow2(j) != q)
+ FAIL_ALIGN64("rte_align64prevpow2", j, q);
+ if (rte_is_power_of_2(j + 1))
+ q = j + 1;
+ }
+
for (p = 2; p <= MAX_NUM; p <<= 1) {
if (!rte_is_power_of_2(p))
@@ -128,6 +154,18 @@ test_align(void)
FAIL("rte_is_aligned");
}
}
+
+ for (p = 1; p <= MAX_NUM / 2; p++) {
+ for (i = 1; i <= MAX_NUM / 2; i++) {
+ val = RTE_ALIGN_MUL_CEIL(i, p);
+ if (val % p != 0 || val < i)
+ FAIL_ALIGN("RTE_ALIGN_MUL_CEIL", i, p);
+ val = RTE_ALIGN_MUL_FLOOR(i, p);
+ if (val % p != 0 || val > i)
+ FAIL_ALIGN("RTE_ALIGN_MUL_FLOOR", i, p);
+ }
+ }
+
return 0;
}
diff --git a/test/test/test_compressdev.c b/test/test/test_compressdev.c
new file mode 100644
index 00000000..86453882
--- /dev/null
+++ b/test/test/test_compressdev.c
@@ -0,0 +1,1486 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <string.h>
+#include <zlib.h>
+#include <math.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_compressdev.h>
+
+#include "test_compressdev_test_buffer.h"
+#include "test.h"
+
/* Integer ceiling division: number of size-b chunks needed to hold a bytes */
#define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))

/* log2 of the DEFLATE window size in bytes, passed to zlib as windowBits */
#define DEFAULT_WINDOW_SIZE 15
/* zlib memLevel parameter for deflateInit2() */
#define DEFAULT_MEM_LEVEL 8
/* Give up polling for completed operations after this many retries */
#define MAX_DEQD_RETRIES 10
/* Microseconds slept between dequeue retries (10 ms) */
#define DEQUEUE_WAIT_TIME 10000

/*
 * 30% extra size for compressed data compared to original data,
 * in case data size cannot be reduced and it is actually bigger
 * due to the compress block headers
 */
#define COMPRESS_BUF_SIZE_RATIO 1.3
#define NUM_LARGE_MBUFS 16
#define SMALL_SEG_SIZE 256
#define MAX_SEGS 16
#define NUM_OPS 16
#define NUM_MAX_XFORMS 16
#define NUM_MAX_INFLIGHT_OPS 128
#define CACHE_SIZE 0
+
+const char *
+huffman_type_strings[] = {
+ [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
+ [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
+ [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
+};
+
/*
 * Selects which side(s) of the compress/decompress round trip are done
 * in software with zlib instead of the compressdev PMD under test.
 */
enum zlib_direction {
	ZLIB_NONE,	/* PMD does both directions */
	ZLIB_COMPRESS,	/* zlib compresses, PMD decompresses */
	ZLIB_DECOMPRESS,	/* PMD compresses, zlib decompresses */
	ZLIB_ALL	/* zlib does both directions */
};

/*
 * Private data stored after each rte_comp_op (op_pool is created with
 * sizeof(struct priv_op_data) of user size): keeps the original buffer
 * index, since ops may be dequeued out of order.
 */
struct priv_op_data {
	uint16_t orig_idx;
};
+
/* State shared by the whole suite: mbuf/op pools and default xforms */
struct comp_testsuite_params {
	struct rte_mempool *large_mbuf_pool;	/* single-segment buffers */
	struct rte_mempool *small_mbuf_pool;	/* SMALL_SEG_SIZE segments, for SGL tests */
	struct rte_mempool *op_pool;	/* rte_comp_op pool with priv_op_data */
	struct rte_comp_xform *def_comp_xform;	/* default DEFLATE compress xform */
	struct rte_comp_xform *def_decomp_xform;	/* default DEFLATE decompress xform */
};

static struct comp_testsuite_params testsuite_params = { 0 };
+
+static void
+testsuite_teardown(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+
+ rte_mempool_free(ts_params->large_mbuf_pool);
+ rte_mempool_free(ts_params->small_mbuf_pool);
+ rte_mempool_free(ts_params->op_pool);
+ rte_free(ts_params->def_comp_xform);
+ rte_free(ts_params->def_decomp_xform);
+}
+
+static int
+testsuite_setup(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ uint32_t max_buf_size = 0;
+ unsigned int i;
+
+ if (rte_compressdev_count() == 0) {
+ RTE_LOG(ERR, USER1, "Need at least one compress device\n");
+ return TEST_FAILED;
+ }
+
+ RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
+ rte_compressdev_name_get(0));
+
+ for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
+ max_buf_size = RTE_MAX(max_buf_size,
+ strlen(compress_test_bufs[i]) + 1);
+
+ /*
+ * Buffers to be used in compression and decompression.
+ * Since decompressed data might be larger than
+ * compressed data (due to block header),
+ * buffers should be big enough for both cases.
+ */
+ max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
+ ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
+ NUM_LARGE_MBUFS,
+ CACHE_SIZE, 0,
+ max_buf_size + RTE_PKTMBUF_HEADROOM,
+ rte_socket_id());
+ if (ts_params->large_mbuf_pool == NULL) {
+ RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
+ return TEST_FAILED;
+ }
+
+ /* Create mempool with smaller buffers for SGL testing */
+ ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
+ NUM_LARGE_MBUFS * MAX_SEGS,
+ CACHE_SIZE, 0,
+ SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
+ rte_socket_id());
+ if (ts_params->small_mbuf_pool == NULL) {
+ RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
+ goto exit;
+ }
+
+ ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
+ 0, sizeof(struct priv_op_data),
+ rte_socket_id());
+ if (ts_params->op_pool == NULL) {
+ RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
+ goto exit;
+ }
+
+ ts_params->def_comp_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+ if (ts_params->def_comp_xform == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Default compress xform could not be created\n");
+ goto exit;
+ }
+ ts_params->def_decomp_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+ if (ts_params->def_decomp_xform == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Default decompress xform could not be created\n");
+ goto exit;
+ }
+
+ /* Initializes default values for compress/decompress xforms */
+ ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
+ ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
+ ts_params->def_comp_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DEFAULT;
+ ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
+ ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
+ ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
+
+ ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
+ ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
+ ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
+ ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
+
+ return TEST_SUCCESS;
+
+exit:
+ testsuite_teardown();
+
+ return TEST_FAILED;
+}
+
+static int
+generic_ut_setup(void)
+{
+ /* Configure compressdev (one device, one queue pair) */
+ struct rte_compressdev_config config = {
+ .socket_id = rte_socket_id(),
+ .nb_queue_pairs = 1,
+ .max_nb_priv_xforms = NUM_MAX_XFORMS,
+ .max_nb_streams = 0
+ };
+
+ if (rte_compressdev_configure(0, &config) < 0) {
+ RTE_LOG(ERR, USER1, "Device configuration failed\n");
+ return -1;
+ }
+
+ if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
+ rte_socket_id()) < 0) {
+ RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
+ return -1;
+ }
+
+ if (rte_compressdev_start(0) < 0) {
+ RTE_LOG(ERR, USER1, "Device could not be started\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+generic_ut_teardown(void)
+{
+ rte_compressdev_stop(0);
+ if (rte_compressdev_close(0) < 0)
+ RTE_LOG(ERR, USER1, "Device could not be closed\n");
+}
+
+static int
+test_compressdev_invalid_configuration(void)
+{
+ struct rte_compressdev_config invalid_config;
+ struct rte_compressdev_config valid_config = {
+ .socket_id = rte_socket_id(),
+ .nb_queue_pairs = 1,
+ .max_nb_priv_xforms = NUM_MAX_XFORMS,
+ .max_nb_streams = 0
+ };
+ struct rte_compressdev_info dev_info;
+
+ /* Invalid configuration with 0 queue pairs */
+ memcpy(&invalid_config, &valid_config,
+ sizeof(struct rte_compressdev_config));
+ invalid_config.nb_queue_pairs = 0;
+
+ TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
+ "Device configuration was successful "
+ "with no queue pairs (invalid)\n");
+
+ /*
+ * Invalid configuration with too many queue pairs
+ * (if there is an actual maximum number of queue pairs)
+ */
+ rte_compressdev_info_get(0, &dev_info);
+ if (dev_info.max_nb_queue_pairs != 0) {
+ memcpy(&invalid_config, &valid_config,
+ sizeof(struct rte_compressdev_config));
+ invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
+
+ TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
+ "Device configuration was successful "
+ "with too many queue pairs (invalid)\n");
+ }
+
+ /* Invalid queue pair setup, with no number of queue pairs set */
+ TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
+ NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
+ "Queue pair setup was successful "
+ "with no queue pairs set (invalid)\n");
+
+ return TEST_SUCCESS;
+}
+
+static int
+compare_buffers(const char *buffer1, uint32_t buffer1_len,
+ const char *buffer2, uint32_t buffer2_len)
+{
+ if (buffer1_len != buffer2_len) {
+ RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
+ return -1;
+ }
+
+ if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
+ RTE_LOG(ERR, USER1, "Buffers are different\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
 * Translate a compressdev flush flag into the corresponding zlib
 * flush constant. Returns -1 for an unknown flag; the enum only
 * holds the four values handled here, so that should never happen.
 */
static int
map_zlib_flush_flag(enum rte_comp_flush_flag flag)
{
	if (flag == RTE_COMP_FLUSH_NONE)
		return Z_NO_FLUSH;
	if (flag == RTE_COMP_FLUSH_SYNC)
		return Z_SYNC_FLUSH;
	if (flag == RTE_COMP_FLUSH_FULL)
		return Z_FULL_FLUSH;
	if (flag == RTE_COMP_FLUSH_FINAL)
		return Z_FINISH;

	return -1;
}
+
/*
 * Software reference implementation of the compress path, using zlib.
 * Compresses op->m_src into op->m_dst in a single deflate() call
 * (stateless) and fills in op->consumed/produced/status the way a PMD
 * would. mem_level is passed straight through to deflateInit2().
 *
 * Multi-segment (SGL) mbufs are first linearized into temporary
 * contiguous buffers allocated with rte_malloc().
 *
 * Returns 0 on success; on failure returns TEST_FAILED or a zlib
 * error code (whatever was last assigned to ret before the jump).
 */
static int
compress_zlib(struct rte_comp_op *op,
		const struct rte_comp_xform *xform, int mem_level)
{
	z_stream stream;
	int zlib_flush;
	int strategy, window_bits, comp_level;
	int ret = TEST_FAILED;
	uint8_t *single_src_buf = NULL;
	uint8_t *single_dst_buf = NULL;

	/* initialize zlib stream */
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	/* Only FIXED Huffman maps to a non-default zlib strategy */
	if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
		strategy = Z_FIXED;
	else
		strategy = Z_DEFAULT_STRATEGY;

	/*
	 * Window bits is the base two logarithm of the window size (in bytes).
	 * When doing raw DEFLATE, this number will be negative.
	 */
	window_bits = -(xform->compress.window_size);

	comp_level = xform->compress.level;

	if (comp_level != RTE_COMP_LEVEL_NONE)
		ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
			window_bits, mem_level, strategy);
	else
		ret = deflateInit(&stream, Z_NO_COMPRESSION);

	if (ret != Z_OK) {
		printf("Zlib deflate could not be initialized\n");
		goto exit;
	}

	/* Assuming stateless operation */
	/* SGL */
	if (op->m_src->nb_segs > 1) {
		/* Linearize src/dst chains into contiguous scratch buffers */
		single_src_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_src), 0);
		if (single_src_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}
		single_dst_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_dst), 0);
		if (single_dst_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}
		if (rte_pktmbuf_read(op->m_src, 0,
				rte_pktmbuf_pkt_len(op->m_src),
				single_src_buf) == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be read entirely\n");
			goto exit;
		}

		stream.avail_in = op->src.length;
		stream.next_in = single_src_buf;
		stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
		stream.next_out = single_dst_buf;

	} else {
		stream.avail_in = op->src.length;
		stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
		stream.avail_out = op->m_dst->data_len;
		stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
	}
	/* Stateless operation, all buffer will be compressed in one go */
	zlib_flush = map_zlib_flush_flag(op->flush_flag);
	ret = deflate(&stream, zlib_flush);

	if (stream.avail_in != 0) {
		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
		goto exit;
	}

	if (ret != Z_STREAM_END)
		goto exit;

	/* Copy data to destination SGL */
	if (op->m_src->nb_segs > 1) {
		uint32_t remaining_data = stream.total_out;
		uint8_t *src_data = single_dst_buf;
		struct rte_mbuf *dst_buf = op->m_dst;

		/* Scatter the contiguous output back over the dst chain */
		while (remaining_data > 0) {
			uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
					uint8_t *);
			/* Last segment */
			if (remaining_data < dst_buf->data_len) {
				memcpy(dst_data, src_data, remaining_data);
				remaining_data = 0;
			} else {
				memcpy(dst_data, src_data, dst_buf->data_len);
				remaining_data -= dst_buf->data_len;
				src_data += dst_buf->data_len;
				dst_buf = dst_buf->next;
			}
		}
	}

	/* Report results the same way a compressdev PMD would */
	op->consumed = stream.total_in;
	op->produced = stream.total_out;
	op->status = RTE_COMP_OP_STATUS_SUCCESS;

	deflateReset(&stream);

	ret = 0;
exit:
	deflateEnd(&stream);
	rte_free(single_src_buf);
	rte_free(single_dst_buf);

	return ret;
}
+
/*
 * Software reference implementation of the decompress path, using
 * zlib. Inflates op->m_src into op->m_dst in a single inflate() call
 * (stateless) and fills in op->consumed/produced/status the way a PMD
 * would. Multi-segment (SGL) mbufs are first linearized into
 * temporary contiguous buffers allocated with rte_malloc().
 *
 * Returns 0 on success; on failure returns TEST_FAILED or a zlib
 * error code (whatever was last assigned to ret before the jump).
 */
static int
decompress_zlib(struct rte_comp_op *op,
		const struct rte_comp_xform *xform)
{
	z_stream stream;
	int window_bits;
	int zlib_flush;
	int ret = TEST_FAILED;
	uint8_t *single_src_buf = NULL;
	uint8_t *single_dst_buf = NULL;

	/* initialize zlib stream */
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	/*
	 * Window bits is the base two logarithm of the window size (in bytes).
	 * When doing raw DEFLATE, this number will be negative.
	 */
	window_bits = -(xform->decompress.window_size);

	ret = inflateInit2(&stream, window_bits);

	if (ret != Z_OK) {
		/* Fix: message used to say "deflate" on the inflate path */
		printf("Zlib inflate could not be initialized\n");
		goto exit;
	}

	/* Assuming stateless operation */
	/* SGL */
	if (op->m_src->nb_segs > 1) {
		/* Linearize src/dst chains into contiguous scratch buffers */
		single_src_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_src), 0);
		if (single_src_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}
		single_dst_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_dst), 0);
		if (single_dst_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}
		if (rte_pktmbuf_read(op->m_src, 0,
				rte_pktmbuf_pkt_len(op->m_src),
				single_src_buf) == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be read entirely\n");
			goto exit;
		}

		stream.avail_in = op->src.length;
		stream.next_in = single_src_buf;
		stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
		stream.next_out = single_dst_buf;

	} else {
		stream.avail_in = op->src.length;
		stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
		stream.avail_out = op->m_dst->data_len;
		stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
	}

	/* Stateless operation, all buffer will be compressed in one go */
	zlib_flush = map_zlib_flush_flag(op->flush_flag);
	ret = inflate(&stream, zlib_flush);

	if (stream.avail_in != 0) {
		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
		goto exit;
	}

	if (ret != Z_STREAM_END)
		goto exit;

	/* Scatter the contiguous output back over the destination chain */
	if (op->m_src->nb_segs > 1) {
		uint32_t remaining_data = stream.total_out;
		uint8_t *src_data = single_dst_buf;
		struct rte_mbuf *dst_buf = op->m_dst;

		while (remaining_data > 0) {
			uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
					uint8_t *);
			/* Last segment */
			if (remaining_data < dst_buf->data_len) {
				memcpy(dst_data, src_data, remaining_data);
				remaining_data = 0;
			} else {
				memcpy(dst_data, src_data, dst_buf->data_len);
				remaining_data -= dst_buf->data_len;
				src_data += dst_buf->data_len;
				dst_buf = dst_buf->next;
			}
		}
	}

	/* Report results the same way a compressdev PMD would */
	op->consumed = stream.total_in;
	op->produced = stream.total_out;
	op->status = RTE_COMP_OP_STATUS_SUCCESS;

	inflateReset(&stream);

	ret = 0;
exit:
	inflateEnd(&stream);

	return ret;
}
+
+static int
+prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
+ uint32_t total_data_size,
+ struct rte_mempool *small_mbuf_pool,
+ struct rte_mempool *large_mbuf_pool,
+ uint8_t limit_segs_in_sgl)
+{
+ uint32_t remaining_data = total_data_size;
+ uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
+ struct rte_mempool *pool;
+ struct rte_mbuf *next_seg;
+ uint32_t data_size;
+ char *buf_ptr;
+ const char *data_ptr = test_buf;
+ uint16_t i;
+ int ret;
+
+ if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
+ num_remaining_segs = limit_segs_in_sgl - 1;
+
+ /*
+ * Allocate data in the first segment (header) and
+ * copy data if test buffer is provided
+ */
+ if (remaining_data < SMALL_SEG_SIZE)
+ data_size = remaining_data;
+ else
+ data_size = SMALL_SEG_SIZE;
+ buf_ptr = rte_pktmbuf_append(head_buf, data_size);
+ if (buf_ptr == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Not enough space in the 1st buffer\n");
+ return -1;
+ }
+
+ if (data_ptr != NULL) {
+ /* Copy characters without NULL terminator */
+ strncpy(buf_ptr, data_ptr, data_size);
+ data_ptr += data_size;
+ }
+ remaining_data -= data_size;
+ num_remaining_segs--;
+
+ /*
+ * Allocate the rest of the segments,
+ * copy the rest of the data and chain the segments.
+ */
+ for (i = 0; i < num_remaining_segs; i++) {
+
+ if (i == (num_remaining_segs - 1)) {
+ /* last segment */
+ if (remaining_data > SMALL_SEG_SIZE)
+ pool = large_mbuf_pool;
+ else
+ pool = small_mbuf_pool;
+ data_size = remaining_data;
+ } else {
+ data_size = SMALL_SEG_SIZE;
+ pool = small_mbuf_pool;
+ }
+
+ next_seg = rte_pktmbuf_alloc(pool);
+ if (next_seg == NULL) {
+ RTE_LOG(ERR, USER1,
+ "New segment could not be allocated "
+ "from the mempool\n");
+ return -1;
+ }
+ buf_ptr = rte_pktmbuf_append(next_seg, data_size);
+ if (buf_ptr == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Not enough space in the buffer\n");
+ rte_pktmbuf_free(next_seg);
+ return -1;
+ }
+ if (data_ptr != NULL) {
+ /* Copy characters without NULL terminator */
+ strncpy(buf_ptr, data_ptr, data_size);
+ data_ptr += data_size;
+ }
+ remaining_data -= data_size;
+
+ ret = rte_pktmbuf_chain(head_buf, next_seg);
+ if (ret != 0) {
+ rte_pktmbuf_free(next_seg);
+ RTE_LOG(ERR, USER1,
+ "Segment could not chained\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/*
 * Core engine of the deflate tests: compresses every buffer in
 * test_bufs[], decompresses the result, then compares the final
 * output against the original data (length and content).
 *
 * Depending on zlib_dir, either stage (or both) is performed in
 * software with zlib instead of the compressdev PMD, so each PMD
 * direction can be validated independently against a reference.
 *
 * test_bufs:  NUL-terminated strings to round-trip
 * buf_idx:    original index of each buffer (used only for logging)
 * compress_xforms / decompress_xforms: num_xforms entries, assigned
 *             to operations round-robin
 * state:      only RTE_COMP_OP_STATELESS is supported here
 * sgl:        when non-zero, buffers are built as multi-segment
 *             chains from the small mbuf pool
 *
 * Returns 0 on success, -1 on any failure; all mbufs, ops and
 * private xforms are released on every path via the exit label.
 */
static int
test_deflate_comp_decomp(const char * const test_bufs[],
		unsigned int num_bufs,
		uint16_t buf_idx[],
		struct rte_comp_xform *compress_xforms[],
		struct rte_comp_xform *decompress_xforms[],
		unsigned int num_xforms,
		enum rte_comp_op_type state,
		unsigned int sgl,
		enum zlib_direction zlib_dir)
{
	struct comp_testsuite_params *ts_params = &testsuite_params;
	int ret_status = -1;
	int ret;
	struct rte_mbuf *uncomp_bufs[num_bufs];
	struct rte_mbuf *comp_bufs[num_bufs];
	struct rte_comp_op *ops[num_bufs];
	struct rte_comp_op *ops_processed[num_bufs];
	void *priv_xforms[num_bufs];
	uint16_t num_enqd, num_deqd, num_total_deqd;
	uint16_t num_priv_xforms = 0;
	unsigned int deqd_retries = 0;
	struct priv_op_data *priv_data;
	char *buf_ptr;
	unsigned int i;
	struct rte_mempool *buf_pool;
	uint32_t data_size;
	const struct rte_compressdev_capabilities *capa =
		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	char *contig_buf = NULL;

	/* Initialize all arrays to NULL, so cleanup at exit is safe */
	memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
	memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
	memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
	memset(priv_xforms, 0, sizeof(void *) * num_bufs);

	if (sgl)
		buf_pool = ts_params->small_mbuf_pool;
	else
		buf_pool = ts_params->large_mbuf_pool;

	/* Prepare the source mbufs with the data */
	ret = rte_pktmbuf_alloc_bulk(buf_pool,
			uncomp_bufs, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Source mbufs could not be allocated "
			"from the mempool\n");
		goto exit;
	}

	if (sgl) {
		for (i = 0; i < num_bufs; i++) {
			data_size = strlen(test_bufs[i]) + 1;
			if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
					data_size,
					ts_params->small_mbuf_pool,
					ts_params->large_mbuf_pool,
					MAX_SEGS) < 0)
				goto exit;
		}
	} else {
		for (i = 0; i < num_bufs; i++) {
			data_size = strlen(test_bufs[i]) + 1;
			buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
			snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
		}
	}

	/* Prepare the destination mbufs (sized with the 30% margin) */
	ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Destination mbufs could not be allocated "
			"from the mempool\n");
		goto exit;
	}

	if (sgl) {
		for (i = 0; i < num_bufs; i++) {
			data_size = strlen(test_bufs[i]) *
				COMPRESS_BUF_SIZE_RATIO;
			if (prepare_sgl_bufs(NULL, comp_bufs[i],
					data_size,
					ts_params->small_mbuf_pool,
					ts_params->large_mbuf_pool,
					MAX_SEGS) < 0)
				goto exit;
		}

	} else {
		for (i = 0; i < num_bufs; i++) {
			data_size = strlen(test_bufs[i]) *
				COMPRESS_BUF_SIZE_RATIO;
			rte_pktmbuf_append(comp_bufs[i], data_size);
		}
	}

	/* Build the compression operations */
	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Compress operations could not be allocated "
			"from the mempool\n");
		goto exit;
	}

	for (i = 0; i < num_bufs; i++) {
		ops[i]->m_src = uncomp_bufs[i];
		ops[i]->m_dst = comp_bufs[i];
		ops[i]->src.offset = 0;
		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
		ops[i]->dst.offset = 0;
		if (state == RTE_COMP_OP_STATELESS) {
			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
		} else {
			RTE_LOG(ERR, USER1,
				"Stateful operations are not supported "
				"in these tests yet\n");
			goto exit;
		}
		ops[i]->input_chksum = 0;
		/*
		 * Store original operation index in private data,
		 * since ordering does not have to be maintained,
		 * when dequeueing from compressdev, so a comparison
		 * at the end of the test can be done.
		 */
		priv_data = (struct priv_op_data *) (ops[i] + 1);
		priv_data->orig_idx = i;
	}

	/* Compress data (either with Zlib API or compressdev API */
	if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
		for (i = 0; i < num_bufs; i++) {
			const struct rte_comp_xform *compress_xform =
				compress_xforms[i % num_xforms];
			ret = compress_zlib(ops[i], compress_xform,
					DEFAULT_MEM_LEVEL);
			if (ret < 0)
				goto exit;

			ops_processed[i] = ops[i];
		}
	} else {
		/* Create compress private xform data */
		for (i = 0; i < num_xforms; i++) {
			ret = rte_compressdev_private_xform_create(0,
				(const struct rte_comp_xform *)compress_xforms[i],
				&priv_xforms[i]);
			if (ret < 0) {
				RTE_LOG(ERR, USER1,
					"Compression private xform "
					"could not be created\n");
				goto exit;
			}
			num_priv_xforms++;
		}

		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
			/* Attach shareable private xform data to ops */
			for (i = 0; i < num_bufs; i++)
				ops[i]->private_xform = priv_xforms[i % num_xforms];
		} else {
			/* Create rest of the private xforms for the other ops */
			for (i = num_xforms; i < num_bufs; i++) {
				ret = rte_compressdev_private_xform_create(0,
					compress_xforms[i % num_xforms],
					&priv_xforms[i]);
				if (ret < 0) {
					RTE_LOG(ERR, USER1,
						"Compression private xform "
						"could not be created\n");
					goto exit;
				}
				num_priv_xforms++;
			}

			/* Attach non shareable private xform data to ops */
			for (i = 0; i < num_bufs; i++)
				ops[i]->private_xform = priv_xforms[i];
		}

		/* Enqueue and dequeue all operations */
		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
		if (num_enqd < num_bufs) {
			RTE_LOG(ERR, USER1,
				"The operations could not be enqueued\n");
			goto exit;
		}

		num_total_deqd = 0;
		do {
			/*
			 * If retrying a dequeue call, wait for 10 ms to allow
			 * enough time to the driver to process the operations
			 */
			if (deqd_retries != 0) {
				/*
				 * Avoid infinite loop if not all the
				 * operations get out of the device
				 */
				if (deqd_retries == MAX_DEQD_RETRIES) {
					RTE_LOG(ERR, USER1,
						"Not all operations could be "
						"dequeued\n");
					goto exit;
				}
				usleep(DEQUEUE_WAIT_TIME);
			}
			num_deqd = rte_compressdev_dequeue_burst(0, 0,
					&ops_processed[num_total_deqd], num_bufs);
			num_total_deqd += num_deqd;
			deqd_retries++;
		} while (num_total_deqd < num_enqd);

		deqd_retries = 0;

		/* Free compress private xforms */
		for (i = 0; i < num_priv_xforms; i++) {
			rte_compressdev_private_xform_free(0, priv_xforms[i]);
			priv_xforms[i] = NULL;
		}
		num_priv_xforms = 0;
	}

	/* Log per-buffer compression results (debug level only) */
	for (i = 0; i < num_bufs; i++) {
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		uint16_t xform_idx = priv_data->orig_idx % num_xforms;
		const struct rte_comp_compress_xform *compress_xform =
				&compress_xforms[xform_idx]->compress;
		enum rte_comp_huffman huffman_type =
			compress_xform->deflate.huffman;
		RTE_LOG(DEBUG, USER1, "Buffer %u compressed from %u to %u bytes "
			"(level = %d, huffman = %s)\n",
			buf_idx[priv_data->orig_idx],
			ops_processed[i]->consumed, ops_processed[i]->produced,
			compress_xform->level,
			huffman_type_strings[huffman_type]);
		/* NOTE(review): this log line lacks a trailing newline */
		RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f",
			(float)ops_processed[i]->produced /
			ops_processed[i]->consumed * 100);
		ops[i] = NULL;
	}

	/*
	 * Check operation status and free source mbufs (destination mbuf and
	 * compress operation information is needed for the decompression stage)
	 */
	for (i = 0; i < num_bufs; i++) {
		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, USER1,
				"Some operations were not successful\n");
			goto exit;
		}
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
		uncomp_bufs[priv_data->orig_idx] = NULL;
	}

	/* Allocate buffers for decompressed data */
	ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Destination mbufs could not be allocated "
			"from the mempool\n");
		goto exit;
	}

	if (sgl) {
		for (i = 0; i < num_bufs; i++) {
			priv_data = (struct priv_op_data *)
					(ops_processed[i] + 1);
			data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
			if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
					data_size,
					ts_params->small_mbuf_pool,
					ts_params->large_mbuf_pool,
					MAX_SEGS) < 0)
				goto exit;
		}

	} else {
		for (i = 0; i < num_bufs; i++) {
			priv_data = (struct priv_op_data *)
					(ops_processed[i] + 1);
			data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
			rte_pktmbuf_append(uncomp_bufs[i], data_size);
		}
	}

	/* Build the decompression operations */
	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Decompress operations could not be allocated "
			"from the mempool\n");
		goto exit;
	}

	/* Source buffer is the compressed data from the previous operations */
	for (i = 0; i < num_bufs; i++) {
		ops[i]->m_src = ops_processed[i]->m_dst;
		ops[i]->m_dst = uncomp_bufs[i];
		ops[i]->src.offset = 0;
		/*
		 * Set the length of the compressed data to the
		 * number of bytes that were produced in the previous stage
		 */
		ops[i]->src.length = ops_processed[i]->produced;
		ops[i]->dst.offset = 0;
		if (state == RTE_COMP_OP_STATELESS) {
			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
		} else {
			RTE_LOG(ERR, USER1,
				"Stateful operations are not supported "
				"in these tests yet\n");
			goto exit;
		}
		ops[i]->input_chksum = 0;
		/*
		 * Copy private data from previous operations,
		 * to keep the pointer to the original buffer
		 */
		memcpy(ops[i] + 1, ops_processed[i] + 1,
				sizeof(struct priv_op_data));
	}

	/*
	 * Free the previous compress operations,
	 * as it is not needed anymore
	 */
	for (i = 0; i < num_bufs; i++) {
		rte_comp_op_free(ops_processed[i]);
		ops_processed[i] = NULL;
	}

	/* Decompress data (either with Zlib API or compressdev API */
	if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
		for (i = 0; i < num_bufs; i++) {
			priv_data = (struct priv_op_data *)(ops[i] + 1);
			uint16_t xform_idx = priv_data->orig_idx % num_xforms;
			const struct rte_comp_xform *decompress_xform =
				decompress_xforms[xform_idx];

			ret = decompress_zlib(ops[i], decompress_xform);
			if (ret < 0)
				goto exit;

			ops_processed[i] = ops[i];
		}
	} else {
		/* Create decompress private xform data */
		for (i = 0; i < num_xforms; i++) {
			ret = rte_compressdev_private_xform_create(0,
				(const struct rte_comp_xform *)decompress_xforms[i],
				&priv_xforms[i]);
			if (ret < 0) {
				RTE_LOG(ERR, USER1,
					"Decompression private xform "
					"could not be created\n");
				goto exit;
			}
			num_priv_xforms++;
		}

		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
			/* Attach shareable private xform data to ops */
			for (i = 0; i < num_bufs; i++) {
				priv_data = (struct priv_op_data *)(ops[i] + 1);
				uint16_t xform_idx = priv_data->orig_idx %
								num_xforms;
				ops[i]->private_xform = priv_xforms[xform_idx];
			}
		} else {
			/* Create rest of the private xforms for the other ops */
			for (i = num_xforms; i < num_bufs; i++) {
				ret = rte_compressdev_private_xform_create(0,
					decompress_xforms[i % num_xforms],
					&priv_xforms[i]);
				if (ret < 0) {
					RTE_LOG(ERR, USER1,
						"Decompression private xform "
						"could not be created\n");
					goto exit;
				}
				num_priv_xforms++;
			}

			/* Attach non shareable private xform data to ops */
			for (i = 0; i < num_bufs; i++) {
				priv_data = (struct priv_op_data *)(ops[i] + 1);
				uint16_t xform_idx = priv_data->orig_idx;
				ops[i]->private_xform = priv_xforms[xform_idx];
			}
		}

		/* Enqueue and dequeue all operations */
		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
		if (num_enqd < num_bufs) {
			RTE_LOG(ERR, USER1,
				"The operations could not be enqueued\n");
			goto exit;
		}

		num_total_deqd = 0;
		do {
			/*
			 * If retrying a dequeue call, wait for 10 ms to allow
			 * enough time to the driver to process the operations
			 */
			if (deqd_retries != 0) {
				/*
				 * Avoid infinite loop if not all the
				 * operations get out of the device
				 */
				if (deqd_retries == MAX_DEQD_RETRIES) {
					RTE_LOG(ERR, USER1,
						"Not all operations could be "
						"dequeued\n");
					goto exit;
				}
				usleep(DEQUEUE_WAIT_TIME);
			}
			num_deqd = rte_compressdev_dequeue_burst(0, 0,
					&ops_processed[num_total_deqd], num_bufs);
			num_total_deqd += num_deqd;
			deqd_retries++;
		} while (num_total_deqd < num_enqd);

		deqd_retries = 0;
	}

	/* Log per-buffer decompression results (debug level only) */
	for (i = 0; i < num_bufs; i++) {
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		RTE_LOG(DEBUG, USER1, "Buffer %u decompressed from %u to %u bytes\n",
			buf_idx[priv_data->orig_idx],
			ops_processed[i]->consumed, ops_processed[i]->produced);
		ops[i] = NULL;
	}

	/*
	 * Check operation status and free source mbuf (destination mbuf and
	 * compress operation information is still needed)
	 */
	for (i = 0; i < num_bufs; i++) {
		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, USER1,
				"Some operations were not successful\n");
			goto exit;
		}
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
		comp_bufs[priv_data->orig_idx] = NULL;
	}

	/*
	 * Compare the original stream with the decompressed stream
	 * (in size and the data)
	 */
	for (i = 0; i < num_bufs; i++) {
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		const char *buf1 = test_bufs[priv_data->orig_idx];
		const char *buf2;
		contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
		if (contig_buf == NULL) {
			RTE_LOG(ERR, USER1, "Contiguous buffer could not "
					"be allocated\n");
			goto exit;
		}

		buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
				ops_processed[i]->produced, contig_buf);

		if (compare_buffers(buf1, strlen(buf1) + 1,
				buf2, ops_processed[i]->produced) < 0)
			goto exit;

		rte_free(contig_buf);
		contig_buf = NULL;
	}

	ret_status = 0;

exit:
	/* Free resources */
	for (i = 0; i < num_bufs; i++) {
		rte_pktmbuf_free(uncomp_bufs[i]);
		rte_pktmbuf_free(comp_bufs[i]);
		rte_comp_op_free(ops[i]);
		rte_comp_op_free(ops_processed[i]);
	}
	for (i = 0; i < num_priv_xforms; i++) {
		if (priv_xforms[i] != NULL)
			rte_compressdev_private_xform_free(0, priv_xforms[i]);
	}
	rte_free(contig_buf);

	return ret_status;
}
+
+static int
+test_compressdev_deflate_stateless_fixed(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ const char *test_buffer;
+ uint16_t i;
+ int ret;
+ const struct rte_compressdev_capabilities *capab;
+
+ capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+ TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+ if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
+ return -ENOTSUP;
+
+ struct rte_comp_xform *compress_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+
+ if (compress_xform == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Compress xform could not be created\n");
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ memcpy(compress_xform, ts_params->def_comp_xform,
+ sizeof(struct rte_comp_xform));
+ compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
+
+ for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
+ test_buffer = compress_test_bufs[i];
+
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &compress_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_DECOMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ /* Compress with Zlib, decompress with compressdev */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &compress_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_COMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+ }
+
+ ret = TEST_SUCCESS;
+
+exit:
+ rte_free(compress_xform);
+ return ret;
+}
+
+static int
+test_compressdev_deflate_stateless_dynamic(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ const char *test_buffer;
+ uint16_t i;
+ int ret;
+ struct rte_comp_xform *compress_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+
+ const struct rte_compressdev_capabilities *capab;
+
+ capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+ TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+ if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
+ return -ENOTSUP;
+
+ if (compress_xform == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Compress xform could not be created\n");
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ memcpy(compress_xform, ts_params->def_comp_xform,
+ sizeof(struct rte_comp_xform));
+ compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
+
+ for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
+ test_buffer = compress_test_bufs[i];
+
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &compress_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_DECOMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ /* Compress with Zlib, decompress with compressdev */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &compress_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_COMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+ }
+
+ ret = TEST_SUCCESS;
+
+exit:
+ rte_free(compress_xform);
+ return ret;
+}
+
+static int
+test_compressdev_deflate_stateless_multi_op(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ uint16_t num_bufs = RTE_DIM(compress_test_bufs);
+ uint16_t buf_idx[num_bufs];
+ uint16_t i;
+
+ for (i = 0; i < num_bufs; i++)
+ buf_idx[i] = i;
+
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
+ buf_idx,
+ &ts_params->def_comp_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_DECOMPRESS) < 0)
+ return TEST_FAILED;
+
+ /* Compress with Zlib, decompress with compressdev */
+ if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
+ buf_idx,
+ &ts_params->def_comp_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_COMPRESS) < 0)
+ return TEST_FAILED;
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_compressdev_deflate_stateless_multi_level(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ const char *test_buffer;
+ unsigned int level;
+ uint16_t i;
+ int ret;
+ struct rte_comp_xform *compress_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+
+ if (compress_xform == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Compress xform could not be created\n");
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ memcpy(compress_xform, ts_params->def_comp_xform,
+ sizeof(struct rte_comp_xform));
+
+ for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
+ test_buffer = compress_test_bufs[i];
+ for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
+ level++) {
+ compress_xform->compress.level = level;
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &compress_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_DECOMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+ }
+ }
+
+ ret = TEST_SUCCESS;
+
+exit:
+ rte_free(compress_xform);
+ return ret;
+}
+
+#define NUM_XFORMS 3
+static int
+test_compressdev_deflate_stateless_multi_xform(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ uint16_t num_bufs = NUM_XFORMS;
+ struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
+ struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
+ const char *test_buffers[NUM_XFORMS];
+ uint16_t i;
+ unsigned int level = RTE_COMP_LEVEL_MIN;
+ uint16_t buf_idx[num_bufs];
+
+ int ret;
+
+ /* Create multiple xforms with various levels */
+ for (i = 0; i < NUM_XFORMS; i++) {
+ compress_xforms[i] = rte_malloc(NULL,
+ sizeof(struct rte_comp_xform), 0);
+ if (compress_xforms[i] == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Compress xform could not be created\n");
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ memcpy(compress_xforms[i], ts_params->def_comp_xform,
+ sizeof(struct rte_comp_xform));
+ compress_xforms[i]->compress.level = level;
+ level++;
+
+ decompress_xforms[i] = rte_malloc(NULL,
+ sizeof(struct rte_comp_xform), 0);
+ if (decompress_xforms[i] == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Decompress xform could not be created\n");
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
+ sizeof(struct rte_comp_xform));
+ }
+
+ for (i = 0; i < NUM_XFORMS; i++) {
+ buf_idx[i] = 0;
+ /* Use the same buffer in all sessions */
+ test_buffers[i] = compress_test_bufs[0];
+ }
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(test_buffers, num_bufs,
+ buf_idx,
+ compress_xforms,
+ decompress_xforms,
+ NUM_XFORMS,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_DECOMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ ret = TEST_SUCCESS;
+exit:
+ for (i = 0; i < NUM_XFORMS; i++) {
+ rte_free(compress_xforms[i]);
+ rte_free(decompress_xforms[i]);
+ }
+
+ return ret;
+}
+
+static int
+test_compressdev_deflate_stateless_sgl(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ uint16_t i;
+ const char *test_buffer;
+ const struct rte_compressdev_capabilities *capab;
+
+ capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+ TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+ if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
+ return -ENOTSUP;
+
+ for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
+ test_buffer = compress_test_bufs[i];
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &ts_params->def_comp_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 1,
+ ZLIB_DECOMPRESS) < 0)
+ return TEST_FAILED;
+
+ /* Compress with Zlib, decompress with compressdev */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &ts_params->def_comp_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 1,
+ ZLIB_COMPRESS) < 0)
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
+}
+
+static struct unit_test_suite compressdev_testsuite = {
+ .suite_name = "compressdev unit test suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(NULL, NULL,
+ test_compressdev_invalid_configuration),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_fixed),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_dynamic),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_multi_op),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_multi_level),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_multi_xform),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_sgl),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_compressdev(void)
+{
+ return unit_test_suite_runner(&compressdev_testsuite);
+}
+
+REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
diff --git a/test/test/test_compressdev_test_buffer.h b/test/test/test_compressdev_test_buffer.h
new file mode 100644
index 00000000..c0492f89
--- /dev/null
+++ b/test/test/test_compressdev_test_buffer.h
@@ -0,0 +1,295 @@
+#ifndef TEST_COMPRESSDEV_TEST_BUFFERS_H_
+#define TEST_COMPRESSDEV_TEST_BUFFERS_H_
+
+/*
+ * These test buffers are snippets obtained
+ * from the Canterbury and Calgary Corpus
+ * collection.
+ */
+
+/* Snippet of Alice's Adventures in Wonderland */
+static const char test_buf_alice[] =
+ " Alice was beginning to get very tired of sitting by her sister\n"
+ "on the bank, and of having nothing to do: once or twice she had\n"
+ "peeped into the book her sister was reading, but it had no\n"
+ "pictures or conversations in it, `and what is the use of a book,'\n"
+ "thought Alice `without pictures or conversation?'\n\n"
+ " So she was considering in her own mind (as well as she could,\n"
+ "for the hot day made her feel very sleepy and stupid), whether\n"
+ "the pleasure of making a daisy-chain would be worth the trouble\n"
+ "of getting up and picking the daisies, when suddenly a White\n"
+ "Rabbit with pink eyes ran close by her.\n\n"
+ " There was nothing so VERY remarkable in that; nor did Alice\n"
+ "think it so VERY much out of the way to hear the Rabbit say to\n"
+ "itself, `Oh dear! Oh dear! I shall be late!' (when she thought\n"
+ "it over afterwards, it occurred to her that she ought to have\n"
+ "wondered at this, but at the time it all seemed quite natural);\n"
+ "but when the Rabbit actually TOOK A WATCH OUT OF ITS WAISTCOAT-\n"
+ "POCKET, and looked at it, and then hurried on, Alice started to\n"
+ "her feet, for it flashed across her mind that she had never\n"
+ "before seen a rabbit with either a waistcoat-pocket, or a watch to\n"
+ "take out of it, and burning with curiosity, she ran across the\n"
+ "field after it, and fortunately was just in time to see it pop\n"
+ "down a large rabbit-hole under the hedge.\n\n"
+ " In another moment down went Alice after it, never once\n"
+ "considering how in the world she was to get out again.\n\n"
+ " The rabbit-hole went straight on like a tunnel for some way,\n"
+ "and then dipped suddenly down, so suddenly that Alice had not a\n"
+ "moment to think about stopping herself before she found herself\n"
+ "falling down a very deep well.\n\n"
+ " Either the well was very deep, or she fell very slowly, for she\n"
+ "had plenty of time as she went down to look about her and to\n"
+ "wonder what was going to happen next. First, she tried to look\n"
+ "down and make out what she was coming to, but it was too dark to\n"
+ "see anything; then she looked at the sides of the well, and\n"
+ "noticed that they were filled with cupboards and book-shelves;\n"
+ "here and there she saw maps and pictures hung upon pegs. She\n"
+ "took down a jar from one of the shelves as she passed; it was\n"
+ "labelled `ORANGE MARMALADE', but to her great disappointment it\n"
+ "was empty: she did not like to drop the jar for fear of killing\n"
+ "somebody, so managed to put it into one of the cupboards as she\n"
+ "fell past it.\n\n"
+ " `Well!' thought Alice to herself, `after such a fall as this, I\n"
+ "shall think nothing of tumbling down stairs! How brave they'll\n"
+ "all think me at home! Why, I wouldn't say anything about it,\n"
+ "even if I fell off the top of the house!' (Which was very likely\n"
+ "true.)\n\n"
+ " Down, down, down. Would the fall NEVER come to an end! `I\n"
+ "wonder how many miles I've fallen by this time?' she said aloud.\n"
+ "`I must be getting somewhere near the centre of the earth. Let\n"
+ "me see: that would be four thousand miles down, I think--' (for,\n"
+ "you see, Alice had learnt several things of this sort in her\n"
+ "lessons in the schoolroom, and though this was not a VERY good\n"
+ "opportunity for showing off her knowledge, as there was no one to\n"
+ "listen to her, still it was good practice to say it over) `--yes,\n"
+ "that's about the right distance--but then I wonder what Latitude\n"
+ "or Longitude I've got to?' (Alice had no idea what Latitude was,\n"
+ "or Longitude either, but thought they were nice grand words to\n"
+ "say.)\n\n"
+ " Presently she began again. `I wonder if I shall fall right\n"
+ "THROUGH the earth! How funny it'll seem to come out among the\n"
+ "people that walk with their heads downward! The Antipathies, I\n"
+ "think--' (she was rather glad there WAS no one listening, this\n"
+ "time, as it didn't sound at all the right word) `--but I shall\n"
+ "have to ask them what the name of the country is, you know.\n"
+ "Please, Ma'am, is this New Zealand or Australia?' (and she tried\n"
+ "to curtsey as she spoke--fancy CURTSEYING as you're falling\n"
+ "through the air! Do you think you could manage it?) `And what\n"
+ "an ignorant little girl she'll think me for asking! No, it'll\n"
+ "never do to ask: perhaps I shall see it written up somewhere.'\n"
+ " Down, down, down. There was nothing else to do, so Alice soon\n"
+ "began talking again. `Dinah'll miss me very much to-night, I\n"
+ "should think!' (Dinah was the cat.) `I hope they'll remember\n"
+ "her saucer of milk at tea-time. Dinah my dear! I wish you were\n"
+ "down here with me! There are no mice in the air, I'm afraid, but\n"
+ "you might catch a bat, and that's very like a mouse, you know.\n"
+ "But do cats eat bats, I wonder?' And here Alice began to get\n"
+ "rather sleepy, and went on saying to herself, in a dreamy sort of\n"
+ "way, `Do cats eat bats? Do cats eat bats?' and sometimes, `Do\n"
+ "bats eat cats?' for, you see, as she couldn't answer either\n"
+ "question, it didn't much matter which way she put it. She felt\n"
+ "that she was dozing off, and had just begun to dream that she\n"
+ "was walking hand in hand with Dinah, and saying to her very\n"
+ "earnestly, `Now, Dinah, tell me the truth: did you ever eat a\n"
+ "bat?' when suddenly, thump! thump! down she came upon a heap of\n"
+ "sticks and dry leaves, and the fall was over.\n\n";
+
+/* Snippet of Shakespeare play */
+static const char test_buf_shakespeare[] =
+ "CHARLES wrestler to Frederick.\n"
+ "\n"
+ "\n"
+ "OLIVER |\n"
+ " |\n"
+ "JAQUES (JAQUES DE BOYS:) | sons of Sir Rowland de Boys.\n"
+ " |\n"
+ "ORLANDO |\n"
+ "\n"
+ "\n"
+ "ADAM |\n"
+ " | servants to Oliver.\n"
+ "DENNIS |\n"
+ "\n"
+ "\n"
+ "TOUCHSTONE a clown.\n"
+ "\n"
+ "SIR OLIVER MARTEXT a vicar.\n"
+ "\n"
+ "\n"
+ "CORIN |\n"
+ " | shepherds.\n"
+ "SILVIUS |\n"
+ "\n"
+ "\n"
+ "WILLIAM a country fellow in love with Audrey.\n"
+ "\n"
+ " A person representing HYMEN. (HYMEN:)\n"
+ "\n"
+ "ROSALIND daughter to the banished duke.\n"
+ "\n"
+ "CELIA daughter to Frederick.\n"
+ "\n"
+ "PHEBE a shepherdess.\n"
+ "\n"
+ "AUDREY a country wench.\n"
+ "\n"
+ " Lords, pages, and attendants, &c.\n"
+ " (Forester:)\n"
+ " (A Lord:)\n"
+ " (First Lord:)\n"
+ " (Second Lord:)\n"
+ " (First Page:)\n"
+ " (Second Page:)\n"
+ "\n"
+ "\n"
+ "SCENE Oliver's house; Duke Frederick's court; and the\n"
+ " Forest of Arden.\n"
+ "\n"
+ "\n"
+ "\n"
+ "\n"
+ " AS YOU LIKE IT\n"
+ "\n"
+ "\n"
+ "ACT I\n"
+ "\n"
+ "\n"
+ "\n"
+ "SCENE I Orchard of Oliver's house.\n"
+ "\n"
+ "\n"
+ " [Enter ORLANDO and ADAM]\n"
+ "\n"
+ "ORLANDO As I remember, Adam, it was upon this fashion\n"
+ " bequeathed me by will but poor a thousand crowns,\n"
+ " and, as thou sayest, charged my brother, on his\n"
+ " blessing, to breed me well: and there begins my\n"
+ " sadness. My brother Jaques he keeps at school, and\n"
+ " report speaks goldenly of his profit: for my part,\n"
+ " he keeps me rustically at home, or, to speak more\n"
+ " properly, stays me here at home unkept; for call you\n"
+ " that keeping for a gentleman of my birth, that\n"
+ " differs not from the stalling of an ox? His horses\n"
+ " are bred better; for, besides that they are fair\n"
+ " with their feeding, they are taught their manage,\n"
+ " and to that end riders dearly hired: but I, his\n"
+ " brother, gain nothing under him but growth; for the\n"
+ " which his animals on his dunghills are as much\n"
+ " bound to him as I. Besides this nothing that he so\n"
+ " plentifully gives me, the something that nature gave\n"
+ " me his countenance seems to take from me: he lets\n"
+ " me feed with his hinds, bars me the place of a\n"
+ " brother, and, as much as in him lies, mines my\n"
+ " gentility with my education. This is it, Adam, that\n"
+ " grieves me; and the spirit of my father, which I\n"
+ " think is within me, begins to mutiny against this\n"
+ " servitude: I will no longer endure it, though yet I\n"
+ " know no wise remedy how to avoid it.\n"
+ "\n"
+ "ADAM Yonder comes my master, your brother.\n"
+ "\n"
+ "ORLANDO Go apart, Adam, and thou shalt hear how he will\n";
+
+/* Snippet of source code in Pascal */
+static const char test_buf_pascal[] =
+ " Ptr = 1..DMem;\n"
+ " Loc = 1..IMem;\n"
+ " Loc0 = 0..IMem;\n"
+ " EdgeT = (hout,lin,hin,lout); {Warning this order is important in}\n"
+ " {predicates such as gtS,geS}\n"
+ " CardT = (finite,infinite);\n"
+ " ExpT = Minexp..Maxexp;\n"
+ " ManT = Mininf..Maxinf; \n"
+ " Pflag = (PNull,PSoln,PTrace,PPrint);\n"
+ " Sreal = record\n"
+ " edge:EdgeT;\n"
+ " cardinality:CardT;\n"
+ " exp:ExpT; {exponent}\n"
+ " mantissa:ManT;\n"
+ " end;\n"
+ " Int = record\n"
+ " hi:Sreal;\n"
+ " lo:Sreal;\n"
+ " end;\n"
+ " Instr = record\n"
+ " Code:OpType;\n"
+ " Pars: array[0..Par] of 0..DMem;\n"
+ " end;\n"
+ " DataMem= record\n"
+ " D :array [Ptr] of Int;\n"
+ " S :array [Loc] of State;\n"
+ " LastHalve:Loc;\n"
+ " RHalve :array [Loc] of real;\n"
+ " end;\n"
+ " DataFlags=record\n"
+ " PF :array [Ptr] of Pflag;\n"
+ " end;\n"
+ "var\n"
+ " Debug : (none,activity,post,trace,dump);\n"
+ " Cut : (once,all);\n"
+ " GlobalEnd,Verifiable:boolean;\n"
+ " HalveThreshold:real;\n"
+ " I : array [Loc] of Instr; {Memory holding instructions}\n"
+ " End : Loc; {last instruction in I}\n"
+ " ParN : array [OpType] of -1..Par; {number of parameters for each \n"
+ " opcode. -1 means no result}\n"
+ " ParIntersect : array [OpType] of boolean ;\n"
+ " DInit : DataMem; {initial memory which is cleared and \n"
+ " used in first call}\n"
+ " DF : DataFlags; {hold flags for variables, e.g. print/trace}\n"
+ " MaxDMem:0..DMem;\n"
+ " Shift : array[0..Digits] of 1..maxint;{array of constant multipliers}\n"
+ " {used for alignment etc.}\n"
+ " Dummy :Positive;\n"
+ " {constant intervals and Sreals}\n"
+ " PlusInfS,MinusInfS,PlusSmallS,MinusSmallS,ZeroS,\n"
+ " PlusFiniteS,MinusFiniteS:Sreal;\n"
+ " Zero,All,AllFinite:Int;\n"
+ "\n"
+ "procedure deblank;\n"
+ "var Ch:char;\n"
+ "begin\n"
+ " while (not eof) and (input^ in [' ',' ']) do read(Ch);\n"
+ "end;\n"
+ "\n"
+ "procedure InitialOptions;\n"
+ "\n"
+ "#include '/user/profs/cleary/bin/options.i';\n"
+ "\n"
+ " procedure Option;\n"
+ " begin\n"
+ " case Opt of\n"
+ " 'a','A':Debug:=activity;\n"
+ " 'd','D':Debug:=dump;\n"
+ " 'h','H':HalveThreshold:=StringNum/100;\n"
+ " 'n','N':Debug:=none;\n"
+ " 'p','P':Debug:=post;\n"
+ " 't','T':Debug:=trace;\n"
+ " 'v','V':Verifiable:=true;\n"
+ " end;\n"
+ " end;\n"
+ "\n"
+ "begin\n"
+ " Debug:=trace;\n"
+ " Verifiable:=false;\n"
+ " HalveThreshold:=67/100;\n"
+ " Options;\n"
+ " writeln(Debug);\n"
+ " writeln('Verifiable:',Verifiable);\n"
+ " writeln('Halve threshold',HalveThreshold);\n"
+ "end;{InitialOptions}\n"
+ "\n"
+ "procedure NormalizeUp(E,M:integer;var S:Sreal;var Closed:boolean);\n"
+ "begin\n"
+ "with S do\n"
+ "begin\n"
+ " if M=0 then S:=ZeroS else\n"
+ " if M>0 then\n";
+
+static const char * const compress_test_bufs[] = {
+ test_buf_alice,
+ test_buf_shakespeare,
+ test_buf_pascal
+};
+
+#endif /* TEST_COMPRESSDEV_TEST_BUFFERS_H_ */
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 1417482c..a6044b26 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -21,6 +21,8 @@
#include <rte_cryptodev_scheduler_operations.h>
#endif
+#include <rte_lcore.h>
+
#include "test.h"
#include "test_cryptodev.h"
@@ -36,6 +38,9 @@
#include "test_cryptodev_aead_test_vectors.h"
#include "test_cryptodev_hmac_test_vectors.h"
+#define VDEV_ARGS_SIZE 100
+#define MAX_NB_SESSIONS 4
+
static int gbl_driver_id;
struct crypto_testsuite_params {
@@ -316,40 +321,81 @@ testsuite_setup(void)
}
}
- /* Create a MRVL device if required */
+ /* Create a MVSAM device if required */
if (gbl_driver_id == rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_MRVL_PMD))) {
-#ifndef RTE_LIBRTE_PMD_MRVL_CRYPTO
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)));
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD),
+ NULL);
+
+ TEST_ASSERT(ret == 0, "Failed to create "
+ "instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD));
+ }
+ }
+
+ /* Create an CCP device if required */
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD))) {
nb_devs = rte_cryptodev_device_count_by_driver(
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)));
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD),
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD),
NULL);
TEST_ASSERT(ret == 0, "Failed to create "
"instance of pmd : %s",
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
}
}
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+ char vdev_args[VDEV_ARGS_SIZE] = {""};
+ char temp_str[VDEV_ARGS_SIZE] = {"mode=multi-core,"
+ "ordering=enable,name=cryptodev_test_scheduler,corelist="};
+ uint16_t slave_core_count = 0;
+ uint16_t socket_id = 0;
+
if (gbl_driver_id == rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
+ /* Identify the Slave Cores
+ * Use 2 slave cores for the device args
+ */
+ RTE_LCORE_FOREACH_SLAVE(i) {
+ if (slave_core_count > 1)
+ break;
+ snprintf(vdev_args, sizeof(vdev_args),
+ "%s%d", temp_str, i);
+ strcpy(temp_str, vdev_args);
+ strcat(temp_str, ";");
+ slave_core_count++;
+ socket_id = lcore_config[i].socket_id;
+ }
+ if (slave_core_count != 2) {
+ RTE_LOG(ERR, USER1,
+ "Cryptodev scheduler test require at least "
+ "two slave cores to run. "
+ "Please use the correct coremask.\n");
+ return TEST_FAILED;
+ }
+ strcpy(temp_str, vdev_args);
+ snprintf(vdev_args, sizeof(vdev_args), "%s,socket_id=%d",
+ temp_str, socket_id);
+ RTE_LOG(DEBUG, USER1, "vdev_args: %s\n", vdev_args);
nb_devs = rte_cryptodev_device_count_by_driver(
rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
- NULL);
-
+ vdev_args);
TEST_ASSERT(ret == 0,
"Failed to create instance %u of"
" pmd : %s",
@@ -383,15 +429,24 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- unsigned int session_size = rte_cryptodev_get_private_session_size(dev_id);
+ unsigned int session_size =
+ rte_cryptodev_sym_get_private_session_size(dev_id);
/*
* Create mempool with maximum number of sessions * 2,
* to include the session headers
*/
+ if (info.sym.max_nb_sessions != 0 &&
+ info.sym.max_nb_sessions < MAX_NB_SESSIONS) {
+ RTE_LOG(ERR, USER1, "Device does not support "
+ "at least %u sessions\n",
+ MAX_NB_SESSIONS);
+ return TEST_FAILED;
+ }
+
ts_params->session_mpool = rte_mempool_create(
"test_sess_mp",
- info.sym.max_nb_sessions * 2,
+ MAX_NB_SESSIONS * 2,
session_size,
0, 0, NULL, NULL, NULL,
NULL, SOCKET_ID_ANY,
@@ -539,7 +594,7 @@ test_device_configure_invalid_dev_id(void)
dev_id = ts_params->valid_devs[ts_params->valid_dev_count - 1];
/* Stop the device in case it's started so it can be configured */
- rte_cryptodev_stop(ts_params->valid_devs[dev_id]);
+ rte_cryptodev_stop(dev_id);
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id, &ts_params->conf),
"Failed test for rte_cryptodev_configure: "
@@ -1727,6 +1782,44 @@ test_AES_cipheronly_openssl_all(void)
}
static int
+test_AES_chain_ccp_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+ BLKCIPHER_AES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_ccp_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+ BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_AES_chain_qat_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1765,6 +1858,25 @@ test_AES_cipheronly_qat_all(void)
}
static int
+test_AES_cipheronly_virtio_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD)),
+ BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_AES_chain_dpaa_sec_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1898,6 +2010,25 @@ test_authonly_openssl_all(void)
}
static int
+test_authonly_ccp_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+ BLKCIPHER_AUTHONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_AES_chain_armv8_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1927,7 +2058,7 @@ test_AES_chain_mrvl_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)),
BLKCIPHER_AES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1946,7 +2077,7 @@ test_AES_cipheronly_mrvl_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)),
BLKCIPHER_AES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1965,7 +2096,7 @@ test_authonly_mrvl_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)),
BLKCIPHER_AUTHONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1984,7 +2115,7 @@ test_3DES_chain_mrvl_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)),
BLKCIPHER_3DES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -2003,7 +2134,7 @@ test_3DES_cipheronly_mrvl_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)),
BLKCIPHER_3DES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -3029,8 +3160,11 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
struct rte_cryptodev_info dev_info;
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
- printf("Device doesn't support scatter-gather. "
+
+ uint64_t feat_flags = dev_info.feature_flags;
+
+ if (!(feat_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) {
+ printf("Device doesn't support in-place scatter-gather. "
"Test Skipped.\n");
return 0;
}
@@ -3177,8 +3311,11 @@ test_kasumi_encryption_oop_sgl(const struct kasumi_test_data *tdata)
struct rte_cryptodev_info dev_info;
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
- printf("Device doesn't support scatter-gather. "
+
+ uint64_t feat_flags = dev_info.feature_flags;
+ if (!(feat_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)) {
+ printf("Device doesn't support out-of-place scatter-gather "
+ "in both input and output mbufs. "
"Test Skipped.\n");
return 0;
}
@@ -3528,8 +3665,12 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
struct rte_cryptodev_info dev_info;
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
- printf("Device doesn't support scatter-gather. "
+
+ uint64_t feat_flags = dev_info.feature_flags;
+
+ if (!(feat_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)) {
+ printf("Device doesn't support out-of-place scatter-gather "
+ "in both input and output mbufs. "
"Test Skipped.\n");
return 0;
}
@@ -4362,10 +4503,13 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
return -ENOTSUP;
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
- printf("Device doesn't support scatter-gather. "
+
+ uint64_t feat_flags = dev_info.feature_flags;
+
+ if (!(feat_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) {
+ printf("Device doesn't support in-place scatter-gather. "
"Test Skipped.\n");
- return -ENOTSUP;
+ return 0;
}
plaintext_len = ceil_byte_length(tdata->plaintext.len);
@@ -4876,6 +5020,24 @@ test_DES_cipheronly_mb_all(void)
return TEST_SUCCESS;
}
+static int
+test_3DES_cipheronly_mb_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)),
+ BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
static int
test_DES_docsis_mb_all(void)
@@ -4973,6 +5135,44 @@ test_3DES_cipheronly_dpaa2_sec_all(void)
}
static int
+test_3DES_chain_ccp_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+ BLKCIPHER_3DES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_ccp_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+ BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_3DES_cipheronly_qat_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -6339,10 +6539,10 @@ test_multi_session(void)
sessions = rte_malloc(NULL,
(sizeof(struct rte_cryptodev_sym_session *) *
- dev_info.sym.max_nb_sessions) + 1, 0);
+ MAX_NB_SESSIONS) + 1, 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ for (i = 0; i < MAX_NB_SESSIONS; i++) {
sessions[i] = rte_cryptodev_sym_session_create(
ts_params->session_mpool);
@@ -6391,7 +6591,7 @@ test_multi_session(void)
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ for (i = 0; i < MAX_NB_SESSIONS; i++) {
rte_cryptodev_sym_session_clear(ts_params->valid_devs[0],
sessions[i]);
rte_cryptodev_sym_session_free(sessions[i]);
@@ -6450,13 +6650,13 @@ test_multi_session_random_usage(void)
sessions = rte_malloc(NULL,
(sizeof(struct rte_cryptodev_sym_session *)
- * dev_info.sym.max_nb_sessions) + 1, 0);
+ * MAX_NB_SESSIONS) + 1, 0);
for (i = 0; i < MB_SESSION_NUMBER; i++) {
sessions[i] = rte_cryptodev_sym_session_create(
ts_params->session_mpool);
- rte_memcpy(&ut_paramz[i].ut_params, &testsuite_params,
+ rte_memcpy(&ut_paramz[i].ut_params, &unittest_params,
sizeof(struct crypto_unittest_params));
test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
@@ -8375,8 +8575,17 @@ test_scheduler_attach_slave_op(void)
rte_mempool_free(ts_params->session_mpool);
ts_params->session_mpool = NULL;
}
- unsigned int session_size = rte_cryptodev_get_private_session_size(i);
+ unsigned int session_size =
+ rte_cryptodev_sym_get_private_session_size(i);
+ if (info.sym.max_nb_sessions != 0 &&
+ info.sym.max_nb_sessions < MAX_NB_SESSIONS) {
+ RTE_LOG(ERR, USER1,
+ "Device does not support "
+ "at least %u sessions\n",
+ MAX_NB_SESSIONS);
+ return TEST_FAILED;
+ }
/*
* Create mempool with maximum number of sessions * 2,
* to include the session headers
@@ -8384,7 +8593,7 @@ test_scheduler_attach_slave_op(void)
if (ts_params->session_mpool == NULL) {
ts_params->session_mpool = rte_mempool_create(
"test_sess_mp",
- info.sym.max_nb_sessions * 2,
+ MAX_NB_SESSIONS * 2,
session_size,
0, 0, NULL, NULL, NULL,
NULL, SOCKET_ID_ANY,
@@ -8428,33 +8637,47 @@ test_scheduler_detach_slave_op(void)
}
static int
-test_scheduler_mode_op(void)
+test_scheduler_mode_op(enum rte_cryptodev_scheduler_mode scheduler_mode)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
uint8_t sched_id = ts_params->valid_devs[0];
- struct rte_cryptodev_scheduler_ops op = {0};
- struct rte_cryptodev_scheduler dummy_scheduler = {
- .description = "dummy scheduler to test mode",
- .name = "dummy scheduler",
- .mode = CDEV_SCHED_MODE_USERDEFINED,
- .ops = &op
- };
- int ret;
+ /* set mode */
+ return rte_cryptodev_scheduler_mode_set(sched_id,
+ scheduler_mode);
+}
- /* set user defined mode */
- ret = rte_cryptodev_scheduler_load_user_scheduler(sched_id,
- &dummy_scheduler);
- TEST_ASSERT(ret == 0,
- "Failed to set cdev %u to user defined mode", sched_id);
-
- /* set round robin mode */
- ret = rte_cryptodev_scheduler_mode_set(sched_id,
- CDEV_SCHED_MODE_ROUNDROBIN);
- TEST_ASSERT(ret == 0,
- "Failed to set cdev %u to round-robin mode", sched_id);
- TEST_ASSERT(rte_cryptodev_scheduler_mode_get(sched_id) ==
- CDEV_SCHED_MODE_ROUNDROBIN, "Scheduling Mode "
- "not match");
+static int
+test_scheduler_mode_roundrobin_op(void)
+{
+ TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_ROUNDROBIN) ==
+ 0, "Failed to set roundrobin mode");
+ return 0;
+
+}
+
+static int
+test_scheduler_mode_multicore_op(void)
+{
+ TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_MULTICORE) ==
+ 0, "Failed to set multicore mode");
+
+ return 0;
+}
+
+static int
+test_scheduler_mode_failover_op(void)
+{
+ TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_FAILOVER) ==
+ 0, "Failed to set failover mode");
+
+ return 0;
+}
+
+static int
+test_scheduler_mode_pkt_size_distr_op(void)
+{
+ TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_PKT_SIZE_DISTR) ==
+ 0, "Failed to set pktsize mode");
return 0;
}
@@ -8464,8 +8687,20 @@ static struct unit_test_suite cryptodev_scheduler_testsuite = {
.setup = testsuite_setup,
.teardown = testsuite_teardown,
.unit_test_cases = {
+ /* Multi Core */
+ TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_mode_multicore_op),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_scheduler_all),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op),
+
+ /* Round Robin */
TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op),
- TEST_CASE_ST(NULL, NULL, test_scheduler_mode_op),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_mode_roundrobin_op),
TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_chain_scheduler_all),
TEST_CASE_ST(ut_setup, ut_teardown,
@@ -8473,6 +8708,29 @@ static struct unit_test_suite cryptodev_scheduler_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_authonly_scheduler_all),
TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op),
+
+ /* Fail over */
+ TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_mode_failover_op),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_scheduler_all),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op),
+
+ /* PKT SIZE */
+ TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_mode_pkt_size_distr_op),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_scheduler_all),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op),
+
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
@@ -8767,6 +9025,18 @@ static struct unit_test_suite cryptodev_qat_testsuite = {
}
};
+static struct unit_test_suite cryptodev_virtio_testsuite = {
+ .suite_name = "Crypto VIRTIO Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_virtio_all),
+
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
.suite_name = "Crypto Device AESNI MB Unit Test Suite",
.setup = testsuite_setup,
@@ -8781,6 +9051,8 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_DES_docsis_mb_all),
TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_cipheronly_mb_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_CCM_authenticated_encryption_test_case_128_1),
TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_CCM_authenticated_decryption_test_case_128_1),
@@ -9646,6 +9918,38 @@ static struct unit_test_suite cryptodev_mrvl_testsuite = {
}
};
+static struct unit_test_suite cryptodev_ccp_testsuite = {
+ .suite_name = "Crypto Device CCP Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown, test_multi_session),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_multi_session_random_usage),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_ccp_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_ccp_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_chain_ccp_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_cipheronly_ccp_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_ccp_all),
+
+ /** Negative tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ authentication_verify_HMAC_SHA1_fail_data_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ authentication_verify_HMAC_SHA1_fail_tag_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt),
+
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
@@ -9654,9 +9958,9 @@ test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_QAT is enabled "
- "in config file to run this testsuite.\n");
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
+ "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
+ "are enabled in config file to run this testsuite.\n");
return TEST_SKIPPED;
}
@@ -9664,6 +9968,22 @@ test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
}
static int
+test_cryptodev_virtio(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "VIRTIO PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
+
+ return unit_test_suite_runner(&cryptodev_virtio_testsuite);
+}
+
+static int
test_cryptodev_aesni_mb(void /*argv __rte_unused, int argc __rte_unused*/)
{
gbl_driver_id = rte_cryptodev_driver_id_get(
@@ -9795,11 +10115,11 @@ static int
test_cryptodev_mrvl(void)
{
gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD));
if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "MRVL PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO is enabled "
+ RTE_LOG(ERR, USER1, "MVSAM PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO is enabled "
"in config file to run this testsuite.\n");
return TEST_SKIPPED;
}
@@ -9867,6 +10187,22 @@ test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/)
return unit_test_suite_runner(&cryptodev_dpaa_sec_testsuite);
}
+static int
+test_cryptodev_ccp(void)
+{
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "CCP PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_CCP is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
+
+ return unit_test_suite_runner(&cryptodev_ccp_testsuite);
+}
+
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
@@ -9876,6 +10212,8 @@ REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_autotest, test_cryptodev_sw_snow3g);
REGISTER_TEST_COMMAND(cryptodev_sw_kasumi_autotest, test_cryptodev_sw_kasumi);
REGISTER_TEST_COMMAND(cryptodev_sw_zuc_autotest, test_cryptodev_sw_zuc);
REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8);
-REGISTER_TEST_COMMAND(cryptodev_sw_mrvl_autotest, test_cryptodev_mrvl);
+REGISTER_TEST_COMMAND(cryptodev_sw_mvsam_autotest, test_cryptodev_mrvl);
REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec);
REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec);
+REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp);
+REGISTER_TEST_COMMAND(cryptodev_virtio_autotest, test_cryptodev_virtio);
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 8cdc0874..1bd44dcd 100644
--- a/test/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -58,9 +58,12 @@
#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
+#define CRYPTODEV_NAME_DPAA_SEC_PMD crypto_dpaa_sec
#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
-#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl
+#define CRYPTODEV_NAME_MVSAM_PMD crypto_mvsam
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio
/**
* Write (spread) data from buffer to mbuf data
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 3577ef4b..1c4dc664 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1171,7 +1171,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1184,7 +1185,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1223,7 +1225,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1236,7 +1239,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1249,7 +1253,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1257,7 +1262,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_13,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1285,7 +1290,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1302,7 +1308,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_13,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest",
@@ -1315,7 +1321,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1323,7 +1330,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_12,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1337,7 +1344,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1345,7 +1353,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_12,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest",
@@ -1357,7 +1365,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1366,7 +1375,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1390,7 +1400,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1455,7 +1466,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1467,7 +1479,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1479,7 +1492,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1492,7 +1506,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1501,7 +1516,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr =
@@ -1511,7 +1527,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
};
@@ -1526,7 +1543,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-128-CBC Decryption",
@@ -1538,7 +1557,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-192-CBC Encryption",
@@ -1549,7 +1570,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-192-CBC Encryption Scater gather",
@@ -1570,7 +1593,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-192-CBC Decryption Scatter Gather",
@@ -1590,7 +1615,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-256-CBC Decryption",
@@ -1602,7 +1629,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-256-CBC OOP Encryption",
@@ -1612,7 +1641,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-256-CBC OOP Decryption",
@@ -1622,7 +1653,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-128-CTR Encryption",
@@ -1634,7 +1667,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CTR Decryption",
@@ -1646,7 +1680,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-192-CTR Encryption",
@@ -1657,7 +1692,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-192-CTR Decryption",
@@ -1668,7 +1704,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-256-CTR Encryption",
@@ -1680,7 +1717,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-256-CTR Decryption",
@@ -1692,7 +1730,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CTR Encryption (12-byte IV)",
diff --git a/test/test/test_cryptodev_asym.c b/test/test/test_cryptodev_asym.c
new file mode 100644
index 00000000..2fdfc1df
--- /dev/null
+++ b/test/test/test_cryptodev_asym.c
@@ -0,0 +1,1369 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_pause.h>
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+
+#include "test_cryptodev.h"
+#include "test_cryptodev_dh_test_vectors.h"
+#include "test_cryptodev_dsa_test_vectors.h"
+#include "test_cryptodev_mod_test_vectors.h"
+#include "test_cryptodev_rsa_test_vectors.h"
+#include "test_cryptodev_asym_util.h"
+#include "test.h"
+
+#define TEST_NUM_BUFS 10
+#define TEST_NUM_SESSIONS 4
+
+static int gbl_driver_id;
+struct crypto_testsuite_params {
+ struct rte_mempool *op_mpool;
+ struct rte_mempool *session_mpool;
+ struct rte_cryptodev_config conf;
+ struct rte_cryptodev_qp_conf qp_conf;
+ uint8_t valid_devs[RTE_CRYPTO_MAX_DEVS];
+ uint8_t valid_dev_count;
+};
+
+struct crypto_unittest_params {
+ struct rte_cryptodev_asym_session *sess;
+ struct rte_crypto_op *op;
+};
+
+static struct crypto_testsuite_params testsuite_params = { NULL };
+
+static int
+test_rsa_sign_verify(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ uint8_t output_buf[TEST_DATA_SIZE] = {0};
+ uint8_t input_buf[TEST_DATA_SIZE] = {0};
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+
+ if (!sess) {
+ RTE_LOG(ERR, USER1, "line %u "
+ "FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &rsa_xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "unabled to config sym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* set up crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__,
+ "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ asym_op = op->asym;
+ /* Compute sign on the test vector */
+ asym_op->rsa.op_type = RTE_CRYPTO_ASYM_OP_SIGN;
+
+ memcpy(input_buf, &rsaplaintext.data,
+ rsaplaintext.len);
+ asym_op->rsa.message.data = input_buf;
+ asym_op->rsa.message.length = rsaplaintext.len;
+ asym_op->rsa.sign.data = output_buf;
+ asym_op->rsa.pad = RTE_CRYPTO_RSA_PKCS1_V1_5_BT1;
+
+ debug_hexdump(stdout, "message", asym_op->rsa.message.data,
+ asym_op->rsa.message.length);
+
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ debug_hexdump(stdout, "signed message", asym_op->rsa.sign.data,
+ asym_op->rsa.sign.length);
+ asym_op = result_op->asym;
+
+ /* Verify sign */
+ asym_op->rsa.op_type = RTE_CRYPTO_ASYM_OP_VERIFY;
+ asym_op->rsa.pad = RTE_CRYPTO_RSA_PKCS1_V1_5_BT2;
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ status = TEST_SUCCESS;
+ int ret = 0;
+ ret = rsa_verify(&rsaplaintext, result_op);
+ if (ret)
+ status = TEST_FAILED;
+
+error_exit:
+
+ if (sess) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+
+ if (op)
+ rte_crypto_op_free(op);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return status;
+}
+
+static int
+test_rsa_enc_dec(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ uint8_t input_buf[TEST_DATA_SIZE] = {0};
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+
+ if (!sess) {
+ RTE_LOG(ERR, USER1, "line %u "
+ "FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &rsa_xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "unabled to config sym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* set up crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__,
+ "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ asym_op = op->asym;
+ /*Compute encryption on the test vector */
+ asym_op->rsa.op_type = RTE_CRYPTO_ASYM_OP_ENCRYPT;
+
+ memcpy(input_buf, rsaplaintext.data,
+ rsaplaintext.len);
+ asym_op->rsa.message.data = input_buf;
+ asym_op->rsa.message.length = rsaplaintext.len;
+ asym_op->rsa.pad = RTE_CRYPTO_RSA_PKCS1_V1_5_BT2;
+
+ debug_hexdump(stdout, "message", asym_op->rsa.message.data,
+ asym_op->rsa.message.length);
+
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ debug_hexdump(stdout, "encrypted message", asym_op->rsa.message.data,
+ asym_op->rsa.message.length);
+ /* Use the resulted output as decryption Input vector*/
+ asym_op = result_op->asym;
+ asym_op->rsa.op_type = RTE_CRYPTO_ASYM_OP_DECRYPT;
+ asym_op->rsa.pad = RTE_CRYPTO_RSA_PKCS1_V1_5_BT1;
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ status = TEST_SUCCESS;
+ int ret = 0;
+ ret = rsa_verify(&rsaplaintext, result_op);
+ if (ret)
+ status = TEST_FAILED;
+
+error_exit:
+
+ if (sess) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+
+ if (op)
+ rte_crypto_op_free(op);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return status;
+}
+
+/*
+ * One-time suite setup: create the asym op pool, optionally spawn the
+ * OPENSSL vdev, pick the valid devices for the requested driver, then
+ * configure the first valid device and all of its queue pairs.
+ *
+ * Returns TEST_SUCCESS, or TEST_FAILED when no usable device exists or
+ * any resource allocation fails.
+ */
+static int
+testsuite_setup(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_cryptodev_info info;
+	uint32_t i = 0, nb_devs, dev_id;
+	int ret;
+	uint16_t qp_id;
+
+	memset(ts_params, 0, sizeof(*ts_params));
+
+	/* Pool of asymmetric crypto operations shared by all test cases. */
+	ts_params->op_mpool = rte_crypto_op_pool_create(
+			"CRYPTO_ASYM_OP_POOL",
+			RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+			TEST_NUM_BUFS, 0,
+			0,
+			rte_socket_id());
+	if (ts_params->op_mpool == NULL) {
+		RTE_LOG(ERR, USER1, "Can't create ASYM_CRYPTO_OP_POOL\n");
+		return TEST_FAILED;
+	}
+
+	/* Create an OPENSSL device if required */
+	if (gbl_driver_id == rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD))) {
+		nb_devs = rte_cryptodev_device_count_by_driver(
+				rte_cryptodev_driver_id_get(
+				RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)));
+		if (nb_devs < 1) {
+			ret = rte_vdev_init(
+				RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
+				NULL);
+
+			TEST_ASSERT(ret == 0, "Failed to create "
+				"instance of pmd : %s",
+				RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+		}
+	}
+
+	nb_devs = rte_cryptodev_count();
+	if (nb_devs < 1) {
+		RTE_LOG(ERR, USER1, "No crypto devices found?\n");
+		return TEST_FAILED;
+	}
+
+	/* Create list of valid crypto devs */
+	for (i = 0; i < nb_devs; i++) {
+		rte_cryptodev_info_get(i, &info);
+		if (info.driver_id == gbl_driver_id)
+			ts_params->valid_devs[ts_params->valid_dev_count++] = i;
+	}
+
+	if (ts_params->valid_dev_count < 1)
+		return TEST_FAILED;
+
+	/* Set up all the qps on the first of the valid devices found */
+
+	dev_id = ts_params->valid_devs[0];
+
+	rte_cryptodev_info_get(dev_id, &info);
+
+	/* check if device support asymmetric, skip if not */
+	if (!(info.feature_flags &
+				RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)) {
+		RTE_LOG(ERR, USER1, "Device doesn't support asymmetric. "
+				"Test Skipped.\n");
+		return TEST_FAILED;
+	}
+
+	/* configure device with num qp */
+	ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
+	ts_params->conf.socket_id = SOCKET_ID_ANY;
+	TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
+			&ts_params->conf),
+			"Failed to configure cryptodev %u with %u qps",
+			dev_id, ts_params->conf.nb_queue_pairs);
+
+	/*
+	 * Create the asym session mempool BEFORE setting up the queue
+	 * pairs: rte_cryptodev_queue_pair_setup() below is handed this
+	 * mempool, so creating it afterwards (as the code originally did)
+	 * passed a NULL session mempool to every queue pair.
+	 */
+	unsigned int session_size =
+		rte_cryptodev_asym_get_private_session_size(dev_id);
+	/*
+	 * Create mempool with TEST_NUM_SESSIONS * 2,
+	 * to include the session headers
+	 */
+	ts_params->session_mpool = rte_mempool_create(
+				"test_asym_sess_mp",
+				TEST_NUM_SESSIONS * 2,
+				session_size,
+				0, 0, NULL, NULL, NULL,
+				NULL, SOCKET_ID_ANY,
+				0);
+
+	TEST_ASSERT_NOT_NULL(ts_params->session_mpool,
+			"session mempool allocation failed");
+
+	/* configure qp */
+	ts_params->qp_conf.nb_descriptors = DEFAULT_NUM_OPS_INFLIGHT;
+	for (qp_id = 0; qp_id < info.max_nb_queue_pairs; qp_id++) {
+		TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
+			dev_id, qp_id, &ts_params->qp_conf,
+			rte_cryptodev_socket_id(dev_id),
+			ts_params->session_mpool),
+			"Failed to setup queue pair %u on cryptodev %u ASYM",
+			qp_id, dev_id);
+	}
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * One-time suite teardown: report leftover op-pool occupancy and release
+ * the asym session mempool created during setup.
+ */
+static void
+testsuite_teardown(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_mempool *sess_mp = ts_params->session_mpool;
+
+	if (ts_params->op_mpool != NULL)
+		RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
+			rte_mempool_avail_count(ts_params->op_mpool));
+
+	/* Free session mempools */
+	if (sess_mp != NULL) {
+		rte_mempool_free(sess_mp);
+		ts_params->session_mpool = NULL;
+	}
+}
+
+/*
+ * Per-test setup: reconfigure the first valid device with the suite's
+ * default parameters, re-initialize all of its queue pairs, reset its
+ * stats and start it.  Relies on testsuite_setup() having populated
+ * valid_devs[], conf, qp_conf and session_mpool beforehand.
+ */
+static int
+ut_setup(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+	uint16_t qp_id;
+
+	/* Reconfigure device to default parameters */
+	ts_params->conf.socket_id = SOCKET_ID_ANY;
+
+	TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
+			&ts_params->conf),
+			"Failed to configure cryptodev %u",
+			ts_params->valid_devs[0]);
+
+	/* Re-setup every qp with the shared session mempool. */
+	for (qp_id = 0; qp_id < ts_params->conf.nb_queue_pairs ; qp_id++) {
+		TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
+			ts_params->valid_devs[0], qp_id,
+			&ts_params->qp_conf,
+			rte_cryptodev_socket_id(ts_params->valid_devs[0]),
+			ts_params->session_mpool),
+			"Failed to setup queue pair %u on cryptodev %u",
+			qp_id, ts_params->valid_devs[0]);
+	}
+
+	/* Start each test from a clean stats baseline. */
+	rte_cryptodev_stats_reset(ts_params->valid_devs[0]);
+
+	/* Start the device */
+	TEST_ASSERT_SUCCESS(rte_cryptodev_start(ts_params->valid_devs[0]),
+			"Failed to start cryptodev %u",
+			ts_params->valid_devs[0]);
+
+	return TEST_SUCCESS;
+}
+
+/*
+ * Per-test teardown: fetch device stats and stop the device.
+ * NOTE(review): the fetched stats are not inspected or printed here —
+ * presumably a leftover debugging aid; confirm before removing.
+ */
+static void
+ut_teardown(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_cryptodev_stats stats;
+
+	rte_cryptodev_stats_get(ts_params->valid_devs[0], &stats);
+
+	/* Stop the device */
+	rte_cryptodev_stop(ts_params->valid_devs[0]);
+}
+
+/*
+ * Pretty-print one asymmetric xform capability: its type, the operations
+ * the device supports for it and, for modulus-based xforms, the
+ * advertised modulus-length range.
+ */
+static inline void print_asym_capa(
+		const struct rte_cryptodev_asymmetric_xform_capability *capa)
+{
+	int i = 0;
+
+	printf("\nxform type: %s\n===================\n",
+			rte_crypto_asym_xform_strings[capa->xform_type]);
+	printf("operation supported -");
+
+	for (i = 0; i < RTE_CRYPTO_ASYM_OP_LIST_END; i++) {
+		/* check supported operations */
+		if (rte_cryptodev_asym_xform_capability_check_optype(capa, i))
+			printf(" %s",
+					rte_crypto_asym_op_strings[i]);
+		}
+	switch (capa->xform_type) {
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+	case RTE_CRYPTO_ASYM_XFORM_MODINV:
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+	case RTE_CRYPTO_ASYM_XFORM_DH:
+	case RTE_CRYPTO_ASYM_XFORM_DSA:
+		/* Only modulus-based xforms carry a modlen range. */
+		printf(" modlen: min %d max %d increment %d\n",
+				capa->modlen.min,
+				capa->modlen.max,
+				capa->modlen.increment);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Walk the device capability array and print every asymmetric xform
+ * capability.  Skips (successfully) when the device lacks asymmetric
+ * support.  Always returns TEST_SUCCESS; the value of the test is the
+ * printed capability dump.
+ */
+static int
+test_capability(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	uint8_t dev_id = ts_params->valid_devs[0];
+	struct rte_cryptodev_info dev_info;
+	const struct rte_cryptodev_capabilities *dev_capa;
+	int i = 0;
+	struct rte_cryptodev_asym_capability_idx idx;
+	const struct rte_cryptodev_asymmetric_xform_capability *capa;
+
+	rte_cryptodev_info_get(dev_id, &dev_info);
+	if (!(dev_info.feature_flags &
+				RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)) {
+		RTE_LOG(INFO, USER1,
+				"Device doesn't support asymmetric. Test Skipped\n");
+		return TEST_SUCCESS;
+	}
+
+	/* print xform capability */
+	for (i = 0;
+		dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
+		i++) {
+		dev_capa = &(dev_info.capabilities[i]);
+		if (dev_info.capabilities[i].op ==
+				RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			idx.type = dev_capa->asym.xform_capa.xform_type;
+
+			capa = rte_cryptodev_asym_capability_get(dev_id,
+				(const struct
+				rte_cryptodev_asym_capability_idx *) &idx);
+			/*
+			 * Guard against a NULL return (capability lookup can
+			 * fail); dereferencing it in print_asym_capa() would
+			 * crash the whole test binary.
+			 */
+			if (capa != NULL)
+				print_asym_capa(capa);
+		}
+	}
+	return TEST_SUCCESS;
+}
+
+/*
+ * Compute a DH shared secret from the pre-defined test private key and a
+ * fixed fake peer public key, using the xform passed in (copied locally
+ * so the caller's template is not modified).  The result is only hex-
+ * dumped, not verified against a reference value.
+ */
+static int
+test_dh_gen_shared_sec(struct rte_crypto_asym_xform *xfrm)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_mempool *op_mpool = ts_params->op_mpool;
+	struct rte_mempool *sess_mpool = ts_params->session_mpool;
+	uint8_t dev_id = ts_params->valid_devs[0];
+	struct rte_crypto_asym_op *asym_op = NULL;
+	struct rte_crypto_op *op = NULL, *result_op = NULL;
+	struct rte_cryptodev_asym_session *sess = NULL;
+	int status = TEST_SUCCESS;
+	uint8_t output[TEST_DH_MOD_LEN];
+	struct rte_crypto_asym_xform xform = *xfrm;
+	/* Arbitrary stand-in for the remote party's public key. */
+	uint8_t peer[] = "01234567890123456789012345678901234567890123456789";
+
+	sess = rte_cryptodev_asym_session_create(sess_mpool);
+	if (sess == NULL) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s", __LINE__,
+				"Session creation failed");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+	/* set up crypto op data structure */
+	op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+	if (!op) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to allocate asymmetric crypto "
+			"operation struct");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+	asym_op = op->asym;
+
+	/* Setup a xform and op to generate private key only */
+	xform.dh.type = RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE;
+	xform.next = NULL;
+	asym_op->dh.priv_key.data = dh_test_params.priv_key.data;
+	asym_op->dh.priv_key.length = dh_test_params.priv_key.length;
+	asym_op->dh.pub_key.data = (uint8_t *)peer;
+	asym_op->dh.pub_key.length = sizeof(peer);
+	/* Device writes the computed secret into the local buffer. */
+	asym_op->dh.shared_secret.data = output;
+	asym_op->dh.shared_secret.length = sizeof(output);
+
+	if (rte_cryptodev_asym_session_init(dev_id, sess, &xform,
+			sess_mpool) < 0) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "unabled to config sym session");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* attach asymmetric crypto session to crypto operations */
+	rte_crypto_op_attach_asym_session(op, sess);
+
+	RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+	/* Process crypto operation */
+	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Error sending packet for operation");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* Busy-poll until the device returns the completed op. */
+	while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+		rte_pause();
+
+	if (result_op == NULL) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to process asym crypto op");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	debug_hexdump(stdout, "shared secret:",
+			asym_op->dh.shared_secret.data,
+			asym_op->dh.shared_secret.length);
+
+error_exit:
+	if (sess != NULL) {
+		rte_cryptodev_asym_session_clear(dev_id, sess);
+		rte_cryptodev_asym_session_free(sess);
+	}
+	if (op != NULL)
+		rte_crypto_op_free(op);
+	return status;
+}
+
+/*
+ * Ask the device to generate a DH private key only (no public key), using
+ * a local copy of the caller's xform.  The generated key is hex-dumped;
+ * success is judged solely by the op completing.
+ */
+static int
+test_dh_gen_priv_key(struct rte_crypto_asym_xform *xfrm)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_mempool *op_mpool = ts_params->op_mpool;
+	struct rte_mempool *sess_mpool = ts_params->session_mpool;
+	uint8_t dev_id = ts_params->valid_devs[0];
+	struct rte_crypto_asym_op *asym_op = NULL;
+	struct rte_crypto_op *op = NULL, *result_op = NULL;
+	struct rte_cryptodev_asym_session *sess = NULL;
+	int status = TEST_SUCCESS;
+	uint8_t output[TEST_DH_MOD_LEN];
+	struct rte_crypto_asym_xform xform = *xfrm;
+
+	sess = rte_cryptodev_asym_session_create(sess_mpool);
+	if (sess == NULL) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s", __LINE__,
+				"Session creation failed");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+	/* set up crypto op data structure */
+	op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+	if (!op) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to allocate asymmetric crypto "
+			"operation struct");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+	asym_op = op->asym;
+
+	/* Setup a xform and op to generate private key only */
+	xform.dh.type = RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE;
+	xform.next = NULL;
+	/* Device writes the generated key into the local buffer. */
+	asym_op->dh.priv_key.data = output;
+	asym_op->dh.priv_key.length = sizeof(output);
+
+	if (rte_cryptodev_asym_session_init(dev_id, sess, &xform,
+			sess_mpool) < 0) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "unabled to config sym session");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* attach asymmetric crypto session to crypto operations */
+	rte_crypto_op_attach_asym_session(op, sess);
+
+	RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+	/* Process crypto operation */
+	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Error sending packet for operation");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* Busy-poll until the device returns the completed op. */
+	while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+		rte_pause();
+
+	if (result_op == NULL) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to process asym crypto op");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	debug_hexdump(stdout, "private key:",
+			asym_op->dh.priv_key.data,
+			asym_op->dh.priv_key.length);
+
+
+error_exit:
+	if (sess != NULL) {
+		rte_cryptodev_asym_session_clear(dev_id, sess);
+		rte_cryptodev_asym_session_free(sess);
+	}
+	if (op != NULL)
+		rte_crypto_op_free(op);
+
+	return status;
+}
+
+
+/*
+ * Ask the device to derive a DH public key from the pre-defined test
+ * private key.  Uses a local copy of the caller's xform; both keys are
+ * hex-dumped and success is judged solely by the op completing.
+ */
+static int
+test_dh_gen_pub_key(struct rte_crypto_asym_xform *xfrm)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_mempool *op_mpool = ts_params->op_mpool;
+	struct rte_mempool *sess_mpool = ts_params->session_mpool;
+	uint8_t dev_id = ts_params->valid_devs[0];
+	struct rte_crypto_asym_op *asym_op = NULL;
+	struct rte_crypto_op *op = NULL, *result_op = NULL;
+	struct rte_cryptodev_asym_session *sess = NULL;
+	int status = TEST_SUCCESS;
+	uint8_t output[TEST_DH_MOD_LEN];
+	struct rte_crypto_asym_xform xform = *xfrm;
+
+	sess = rte_cryptodev_asym_session_create(sess_mpool);
+	if (sess == NULL) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s", __LINE__,
+				"Session creation failed");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+	/* set up crypto op data structure */
+	op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+	if (!op) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to allocate asymmetric crypto "
+			"operation struct");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+	asym_op = op->asym;
+	/* Setup a xform chain to generate public key
+	 * using test private key
+	 *
+	 */
+	xform.dh.type = RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE;
+	xform.next = NULL;
+
+	asym_op->dh.pub_key.data = output;
+	asym_op->dh.pub_key.length = sizeof(output);
+	/*
+	 * Load the pre-defined private key by struct assignment.  The
+	 * original code rte_malloc()'d a buffer for priv_key.data and then
+	 * immediately overwrote the pointer with this assignment, leaking
+	 * the allocation on every call; the malloc served no purpose and
+	 * has been removed.
+	 */
+	asym_op->dh.priv_key = dh_test_params.priv_key;
+
+	if (rte_cryptodev_asym_session_init(dev_id, sess, &xform,
+			sess_mpool) < 0) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "unabled to config sym session");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* attach asymmetric crypto session to crypto operations */
+	rte_crypto_op_attach_asym_session(op, sess);
+
+	RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+	/* Process crypto operation */
+	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Error sending packet for operation");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+		rte_pause();
+
+	if (result_op == NULL) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to process asym crypto op");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	debug_hexdump(stdout, "pub key:",
+			asym_op->dh.pub_key.data, asym_op->dh.pub_key.length);
+
+	debug_hexdump(stdout, "priv key:",
+			asym_op->dh.priv_key.data, asym_op->dh.priv_key.length);
+
+error_exit:
+	if (sess != NULL) {
+		rte_cryptodev_asym_session_clear(dev_id, sess);
+		rte_cryptodev_asym_session_free(sess);
+	}
+	if (op != NULL)
+		rte_crypto_op_free(op);
+
+	return status;
+}
+
+/*
+ * Ask the device to generate a full DH key pair via a two-xform chain:
+ * private-key generation followed by public-key generation.  Both keys
+ * are hex-dumped; success is judged solely by the op completing.
+ */
+static int
+test_dh_gen_kp(struct rte_crypto_asym_xform *xfrm)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_mempool *op_mpool = ts_params->op_mpool;
+	struct rte_mempool *sess_mpool = ts_params->session_mpool;
+	uint8_t dev_id = ts_params->valid_devs[0];
+	struct rte_crypto_asym_op *asym_op = NULL;
+	struct rte_crypto_op *op = NULL, *result_op = NULL;
+	struct rte_cryptodev_asym_session *sess = NULL;
+	int status = TEST_SUCCESS;
+	uint8_t out_pub_key[TEST_DH_MOD_LEN];
+	uint8_t out_prv_key[TEST_DH_MOD_LEN];
+	struct rte_crypto_asym_xform pub_key_xform;
+	struct rte_crypto_asym_xform xform = *xfrm;
+
+	sess = rte_cryptodev_asym_session_create(sess_mpool);
+	if (sess == NULL) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s", __LINE__,
+				"Session creation failed");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* set up crypto op data structure */
+	op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+	if (!op) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to allocate asymmetric crypto "
+			"operation struct");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+	asym_op = op->asym;
+	/* Setup a xform chain to generate
+	 * private key first followed by
+	 * public key
+	 */
+	xform.dh.type = RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE;
+	pub_key_xform.xform_type = RTE_CRYPTO_ASYM_XFORM_DH;
+	pub_key_xform.dh.type = RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE;
+	/*
+	 * Terminate the chain explicitly: pub_key_xform is a stack
+	 * variable, so its 'next' field was previously left uninitialized
+	 * and a PMD walking the chain could follow a garbage pointer.
+	 */
+	pub_key_xform.next = NULL;
+	xform.next = &pub_key_xform;
+
+	asym_op->dh.pub_key.data = out_pub_key;
+	asym_op->dh.pub_key.length = sizeof(out_pub_key);
+	asym_op->dh.priv_key.data = out_prv_key;
+	asym_op->dh.priv_key.length = sizeof(out_prv_key);
+	if (rte_cryptodev_asym_session_init(dev_id, sess, &xform,
+			sess_mpool) < 0) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "unabled to config sym session");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* attach asymmetric crypto session to crypto operations */
+	rte_crypto_op_attach_asym_session(op, sess);
+
+	RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+	/* Process crypto operation */
+	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Error sending packet for operation");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+		rte_pause();
+
+	if (result_op == NULL) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to process asym crypto op");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+	debug_hexdump(stdout, "priv key:",
+			out_prv_key, asym_op->dh.priv_key.length);
+	debug_hexdump(stdout, "pub key:",
+			out_pub_key, asym_op->dh.pub_key.length);
+
+error_exit:
+	if (sess != NULL) {
+		rte_cryptodev_asym_session_clear(dev_id, sess);
+		rte_cryptodev_asym_session_free(sess);
+	}
+	if (op != NULL)
+		rte_crypto_op_free(op);
+
+	return status;
+}
+
+/*
+ * Modular inverse test: check driver/modlen capability, run a MODINV op
+ * on the file-level 'base' vector and compare the result against the
+ * expected 'mod_inv' vector via verify_modinv().
+ */
+static int
+test_mod_inv(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_mempool *op_mpool = ts_params->op_mpool;
+	struct rte_mempool *sess_mpool = ts_params->session_mpool;
+	uint8_t dev_id = ts_params->valid_devs[0];
+	struct rte_crypto_asym_op *asym_op = NULL;
+	struct rte_crypto_op *op = NULL, *result_op = NULL;
+	struct rte_cryptodev_asym_session *sess = NULL;
+	int status = TEST_SUCCESS;
+	struct rte_cryptodev_asym_capability_idx cap_idx;
+	const struct rte_cryptodev_asymmetric_xform_capability *capability;
+	uint8_t input[TEST_DATA_SIZE] = {0};
+	int ret = 0;
+
+	/* Resolve the "modinv" xform enum into the shared xform template. */
+	if (rte_cryptodev_asym_get_xform_enum(
+		&modinv_xform.xform_type, "modinv") < 0) {
+		RTE_LOG(ERR, USER1,
+				 "Invalid ASYNC algorithm specified\n");
+		return -1;
+	}
+
+	cap_idx.type = modinv_xform.xform_type;
+	capability = rte_cryptodev_asym_capability_get(dev_id,
+					&cap_idx);
+
+	/* Non-zero means the modulus length is outside the device range. */
+	if (rte_cryptodev_asym_xform_capability_check_modlen(
+		capability,
+		modinv_xform.modinv.modulus.length)) {
+		RTE_LOG(ERR, USER1,
+				 "Invalid MODULOUS length specified\n");
+			return -1;
+		}
+
+	sess = rte_cryptodev_asym_session_create(sess_mpool);
+	if (!sess) {
+		RTE_LOG(ERR, USER1, "line %u "
+				"FAILED: %s", __LINE__,
+				"Session creation failed");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	if (rte_cryptodev_asym_session_init(dev_id, sess, &modinv_xform,
+			sess_mpool) < 0) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "unabled to config sym session");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* generate crypto op data structure */
+	op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+	if (!op) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to allocate asymmetric crypto "
+			"operation struct");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	asym_op = op->asym;
+	/* Work on a local copy; the device writes its result in place. */
+	memcpy(input, base, sizeof(base));
+	asym_op->modinv.base.data = input;
+	asym_op->modinv.base.length = sizeof(base);
+
+	/* attach asymmetric crypto session to crypto operations */
+	rte_crypto_op_attach_asym_session(op, sess);
+
+	RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+	/* Process crypto operation */
+	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "Error sending packet for operation");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* Busy-poll until the device returns the completed op. */
+	while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+		rte_pause();
+
+	if (result_op == NULL) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "Failed to process asym crypto op");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* Compare the in-place result against the reference vector. */
+	ret = verify_modinv(mod_inv, result_op);
+	if (ret) {
+		RTE_LOG(ERR, USER1,
+			 "operation verification failed\n");
+		status = TEST_FAILED;
+	}
+
+error_exit:
+	if (sess) {
+		rte_cryptodev_asym_session_clear(dev_id, sess);
+		rte_cryptodev_asym_session_free(sess);
+	}
+
+	if (op)
+		rte_crypto_op_free(op);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return status;
+}
+
+/*
+ * Modular exponentiation test: check driver/modlen capability, run a
+ * MODEX op on the file-level 'base' vector and compare the result
+ * against the expected 'mod_exp' vector via verify_modexp().
+ */
+static int
+test_mod_exp(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_mempool *op_mpool = ts_params->op_mpool;
+	struct rte_mempool *sess_mpool = ts_params->session_mpool;
+	uint8_t dev_id = ts_params->valid_devs[0];
+	struct rte_crypto_asym_op *asym_op = NULL;
+	struct rte_crypto_op *op = NULL, *result_op = NULL;
+	struct rte_cryptodev_asym_session *sess = NULL;
+	int status = TEST_SUCCESS;
+	struct rte_cryptodev_asym_capability_idx cap_idx;
+	const struct rte_cryptodev_asymmetric_xform_capability *capability;
+	uint8_t input[TEST_DATA_SIZE] = {0};
+	int ret = 0;
+
+	/* Resolve the "modexp" xform enum into the shared xform template. */
+	if (rte_cryptodev_asym_get_xform_enum(&modex_xform.xform_type,
+		"modexp")
+		< 0) {
+		RTE_LOG(ERR, USER1,
+				"Invalid ASYNC algorithm specified\n");
+		return -1;
+	}
+
+	/* check for modlen capability */
+	cap_idx.type = modex_xform.xform_type;
+	capability = rte_cryptodev_asym_capability_get(dev_id, &cap_idx);
+
+	/* Non-zero means the modulus length is outside the device range. */
+	if (rte_cryptodev_asym_xform_capability_check_modlen(
+			capability, modex_xform.modex.modulus.length)) {
+		RTE_LOG(ERR, USER1,
+				"Invalid MODULOUS length specified\n");
+			return -1;
+		}
+
+	/* generate crypto op data structure */
+	op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+	if (!op) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to allocate asymmetric crypto "
+			"operation struct");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	sess = rte_cryptodev_asym_session_create(sess_mpool);
+	if (!sess) {
+		RTE_LOG(ERR, USER1,
+				"line %u "
+				"FAILED: %s", __LINE__,
+				"Session creation failed");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	if (rte_cryptodev_asym_session_init(dev_id, sess, &modex_xform,
+			sess_mpool) < 0) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "unabled to config sym session");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	asym_op = op->asym;
+	/* Work on a local copy; the device writes its result in place. */
+	memcpy(input, base, sizeof(base));
+	asym_op->modex.base.data = input;
+	asym_op->modex.base.length = sizeof(base);
+	/* attach asymmetric crypto session to crypto operations */
+	rte_crypto_op_attach_asym_session(op, sess);
+
+	RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+	/* Process crypto operation */
+	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "Error sending packet for operation");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* Busy-poll until the device returns the completed op. */
+	while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+		rte_pause();
+
+	if (result_op == NULL) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "Failed to process asym crypto op");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* Compare the in-place result against the reference vector. */
+	ret = verify_modexp(mod_exp, result_op);
+	if (ret) {
+		RTE_LOG(ERR, USER1,
+			 "operation verification failed\n");
+		status = TEST_FAILED;
+	}
+
+error_exit:
+	if (sess != NULL) {
+		rte_cryptodev_asym_session_clear(dev_id, sess);
+		rte_cryptodev_asym_session_free(sess);
+	}
+
+	if (op != NULL)
+		rte_crypto_op_free(op);
+
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return status;
+}
+
+/*
+ * Umbrella test driving all four DH key-generation scenarios in turn:
+ * key-pair, public-key-from-private, private-only, and shared-secret.
+ * Stops at the first failing sub-test via TEST_ASSERT_EQUAL.
+ *
+ * NOTE(review): "keygenration" is a typo for "keygeneration"; the name
+ * is referenced from the testsuite table, so it is kept here.
+ */
+static int
+test_dh_keygenration(void)
+{
+	int status;
+
+	debug_hexdump(stdout, "p:", dh_xform.dh.p.data, dh_xform.dh.p.length);
+	debug_hexdump(stdout, "g:", dh_xform.dh.g.data, dh_xform.dh.g.length);
+	debug_hexdump(stdout, "priv_key:", dh_test_params.priv_key.data,
+			dh_test_params.priv_key.length);
+
+	RTE_LOG(INFO, USER1,
+		"Test Public and Private key pair generation\n");
+
+	status = test_dh_gen_kp(&dh_xform);
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	RTE_LOG(INFO, USER1,
+		"Test Public Key Generation using pre-defined priv key\n");
+
+	status = test_dh_gen_pub_key(&dh_xform);
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	RTE_LOG(INFO, USER1,
+		"Test Private Key Generation only\n");
+
+	status = test_dh_gen_priv_key(&dh_xform);
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	RTE_LOG(INFO, USER1,
+		"Test shared secret compute\n");
+
+	status = test_dh_gen_shared_sec(&dh_xform);
+	TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+	return status;
+}
+
+/*
+ * DSA sign-then-verify round trip: sign a fixed digest with the test
+ * private key (dsa_xform), then flip the same op to VERIFY with the
+ * signer's public key and check the op completed successfully.
+ */
+static int
+test_dsa_sign(void)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_mempool *op_mpool = ts_params->op_mpool;
+	struct rte_mempool *sess_mpool = ts_params->session_mpool;
+	uint8_t dev_id = ts_params->valid_devs[0];
+	struct rte_crypto_asym_op *asym_op = NULL;
+	struct rte_crypto_op *op = NULL, *result_op = NULL;
+	struct rte_cryptodev_asym_session *sess = NULL;
+	int status = TEST_SUCCESS;
+	uint8_t r[TEST_DH_MOD_LEN];
+	uint8_t s[TEST_DH_MOD_LEN];
+	/* Fixed "digest" input; treated as an opaque byte string. */
+	uint8_t dgst[] = "35d81554afaad2cf18f3a1770d5fedc4ea5be344";
+
+	sess = rte_cryptodev_asym_session_create(sess_mpool);
+	if (sess == NULL) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s", __LINE__,
+				"Session creation failed");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+	/* set up crypto op data structure */
+	op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+	if (!op) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to allocate asymmetric crypto "
+			"operation struct");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+	asym_op = op->asym;
+
+	debug_hexdump(stdout, "p: ", dsa_xform.dsa.p.data,
+			dsa_xform.dsa.p.length);
+	debug_hexdump(stdout, "q: ", dsa_xform.dsa.q.data,
+			dsa_xform.dsa.q.length);
+	debug_hexdump(stdout, "g: ", dsa_xform.dsa.g.data,
+			dsa_xform.dsa.g.length);
+	debug_hexdump(stdout, "priv_key: ", dsa_xform.dsa.x.data,
+			dsa_xform.dsa.x.length);
+
+	if (rte_cryptodev_asym_session_init(dev_id, sess, &dsa_xform,
+				sess_mpool) < 0) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "unabled to config sym session");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* attach asymmetric crypto session to crypto operations */
+	rte_crypto_op_attach_asym_session(op, sess);
+	asym_op->dsa.op_type = RTE_CRYPTO_ASYM_OP_SIGN;
+	asym_op->dsa.message.data = dgst;
+	asym_op->dsa.message.length = sizeof(dgst);
+	/* Device writes the (r, s) signature into the local buffers. */
+	asym_op->dsa.r.length = sizeof(r);
+	asym_op->dsa.r.data = r;
+	asym_op->dsa.s.length = sizeof(s);
+	asym_op->dsa.s.data = s;
+
+	RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+	/* Process crypto operation */
+	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Error sending packet for operation");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* Busy-poll until the device returns the completed op. */
+	while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+		rte_pause();
+
+	if (result_op == NULL) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to process asym crypto op");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	asym_op = result_op->asym;
+
+	debug_hexdump(stdout, "r:",
+			asym_op->dsa.r.data, asym_op->dsa.r.length);
+	debug_hexdump(stdout, "s:",
+			asym_op->dsa.s.data, asym_op->dsa.s.length);
+
+	/* Test PMD DSA sign verification using signer public key */
+	asym_op->dsa.op_type = RTE_CRYPTO_ASYM_OP_VERIFY;
+
+	/* copy signer public key */
+	asym_op->dsa.y.data = dsa_test_params.y.data;
+	asym_op->dsa.y.length = dsa_test_params.y.length;
+
+	/* Process crypto operation */
+	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Error sending packet for operation");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+		rte_pause();
+
+	if (result_op == NULL) {
+		RTE_LOG(ERR, USER1,
+			"line %u FAILED: %s",
+			__LINE__, "Failed to process asym crypto op");
+		status = TEST_FAILED;
+		goto error_exit;
+	}
+
+	/* Verification outcome is carried in the op status field. */
+	if (result_op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+		RTE_LOG(ERR, USER1,
+				"line %u FAILED: %s",
+				__LINE__, "Failed to process asym crypto op");
+		status = TEST_FAILED;
+	}
+error_exit:
+	if (sess != NULL) {
+		rte_cryptodev_asym_session_clear(dev_id, sess);
+		rte_cryptodev_asym_session_free(sess);
+	}
+	if (op != NULL)
+		rte_crypto_op_free(op);
+	return status;
+}
+
+/* Thin wrapper running the DSA sign/verify round trip as one test case. */
+static int
+test_dsa(void)
+{
+	int ret = test_dsa_sign();
+
+	TEST_ASSERT_EQUAL(ret, 0, "Test failed");
+	return ret;
+}
+
+
+/*
+ * Asymmetric-crypto test suite for the OPENSSL PMD.  Each case runs
+ * between ut_setup()/ut_teardown(); the suite as a whole is bracketed
+ * by testsuite_setup()/testsuite_teardown().
+ */
+static struct unit_test_suite cryptodev_openssl_asym_testsuite  = {
+	.suite_name = "Crypto Device OPENSSL ASYM Unit Test Suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		TEST_CASE_ST(ut_setup, ut_teardown, test_capability),
+		TEST_CASE_ST(ut_setup, ut_teardown, test_dsa),
+		TEST_CASE_ST(ut_setup, ut_teardown, test_dh_keygenration),
+		TEST_CASE_ST(ut_setup, ut_teardown, test_rsa_enc_dec),
+		TEST_CASE_ST(ut_setup, ut_teardown, test_rsa_sign_verify),
+		TEST_CASE_ST(ut_setup, ut_teardown, test_mod_inv),
+		TEST_CASE_ST(ut_setup, ut_teardown, test_mod_exp),
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+/*
+ * Entry point registered with the test framework: resolve the OPENSSL
+ * PMD driver id (bailing out if the PMD was not compiled in) and run
+ * the asymmetric test suite against it.
+ */
+static int
+test_cryptodev_openssl_asym(void)
+{
+	gbl_driver_id = rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+
+	if (gbl_driver_id == -1) {
+		RTE_LOG(ERR, USER1, "OPENSSL PMD must be loaded. Check if "
+				"CONFIG_RTE_LIBRTE_PMD_OPENSSL is enabled "
+				"in config file to run this testsuite.\n");
+		return TEST_FAILED;
+	}
+
+	return unit_test_suite_runner(&cryptodev_openssl_asym_testsuite);
+}
+
+REGISTER_TEST_COMMAND(cryptodev_openssl_asym_autotest,
+					  test_cryptodev_openssl_asym);
diff --git a/test/test/test_cryptodev_asym_util.h b/test/test/test_cryptodev_asym_util.h
new file mode 100644
index 00000000..dff0c2ad
--- /dev/null
+++ b/test/test/test_cryptodev_asym_util.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef TEST_CRYPTODEV_ASYM_TEST_UTIL_H__
+#define TEST_CRYPTODEV_ASYM_TEST_UTIL_H__
+
+/* The APIs below compare a result buffer against the original test vector */
+
+/* Compare the RSA result message against the reference plaintext.
+ * Returns 0 on match, -1 on mismatch.
+ */
+static inline int rsa_verify(struct rsa_test_data *rsa_param,
+		struct rte_crypto_op *result_op)
+{
+	int diff = memcmp(rsa_param->data,
+			result_op->asym->rsa.message.data,
+			result_op->asym->rsa.message.length);
+
+	return diff ? -1 : 0;
+}
+
+/* Compare the modular-inverse result against the expected vector.
+ * Returns 0 on match, -1 on mismatch.
+ */
+static inline int verify_modinv(uint8_t *mod_inv,
+		struct rte_crypto_op *result_op)
+{
+	int diff = memcmp(mod_inv, result_op->asym->modinv.base.data,
+			result_op->asym->modinv.base.length);
+
+	return diff ? -1 : 0;
+}
+
+/* Compare the modular-exponentiation result against the expected vector.
+ * Returns 0 on match, -1 on mismatch.
+ */
+static inline int verify_modexp(uint8_t *mod_exp,
+		struct rte_crypto_op *result_op)
+{
+	int diff = memcmp(mod_exp, result_op->asym->modex.base.data,
+			result_op->asym->modex.base.length);
+
+	return diff ? -1 : 0;
+}
+
+#endif /* TEST_CRYPTODEV_ASYM_TEST_UTIL_H__ */
+
+
+
+
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index ed066180..f2701f8f 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -54,6 +54,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
int openssl_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+ int ccp_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
int scheduler_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
int armv8_pmd = rte_cryptodev_driver_id_get(
@@ -67,18 +69,34 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD));
int mrvl_pmd = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD));
+ int virtio_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD));
int nb_segs = 1;
+ rte_cryptodev_info_get(dev_id, &dev_info);
+
if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SG) {
- rte_cryptodev_info_get(dev_id, &dev_info);
- if (!(dev_info.feature_flags &
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
- printf("Device doesn't support scatter-gather. "
+ uint64_t feat_flags = dev_info.feature_flags;
+ uint64_t oop_flag = RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT;
+
+ if (t->feature_mask && BLOCKCIPHER_TEST_FEATURE_OOP) {
+ if (!(feat_flags & oop_flag)) {
+ printf("Device doesn't support out-of-place "
+ "scatter-gather in input mbuf. "
+ "Test Skipped.\n");
+ return 0;
+ }
+ } else {
+ if (!(feat_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) {
+ printf("Device doesn't support in-place "
+ "scatter-gather mbufs. "
"Test Skipped.\n");
- return 0;
+ return 0;
+ }
}
+
nb_segs = 3;
}
@@ -94,7 +112,9 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
driver_id == qat_pmd ||
driver_id == openssl_pmd ||
driver_id == armv8_pmd ||
- driver_id == mrvl_pmd) { /* Fall through */
+ driver_id == mrvl_pmd ||
+ driver_id == ccp_pmd ||
+ driver_id == virtio_pmd) { /* Fall through */
digest_len = tdata->digest.len;
} else if (driver_id == aesni_mb_pmd ||
driver_id == scheduler_pmd) {
@@ -432,11 +452,34 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
uint8_t value;
uint32_t head_unchanged_len, changed_len = 0;
uint32_t i;
+ uint32_t hdroom_used = 0, tlroom_used = 0;
+ uint32_t hdroom = 0;
mbuf = sym_op->m_src;
+ /*
+ * Crypto PMDs specify the headroom & tailroom it would use
+ * when processing the crypto operation. PMD is free to modify
+ * this space, and so the verification check should skip that
+ * block.
+ */
+ hdroom_used = dev_info.min_mbuf_headroom_req;
+ tlroom_used = dev_info.min_mbuf_tailroom_req;
+
+ /* Get headroom */
+ hdroom = rte_pktmbuf_headroom(mbuf);
+
head_unchanged_len = mbuf->buf_len;
for (i = 0; i < mbuf->buf_len; i++) {
+
+ /* Skip headroom used by PMD */
+ if (i == hdroom - hdroom_used)
+ i += hdroom_used;
+
+ /* Skip tailroom used by PMD */
+ if (i == (hdroom + mbuf->data_len))
+ i += tlroom_used;
+
value = *((uint8_t *)(mbuf->buf_addr)+i);
if (value != tmp_src_buf[i]) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
@@ -449,14 +492,13 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
mbuf = sym_op->m_dst;
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH) {
- head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
- sym_op->auth.data.offset;
+ head_unchanged_len = hdroom + sym_op->auth.data.offset;
changed_len = sym_op->auth.data.length;
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN)
changed_len += digest_len;
} else {
/* cipher-only */
- head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
+ head_unchanged_len = hdroom +
sym_op->cipher.data.offset;
changed_len = sym_op->cipher.data.length;
}
@@ -480,15 +522,30 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
uint8_t value;
uint32_t head_unchanged_len = 0, changed_len = 0;
uint32_t i;
+ uint32_t hdroom_used = 0, tlroom_used = 0;
+ uint32_t hdroom = 0;
+
+ /*
+ * Crypto PMDs specify the headroom & tailroom it would use
+ * when processing the crypto operation. PMD is free to modify
+ * this space, and so the verification check should skip that
+ * block.
+ */
+ hdroom_used = dev_info.min_mbuf_headroom_req;
+ tlroom_used = dev_info.min_mbuf_tailroom_req;
mbuf = sym_op->m_src;
+
+ /* Get headroom */
+ hdroom = rte_pktmbuf_headroom(mbuf);
+
if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
- head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
+ head_unchanged_len = hdroom +
sym_op->cipher.data.offset;
changed_len = sym_op->cipher.data.length;
} else {
/* auth-only */
- head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
+ head_unchanged_len = hdroom +
sym_op->auth.data.offset +
sym_op->auth.data.length;
changed_len = 0;
@@ -498,8 +555,18 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
changed_len += digest_len;
for (i = 0; i < mbuf->buf_len; i++) {
+
+ /* Skip headroom used by PMD */
+ if (i == hdroom - hdroom_used)
+ i += hdroom_used;
+
if (i == head_unchanged_len)
i += changed_len;
+
+ /* Skip tailroom used by PMD */
+ if (i == (hdroom + mbuf->data_len))
+ i += tlroom_used;
+
value = *((uint8_t *)(mbuf->buf_addr)+i);
if (value != tmp_src_buf[i]) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
@@ -555,6 +622,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
int openssl_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+ int ccp_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
@@ -568,7 +637,9 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
int qat_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
int mrvl_pmd = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD));
+ int virtio_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD));
switch (test_type) {
case BLKCIPHER_AES_CHAIN_TYPE:
@@ -627,10 +698,14 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
else if (driver_id == dpaa2_sec_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
+ else if (driver_id == ccp_pmd)
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_CCP;
else if (driver_id == dpaa_sec_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC;
else if (driver_id == mrvl_pmd)
- target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MRVL;
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MVSAM;
+ else if (driver_id == virtio_pmd)
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO;
else
TEST_ASSERT(0, "Unrecognized cryptodev type");
diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index edbdaabe..6f7c8929 100644
--- a/test/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -26,7 +26,9 @@
#define BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER 0x0010 /* Scheduler */
#define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC 0x0020 /* DPAA2_SEC flag */
#define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC 0x0040 /* DPAA_SEC flag */
-#define BLOCKCIPHER_TEST_TARGET_PMD_MRVL 0x0080 /* Marvell flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_MVSAM 0x0080 /* Marvell flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_CCP 0x0040 /* CCP flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO 0x0200 /* VIRTIO flag */
#define BLOCKCIPHER_TEST_OP_CIPHER (BLOCKCIPHER_TEST_OP_ENCRYPT | \
BLOCKCIPHER_TEST_OP_DECRYPT)
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index 0be809e3..10334560 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -792,6 +792,30 @@ triple_des192cbc_hmac_sha1_test_vector = {
.len = 20
}
};
+static const struct blockcipher_test_data
+triple_des64cbc_test_vector = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2
+ },
+ .len = 8
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = plaintext_des,
+ .len = 512
+ },
+ .ciphertext = {
+ .data = ciphertext512_des,
+ .len = 512
+ },
+};
static const struct blockcipher_test_data
des_cbc_test_vector = {
@@ -826,7 +850,7 @@ static const struct blockcipher_test_case des_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "DES-CBC Decryption",
@@ -835,7 +859,7 @@ static const struct blockcipher_test_case des_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
};
@@ -1044,7 +1068,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1053,19 +1078,22 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
.test_data = &triple_des128cbc_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-128-CBC SHA1 Decryption Digest Verify",
.test_data = &triple_des128cbc_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-192-CBC HMAC-SHA1 Encryption Digest",
@@ -1075,7 +1103,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1085,21 +1114,24 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
.test_data = &triple_des192cbc_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-192-CBC SHA1 Decryption Digest Verify",
.test_data = &triple_des192cbc_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-128-CTR HMAC-SHA1 Encryption Digest",
@@ -1180,7 +1212,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.test_data = &triple_des128cbc_hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr =
@@ -1189,19 +1222,38 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.test_data = &triple_des128cbc_hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
};
static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
{
+ .test_descr = "3DES-64-CBC Encryption",
+ .test_data = &triple_des64cbc_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "3DES-64-CBC Decryption",
+ .test_data = &triple_des64cbc_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
.test_descr = "3DES-128-CBC Encryption",
.test_data = &triple_des128cbc_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "3DES-128-CBC Decryption",
@@ -1210,7 +1262,9 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "3DES-192-CBC Encryption",
@@ -1220,7 +1274,9 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "3DES-192-CBC Decryption",
@@ -1230,7 +1286,9 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_dh_test_vectors.h b/test/test/test_cryptodev_dh_test_vectors.h
new file mode 100644
index 00000000..fe7510dc
--- /dev/null
+++ b/test/test/test_cryptodev_dh_test_vectors.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef TEST_CRYPTODEV_DH_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_DH_TEST_VECTORS_H_
+
+#include "rte_crypto_asym.h"
+
+#define TEST_DATA_SIZE 4096
+#define TEST_DH_MOD_LEN 1024
+
+
+struct dh_test_param {
+ rte_crypto_param priv_key;
+};
+
+uint8_t dh_priv[] = {
+ 0x46, 0x3c, 0x7b, 0x43, 0xd1, 0xb8, 0xd4, 0x7a,
+ 0x56, 0x28, 0x85, 0x79, 0xcc, 0xd8, 0x90, 0x03,
+ 0x0c, 0x4b, 0xc6, 0xd3, 0x7f, 0xb3, 0x19, 0x84,
+ 0x8a, 0xc6, 0x0d, 0x24, 0x5e, 0xaa, 0x7e, 0x7a,
+ 0x73, 0x88, 0xa6, 0x47, 0x7c, 0x42, 0x78, 0x63,
+ 0x11, 0x12, 0xd3, 0xa0, 0xc5, 0xfe, 0xfd, 0xf2,
+ 0x9e, 0x17, 0x90, 0xe5, 0x6d, 0xcc, 0x20, 0x6f,
+ 0xe8, 0x82, 0x28, 0xbf, 0x5c, 0xe6, 0xd4, 0x86,
+ 0x5c, 0x35, 0x32, 0x97, 0xc2, 0x86, 0x1b, 0xc5,
+ 0x59, 0x1c, 0x0b, 0x1b, 0xec, 0x60, 0x3c, 0x1d,
+ 0x8d, 0x7f, 0xf0, 0xc7, 0x48, 0x3a, 0x51, 0x09,
+ 0xf2, 0x3e, 0x9e, 0x35, 0x74, 0x98, 0x4d, 0xad,
+ 0x39, 0xa7, 0xf2, 0xd2, 0xb4, 0x32, 0xd3, 0xc8,
+ 0xe9, 0x45, 0xbe, 0x56, 0xe7, 0x87, 0xe0, 0xa0,
+ 0x97, 0x6b, 0x5f, 0x99, 0x5e, 0x41, 0x59, 0x33,
+ 0x95, 0x64, 0x0d, 0xe9, 0x58, 0x5b, 0xa6, 0x38
+};
+
+uint8_t dh_p[] = {
+ 0xef, 0xee, 0x8c, 0x8b, 0x3f, 0x85, 0x95, 0xcd,
+ 0x4d, 0x68, 0x5d, 0x4a, 0x5d, 0x1f, 0x2a, 0x2e,
+ 0xdd, 0xcf, 0xef, 0x1b, 0x3b, 0xe9, 0x7b, 0x0c,
+ 0x13, 0xee, 0x76, 0xd5, 0x93, 0xca, 0x8b, 0xc8,
+ 0x0b, 0x97, 0x00, 0xec, 0x1f, 0x34, 0xa2, 0xce,
+ 0x83, 0x8d, 0x80, 0xea, 0xfe, 0x11, 0xed, 0x28,
+ 0xdd, 0x32, 0x22, 0x77, 0x96, 0x4e, 0xc5, 0xed,
+ 0xc8, 0x7a, 0x52, 0x10, 0x22, 0xcc, 0xb2, 0x4d,
+ 0xd3, 0xda, 0x03, 0xf5, 0x1e, 0xa8, 0x79, 0x23,
+ 0x8b, 0xe1, 0x78, 0x47, 0x07, 0x5b, 0x26, 0xbb,
+ 0x53, 0x46, 0x0b, 0x18, 0x5c, 0x07, 0x4e, 0xb6,
+ 0x76, 0xc9, 0xa8, 0xd5, 0x30, 0xa3, 0xbe, 0x8d,
+ 0xae, 0xcd, 0x34, 0x68, 0x62, 0x5f, 0xb9, 0x5c,
+ 0x34, 0x90, 0xf0, 0xda, 0x47, 0x86, 0x36, 0x04,
+ 0x28, 0xbc, 0x7d, 0xae, 0x9d, 0x4e, 0x61, 0x28,
+ 0x70, 0xdb, 0xa6, 0x55, 0x04, 0x46, 0x27, 0xe3
+};
+
+uint8_t dh_g[] = {0x02};
+
+struct dh_test_param dh_test_params = {
+ .priv_key = {
+ .data = dh_priv,
+ .length = sizeof(dh_priv)
+ }
+};
+
+struct rte_crypto_asym_xform dh_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DH,
+ .dh = {
+ .p = {
+ .data = dh_p,
+ .length = sizeof(dh_p)
+ },
+ .g = {
+ .data = dh_g,
+ .length = sizeof(dh_g)
+ },
+ }
+};
+
+#endif /* TEST_CRYPTODEV_DH_TEST_VECTORS_H__ */
diff --git a/test/test/test_cryptodev_dsa_test_vectors.h b/test/test/test_cryptodev_dsa_test_vectors.h
new file mode 100644
index 00000000..bbcb0d72
--- /dev/null
+++ b/test/test/test_cryptodev_dsa_test_vectors.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef TEST_CRYPTODEV_DSA_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_DSA_TEST_VECTORS_H_
+
+#include "rte_crypto_asym.h"
+
+#define TEST_DATA_SIZE 4096
+
+
+struct dsa_test_param {
+ rte_crypto_param y;
+};
+
+static unsigned char dsa_x[] = {
+ 0xc5, 0x3e, 0xae, 0x6d, 0x45, 0x32, 0x31, 0x64,
+ 0xc7, 0xd0, 0x7a, 0xf5, 0x71, 0x57, 0x03, 0x74,
+ 0x4a, 0x63, 0xfc, 0x3a
+};
+
+uint8_t dsa_y[] = {
+ 0x31, 0x3f, 0xd9, 0xeb, 0xca, 0x91, 0x57, 0x4e,
+ 0x1c, 0x2e, 0xeb, 0xe1, 0x51, 0x7c, 0x57, 0xe0,
+ 0xc2, 0x1b, 0x02, 0x09, 0x87, 0x21, 0x40, 0xc5,
+ 0x32, 0x87, 0x61, 0xbb, 0xb2, 0x45, 0x0b, 0x33,
+ 0xf1, 0xb1, 0x8b, 0x40, 0x9c, 0xe9, 0xab, 0x7c,
+ 0x4c, 0xd8, 0xfd, 0xa3, 0x39, 0x1e, 0x8e, 0x34,
+ 0x86, 0x83, 0x57, 0xc1, 0x99, 0xe1, 0x6a, 0x6b,
+ 0x2e, 0xba, 0x06, 0xd6, 0x74, 0x9d, 0xef, 0x79,
+ 0x1d, 0x79, 0xe9, 0x5d, 0x3a, 0x4d, 0x09, 0xb2,
+ 0x4c, 0x39, 0x2a, 0xd8, 0x9d, 0xbf, 0x10, 0x09,
+ 0x95, 0xae, 0x19, 0xc0, 0x10, 0x62, 0x05, 0x6b,
+ 0xb1, 0x4b, 0xce, 0x00, 0x5e, 0x87, 0x31, 0xef,
+ 0xde, 0x17, 0x5f, 0x95, 0xb9, 0x75, 0x08, 0x9b,
+ 0xdc, 0xda, 0xea, 0x56, 0x2b, 0x32, 0x78, 0x6d,
+ 0x96, 0xf5, 0xa3, 0x1a, 0xed, 0xf7, 0x53, 0x64,
+ 0x00, 0x8a, 0xd4, 0xff, 0xfe, 0xbb, 0x97, 0x0b
+};
+
+static unsigned char dsa_p[] = {
+ 0xa8, 0xf9, 0xcd, 0x20, 0x1e, 0x5e, 0x35, 0xd8,
+ 0x92, 0xf8, 0x5f, 0x80, 0xe4, 0xdb, 0x25, 0x99,
+ 0xa5, 0x67, 0x6a, 0x3b, 0x1d, 0x4f, 0x19, 0x03,
+ 0x30, 0xed, 0x32, 0x56, 0xb2, 0x6d, 0x0e, 0x80,
+ 0xa0, 0xe4, 0x9a, 0x8f, 0xff, 0xaa, 0xad, 0x2a,
+ 0x24, 0xf4, 0x72, 0xd2, 0x57, 0x32, 0x41, 0xd4,
+ 0xd6, 0xd6, 0xc7, 0x48, 0x0c, 0x80, 0xb4, 0xc6,
+ 0x7b, 0xb4, 0x47, 0x9c, 0x15, 0xad, 0xa7, 0xea,
+ 0x84, 0x24, 0xd2, 0x50, 0x2f, 0xa0, 0x14, 0x72,
+ 0xe7, 0x60, 0x24, 0x17, 0x13, 0xda, 0xb0, 0x25,
+ 0xae, 0x1b, 0x02, 0xe1, 0x70, 0x3a, 0x14, 0x35,
+ 0xf6, 0x2d, 0xdf, 0x4e, 0xe4, 0xc1, 0xb6, 0x64,
+ 0x06, 0x6e, 0xb2, 0x2f, 0x2e, 0x3b, 0xf2, 0x8b,
+ 0xb7, 0x0a, 0x2a, 0x76, 0xe4, 0xfd, 0x5e, 0xbe,
+ 0x2d, 0x12, 0x29, 0x68, 0x1b, 0x5b, 0x06, 0x43,
+ 0x9a, 0xc9, 0xc7, 0xe9, 0xd8, 0xbd, 0xe2, 0x83
+};
+
+static unsigned char dsa_q[] = {
+ 0xf8, 0x5f, 0x0f, 0x83, 0xac, 0x4d, 0xf7, 0xea,
+ 0x0c, 0xdf, 0x8f, 0x46, 0x9b, 0xfe, 0xea, 0xea,
+ 0x14, 0x15, 0x64, 0x95
+};
+
+static unsigned char dsa_g[] = {
+ 0x2b, 0x31, 0x52, 0xff, 0x6c, 0x62, 0xf1, 0x46,
+ 0x22, 0xb8, 0xf4, 0x8e, 0x59, 0xf8, 0xaf, 0x46,
+ 0x88, 0x3b, 0x38, 0xe7, 0x9b, 0x8c, 0x74, 0xde,
+ 0xea, 0xe9, 0xdf, 0x13, 0x1f, 0x8b, 0x85, 0x6e,
+ 0x3a, 0xd6, 0xc8, 0x45, 0x5d, 0xab, 0x87, 0xcc,
+ 0x0d, 0xa8, 0xac, 0x97, 0x34, 0x17, 0xce, 0x4f,
+ 0x78, 0x78, 0x55, 0x7d, 0x6c, 0xdf, 0x40, 0xb3,
+ 0x5b, 0x4a, 0x0c, 0xa3, 0xeb, 0x31, 0x0c, 0x6a,
+ 0x95, 0xd6, 0x8c, 0xe2, 0x84, 0xad, 0x4e, 0x25,
+ 0xea, 0x28, 0x59, 0x16, 0x11, 0xee, 0x08, 0xb8,
+ 0x44, 0x4b, 0xd6, 0x4b, 0x25, 0xf3, 0xf7, 0xc5,
+ 0x72, 0x41, 0x0d, 0xdf, 0xb3, 0x9c, 0xc7, 0x28,
+ 0xb9, 0xc9, 0x36, 0xf8, 0x5f, 0x41, 0x91, 0x29,
+ 0x86, 0x99, 0x29, 0xcd, 0xb9, 0x09, 0xa6, 0xa3,
+ 0xa9, 0x9b, 0xbe, 0x08, 0x92, 0x16, 0x36, 0x81,
+ 0x71, 0xbd, 0x0b, 0xa8, 0x1d, 0xe4, 0xfe, 0x33
+};
+
+struct dsa_test_param dsa_test_params = {
+ .y = {
+ .data = dsa_y,
+ .length = sizeof(dsa_y)
+ }
+};
+
+struct rte_crypto_asym_xform dsa_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DSA,
+ .dsa = {
+ .p = {
+ .data = dsa_p,
+ .length = sizeof(dsa_p)
+ },
+ .q = {
+ .data = dsa_q,
+ .length = sizeof(dsa_q)
+ },
+ .g = {
+ .data = dsa_g,
+ .length = sizeof(dsa_g)
+ },
+ .x = {
+ .data = dsa_x,
+ .length = sizeof(dsa_x)
+ }
+
+ }
+};
+
+#endif /* TEST_CRYPTODEV_DSA_TEST_VECTORS_H__ */
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 93dacb7b..cf86dbb1 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -319,18 +319,68 @@ hmac_sha512_test_vector = {
}
};
+static const struct blockcipher_test_data
+cmac_test_vector = {
+ .auth_algo = RTE_CRYPTO_AUTH_AES_CMAC,
+ .ciphertext = {
+ .data = plaintext_hash,
+ .len = 512
+ },
+ .auth_key = {
+ .data = {
+ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6,
+ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C
+ },
+ .len = 16
+ },
+ .digest = {
+ .data = {
+ 0x4C, 0x77, 0x87, 0xA0, 0x78, 0x8E, 0xEA, 0x96,
+ 0xC1, 0xEB, 0x1E, 0x4E, 0x95, 0x8F, 0xED, 0x27
+ },
+ .len = 16,
+ .truncated_len = 16
+ }
+};
+
+static const struct blockcipher_test_data
+cmac_test_vector_12 = {
+ .auth_algo = RTE_CRYPTO_AUTH_AES_CMAC,
+ .ciphertext = {
+ .data = plaintext_hash,
+ .len = 512
+ },
+ .auth_key = {
+ .data = {
+ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6,
+ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C
+ },
+ .len = 16
+ },
+ .digest = {
+ .data = {
+ 0x4C, 0x77, 0x87, 0xA0, 0x78, 0x8E, 0xEA, 0x96,
+ 0xC1, 0xEB, 0x1E, 0x4E, 0x95, 0x8F, 0xED, 0x27
+ },
+ .len = 12,
+ .truncated_len = 12
+ }
+};
+
static const struct blockcipher_test_case hash_test_cases[] = {
{
.test_descr = "MD5 Digest",
.test_data = &md5_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "MD5 Digest Verify",
.test_data = &md5_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-MD5 Digest",
@@ -341,7 +391,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-MD5 Digest Verify",
@@ -352,19 +403,24 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA1 Digest",
.test_data = &sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA1 Digest Verify",
.test_data = &sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA1 Digest",
@@ -375,7 +431,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA1 Digest Scatter Gather",
@@ -394,7 +452,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA1 Digest Verify Scatter Gather",
@@ -408,13 +468,17 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "SHA224 Digest",
.test_data = &sha224_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA224 Digest Verify",
.test_data = &sha224_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA224 Digest",
@@ -425,6 +489,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -436,19 +501,24 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA256 Digest",
.test_data = &sha256_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA256 Digest Verify",
.test_data = &sha256_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA256 Digest",
@@ -459,7 +529,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA256 Digest Verify",
@@ -470,19 +542,25 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA384 Digest",
.test_data = &sha384_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA384 Digest Verify",
.test_data = &sha384_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA384 Digest",
@@ -493,7 +571,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA384 Digest Verify",
@@ -504,19 +584,25 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA512 Digest",
.test_data = &sha512_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA512 Digest Verify",
.test_data = &sha512_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA512 Digest",
@@ -527,7 +613,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA512 Digest Verify",
@@ -538,8 +626,34 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
+ {
+ .test_descr = "CMAC Digest 12B",
+ .test_data = &cmac_test_vector_12,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "CMAC Digest Verify 12B",
+ .test_data = &cmac_test_vector_12,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "CMAC Digest 16B",
+ .test_data = &cmac_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "CMAC Digest Verify 16B",
+ .test_data = &cmac_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ }
};
#endif /* TEST_CRYPTODEV_HASH_TEST_VECTORS_H_ */
diff --git a/test/test/test_cryptodev_mod_test_vectors.h b/test/test/test_cryptodev_mod_test_vectors.h
new file mode 100644
index 00000000..a25c676a
--- /dev/null
+++ b/test/test/test_cryptodev_mod_test_vectors.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef TEST_CRYPTODEV_MOD_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_MOD_TEST_VECTORS_H_
+
+/* modular operation test data */
+uint8_t base[] = {
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85,
+ 0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD,
+ 0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50
+};
+
+uint8_t mod_p[] = {
+ 0x00, 0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00,
+ 0x0a, 0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5,
+ 0xce, 0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a,
+ 0xa2, 0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde,
+ 0x0a, 0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a,
+ 0x3d, 0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63,
+ 0x6a, 0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27,
+ 0x6e, 0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa,
+ 0x72, 0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53,
+ 0x87, 0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a,
+ 0x62, 0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63,
+ 0x18, 0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33,
+ 0x4e, 0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3,
+ 0x03, 0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e,
+ 0xee, 0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb,
+ 0xa6, 0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde,
+ 0x55
+};
+
+uint8_t mod_e[] = {0x01, 0x00, 0x01};
+
+/* Precomputed modular exponentiation for verification */
+uint8_t mod_exp[] = {
+ 0x2C, 0x60, 0x75, 0x45, 0x98, 0x9D, 0xE0, 0x72,
+ 0xA0, 0x9D, 0x3A, 0x9E, 0x03, 0x38, 0x73, 0x3C,
+ 0x31, 0x83, 0x04, 0xFE, 0x75, 0x43, 0xE6, 0x17,
+ 0x5C, 0x01, 0x29, 0x51, 0x69, 0x33, 0x62, 0x2D,
+ 0x78, 0xBE, 0xAE, 0xC4, 0xBC, 0xDE, 0x7E, 0x2C,
+ 0x77, 0x84, 0xF2, 0xC5, 0x14, 0xB5, 0x2F, 0xF7,
+ 0xC5, 0x94, 0xEF, 0x86, 0x75, 0x75, 0xB5, 0x11,
+ 0xE5, 0x0E, 0x0A, 0x29, 0x76, 0xE2, 0xEA, 0x32,
+ 0x0E, 0x43, 0x77, 0x7E, 0x2C, 0x27, 0xAC, 0x3B,
+ 0x86, 0xA5, 0xDB, 0xC9, 0x48, 0x40, 0xE8, 0x99,
+ 0x9A, 0x0A, 0x3D, 0xD6, 0x74, 0xFA, 0x2E, 0x2E,
+ 0x5B, 0xAF, 0x8C, 0x99, 0x44, 0x2A, 0x67, 0x38,
+ 0x27, 0x41, 0x59, 0x9D, 0xB8, 0x51, 0xC9, 0xF7,
+ 0x43, 0x61, 0x31, 0x6E, 0xF1, 0x25, 0x38, 0x7F,
+ 0xAE, 0xC6, 0xD0, 0xBB, 0x29, 0x76, 0x3F, 0x46,
+ 0x2E, 0x1B, 0xE4, 0x67, 0x71, 0xE3, 0x87, 0x5A
+};
+
+/* Precomputed modular inverse for verification */
+uint8_t mod_inv[] = {
+ 0x52, 0xb1, 0xa3, 0x8c, 0xc5, 0x8a, 0xb9, 0x1f,
+ 0xb6, 0x82, 0xf5, 0x6a, 0x9a, 0xde, 0x8d, 0x2e,
+ 0x62, 0x4b, 0xac, 0x49, 0x21, 0x1d, 0x30, 0x4d,
+ 0x32, 0xac, 0x1f, 0x40, 0x6d, 0x52, 0xc7, 0x9b,
+ 0x6c, 0x0a, 0x82, 0x3a, 0x2c, 0xaf, 0x6b, 0x6d,
+ 0x17, 0xbe, 0x43, 0xed, 0x97, 0x78, 0xeb, 0x4c,
+ 0x92, 0x6f, 0xcf, 0xed, 0xb1, 0x09, 0xcb, 0x27,
+ 0xc2, 0xde, 0x62, 0xfd, 0x21, 0xe6, 0xbd, 0x4f,
+ 0xfe, 0x7a, 0x1b, 0x50, 0xfe, 0x10, 0x4a, 0xb0,
+ 0xb7, 0xcf, 0xdb, 0x7d, 0xca, 0xc2, 0xf0, 0x1c,
+ 0x39, 0x48, 0x6a, 0xb5, 0x4d, 0x8c, 0xfe, 0x63,
+ 0x91, 0x9c, 0x21, 0xc3, 0x0e, 0x76, 0xad, 0x44,
+ 0x8d, 0x54, 0x33, 0x99, 0xe1, 0x80, 0x19, 0xba,
+ 0xb5, 0xac, 0x7d, 0x9c, 0xce, 0x91, 0x2a, 0xd9,
+ 0x2c, 0xe1, 0x16, 0xd6, 0xd7, 0xcf, 0x9d, 0x05,
+ 0x9a, 0x66, 0x9a, 0x3a, 0xc1, 0xb8, 0x4b, 0xc3
+};
+
+struct rte_crypto_asym_xform modex_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+ .modex = {
+ .modulus = {
+ .data = mod_p,
+ .length = sizeof(mod_p)
+ },
+ .exponent = {
+ .data = mod_e,
+ .length = sizeof(mod_e)
+ }
+ }
+};
+
+struct rte_crypto_asym_xform modinv_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODINV,
+ .modinv = {
+ .modulus = {
+ .data = mod_p,
+ .length = sizeof(mod_p)
+ }
+ }
+};
+
+#endif /* TEST_CRYPTODEV_MOD_TEST_VECTORS_H_ */
diff --git a/test/test/test_cryptodev_rsa_test_vectors.h b/test/test/test_cryptodev_rsa_test_vectors.h
new file mode 100644
index 00000000..3f8c41a6
--- /dev/null
+++ b/test/test/test_cryptodev_rsa_test_vectors.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef TEST_CRYPTODEV_RSA_TEST_VECTORS_H__
+#define TEST_CRYPTODEV_RSA_TEST_VECTORS_H__
+
+#include "rte_crypto_asym.h"
+
+#define TEST_DATA_SIZE 4096
+
+struct rsa_test_data {
+ uint8_t data[TEST_DATA_SIZE];
+ unsigned int len;
+};
+
+struct rsa_test_data rsaplaintext = {
+ .data = {
+ 0xf8, 0xba, 0x1a, 0x55, 0xd0, 0x2f, 0x85, 0xae,
+ 0x96, 0x7b, 0xb6, 0x2f, 0xb6, 0xcd, 0xa8, 0xeb,
+ 0x7e, 0x78, 0xa0, 0x50
+ },
+ .len = 20
+};
+
+uint8_t rsa_n[] = {
+ 0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00,
+ 0x0a, 0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5,
+ 0xce, 0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a,
+ 0xa2, 0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde,
+ 0x0a, 0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a,
+ 0x3d, 0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63,
+ 0x6a, 0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27,
+ 0x6e, 0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa,
+ 0x72, 0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53,
+ 0x87, 0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a,
+ 0x62, 0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63,
+ 0x18, 0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33,
+ 0x4e, 0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3,
+ 0x03, 0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e,
+ 0xee, 0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb,
+ 0xa6, 0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde,
+ 0x55
+};
+
+uint8_t rsa_d[] = {
+ 0x24, 0xd7, 0xea, 0xf4, 0x7f, 0xe0, 0xca, 0x31,
+ 0x4d, 0xee, 0xc4, 0xa1, 0xbe, 0xab, 0x06, 0x61,
+ 0x32, 0xe7, 0x51, 0x46, 0x27, 0xdf, 0x72, 0xe9,
+ 0x6f, 0xa8, 0x4c, 0xd1, 0x26, 0xef, 0x65, 0xeb,
+ 0x67, 0xff, 0x5f, 0xa7, 0x3b, 0x25, 0xb9, 0x08,
+ 0x8e, 0xa0, 0x47, 0x56, 0xe6, 0x8e, 0xf9, 0xd3,
+ 0x18, 0x06, 0x3d, 0xc6, 0xb1, 0xf8, 0xdc, 0x1b,
+ 0x8d, 0xe5, 0x30, 0x54, 0x26, 0xac, 0x16, 0x3b,
+ 0x7b, 0xad, 0x46, 0x9e, 0x21, 0x6a, 0x57, 0xe6,
+ 0x81, 0x56, 0x1d, 0x2a, 0xc4, 0x39, 0x63, 0x67,
+ 0x81, 0x2c, 0xca, 0xcc, 0xf8, 0x42, 0x04, 0xbe,
+ 0xcf, 0x8f, 0x6c, 0x5b, 0x81, 0x46, 0xb9, 0xc7,
+ 0x62, 0x90, 0x87, 0x35, 0x03, 0x9b, 0x89, 0xcb,
+ 0x37, 0xbd, 0xf1, 0x1b, 0x99, 0xa1, 0x9a, 0x78,
+ 0xd5, 0x4c, 0xdd, 0x3f, 0x41, 0x0c, 0xb7, 0x1a,
+ 0xd9, 0x7b, 0x87, 0x5f, 0xbe, 0xb1, 0x83, 0x41
+};
+
+uint8_t rsa_e[] = {0x01, 0x00, 0x01};
+
+/** rsa xform using exponent key */
+struct rte_crypto_asym_xform rsa_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+ .rsa = {
+ .n = {
+ .data = rsa_n,
+ .length = sizeof(rsa_n)
+ },
+ .e = {
+ .data = rsa_e,
+ .length = sizeof(rsa_e)
+ },
+ .key_type = RTE_RSA_KEY_TYPE_EXP,
+ .d = {
+ .data = rsa_d,
+ .length = sizeof(rsa_d)
+ }
+ }
+};
+
+#endif /* TEST_CRYPTODEV_RSA_TEST_VECTORS_H__ */
diff --git a/test/test/test_devargs.c b/test/test/test_devargs.c
deleted file mode 100644
index b8f3146f..00000000
--- a/test/test/test_devargs.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2014 6WIND S.A.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/queue.h>
-
-#include <rte_debug.h>
-#include <rte_devargs.h>
-
-#include "test.h"
-
-/* clear devargs list that was modified by the test */
-static void free_devargs_list(void)
-{
- struct rte_devargs *devargs;
-
- while (!TAILQ_EMPTY(&devargs_list)) {
- devargs = TAILQ_FIRST(&devargs_list);
- TAILQ_REMOVE(&devargs_list, devargs, next);
- free(devargs->args);
- free(devargs);
- }
-}
-
-static int
-test_devargs(void)
-{
- struct rte_devargs_list save_devargs_list;
- struct rte_devargs *devargs;
-
- /* save the real devargs_list, it is restored at the end of the test */
- save_devargs_list = devargs_list;
- TAILQ_INIT(&devargs_list);
-
- /* test valid cases */
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "08:00.1") < 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "0000:5:00.0") < 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI, "04:00.0,arg=val") < 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI, "0000:01:00.1") < 0)
- goto fail;
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) != 2)
- goto fail;
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_BLACKLISTED_PCI) != 2)
- goto fail;
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_VIRTUAL) != 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "net_ring0") < 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "net_ring1,key=val,k2=val2") < 0)
- goto fail;
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_VIRTUAL) != 2)
- goto fail;
- free_devargs_list();
-
- /* check virtual device with argument parsing */
- if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "net_ring1,k1=val,k2=val2") < 0)
- goto fail;
- devargs = TAILQ_FIRST(&devargs_list);
- if (strncmp(devargs->name, "net_ring1",
- sizeof(devargs->name)) != 0)
- goto fail;
- if (!devargs->args || strcmp(devargs->args, "k1=val,k2=val2") != 0)
- goto fail;
- free_devargs_list();
-
- /* check PCI device with empty argument parsing */
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "04:00.1") < 0)
- goto fail;
- devargs = TAILQ_FIRST(&devargs_list);
- if (strcmp(devargs->name, "04:00.1") != 0)
- goto fail;
- if (!devargs->args || strcmp(devargs->args, "") != 0)
- goto fail;
- free_devargs_list();
-
- /* test error case: bad PCI address */
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "08:1") == 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "00.1") == 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "foo") == 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, ",") == 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "000f:0:0") == 0)
- goto fail;
-
- devargs_list = save_devargs_list;
- return 0;
-
- fail:
- free_devargs_list();
- devargs_list = save_devargs_list;
- return -1;
-}
-
-REGISTER_TEST_COMMAND(devargs_autotest, test_devargs);
diff --git a/test/test/test_distributor_perf.c b/test/test/test_distributor_perf.c
index 557715e1..edf1998a 100644
--- a/test/test/test_distributor_perf.c
+++ b/test/test/test_distributor_perf.c
@@ -31,7 +31,7 @@ struct worker_stats worker_stats[RTE_MAX_LCORE];
* worker thread used for testing the time to do a round-trip of a cache
* line between two cores and back again
*/
-static void
+static int
flip_bit(volatile uint64_t *arg)
{
uint64_t old_val = 0;
@@ -41,6 +41,7 @@ flip_bit(volatile uint64_t *arg)
old_val = *arg;
*arg = 0;
}
+ return 0;
}
/*
diff --git a/test/test/test_eal_flags.c b/test/test/test_eal_flags.c
index 37c42efe..3a990766 100644
--- a/test/test/test_eal_flags.c
+++ b/test/test/test_eal_flags.c
@@ -33,7 +33,7 @@
#define memtest "memtest"
#define memtest1 "memtest1"
#define memtest2 "memtest2"
-#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10)
+#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 20)
#define launch_proc(ARGV) process_dup(ARGV, \
sizeof(ARGV)/(sizeof(ARGV[0])), __func__)
@@ -221,30 +221,6 @@ get_number_of_sockets(void)
}
#endif
-static char*
-get_current_prefix(char * prefix, int size)
-{
- char path[PATH_MAX] = {0};
- char buf[PATH_MAX] = {0};
-
- /* get file for config (fd is always 3) */
- snprintf(path, sizeof(path), "/proc/self/fd/%d", 3);
-
- /* return NULL on error */
- if (readlink(path, buf, sizeof(buf)) == -1)
- return NULL;
-
- /* get the basename */
- snprintf(buf, sizeof(buf), "%s", basename(buf));
-
- /* copy string all the way from second char up to start of _config */
- snprintf(prefix, size, "%.*s",
- (int)(strnlen(buf, sizeof(buf)) - sizeof("_config")),
- &buf[1]);
-
- return prefix;
-}
-
/*
* Test that the app doesn't run with invalid whitelist option.
* Final tests ensures it does run with valid options as sanity check (one
@@ -376,17 +352,17 @@ test_invalid_vdev_flag(void)
#endif
/* Test with invalid vdev option */
- const char *vdevinval[] = {prgname, prefix, "-n", "1",
+ const char *vdevinval[] = {prgname, prefix, no_huge, "-n", "1",
"-c", "1", vdev, "eth_dummy"};
/* Test with valid vdev option */
- const char *vdevval1[] = {prgname, prefix, "-n", "1",
+ const char *vdevval1[] = {prgname, prefix, no_huge, "-n", "1",
"-c", "1", vdev, "net_ring0"};
- const char *vdevval2[] = {prgname, prefix, "-n", "1",
+ const char *vdevval2[] = {prgname, prefix, no_huge, "-n", "1",
"-c", "1", vdev, "net_ring0,args=test"};
- const char *vdevval3[] = {prgname, prefix, "-n", "1",
+ const char *vdevval3[] = {prgname, prefix, no_huge, "-n", "1",
"-c", "1", vdev, "net_ring0,nodeaction=r1:0:CREATE"};
if (launch_proc(vdevinval) == 0) {
@@ -697,16 +673,18 @@ test_invalid_n_flag(void)
static int
test_no_hpet_flag(void)
{
- char prefix[PATH_MAX], tmp[PATH_MAX];
+ char prefix[PATH_MAX] = "";
#ifdef RTE_EXEC_ENV_BSDAPP
return 0;
-#endif
+#else
+ char tmp[PATH_MAX];
if (get_current_prefix(tmp, sizeof(tmp)) == NULL) {
printf("Error - unable to get current prefix!\n");
return -1;
}
snprintf(prefix, sizeof(prefix), "--file-prefix=%s", tmp);
+#endif
/* With --no-hpet */
const char *argv1[] = {prgname, prefix, mp_flag, no_hpet, "-c", "1", "-n", "2"};
@@ -849,13 +827,10 @@ test_misc_flags(void)
const char *argv4[] = {prgname, prefix, mp_flag, "-c", "1", "--syslog"};
/* With invalid --syslog */
const char *argv5[] = {prgname, prefix, mp_flag, "-c", "1", "--syslog", "error"};
- /* With no-sh-conf */
+ /* With no-sh-conf, also use no-huge to ensure this test runs on BSD */
const char *argv6[] = {prgname, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
- no_shconf, nosh_prefix };
+ no_shconf, nosh_prefix, no_huge};
-#ifdef RTE_EXEC_ENV_BSDAPP
- return 0;
-#endif
/* With --huge-dir */
const char *argv7[] = {prgname, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
"--file-prefix=hugedir", "--huge-dir", hugepath};
@@ -889,6 +864,7 @@ test_misc_flags(void)
const char *argv15[] = {prgname, "--file-prefix=intr",
"-c", "1", "-n", "2", "--vfio-intr=invalid"};
+ /* run all tests also applicable to FreeBSD first */
if (launch_proc(argv0) == 0) {
printf("Error - process ran ok with invalid flag\n");
@@ -902,6 +878,16 @@ test_misc_flags(void)
printf("Error - process did not run ok with -v flag\n");
return -1;
}
+ if (launch_proc(argv6) != 0) {
+ printf("Error - process did not run ok with --no-shconf flag\n");
+ return -1;
+ }
+
+#ifdef RTE_EXEC_ENV_BSDAPP
+ /* no more tests to be done on FreeBSD */
+ return 0;
+#endif
+
if (launch_proc(argv3) != 0) {
printf("Error - process did not run ok with --syslog flag\n");
return -1;
@@ -914,13 +900,6 @@ test_misc_flags(void)
printf("Error - process run ok with invalid --syslog flag\n");
return -1;
}
- if (launch_proc(argv6) != 0) {
- printf("Error - process did not run ok with --no-shconf flag\n");
- return -1;
- }
-#ifdef RTE_EXEC_ENV_BSDAPP
- return 0;
-#endif
if (launch_proc(argv7) != 0) {
printf("Error - process did not run ok with --huge-dir flag\n");
return -1;
@@ -977,9 +956,15 @@ test_file_prefix(void)
* 6. run a primary process with memtest2 prefix
* 7. check that only memtest2 hugefiles are present in the hugedir
*/
+ char prefix[PATH_MAX] = "";
#ifdef RTE_EXEC_ENV_BSDAPP
return 0;
+#else
+ if (get_current_prefix(prefix, sizeof(prefix)) == NULL) {
+ printf("Error - unable to get current prefix!\n");
+ return -1;
+ }
#endif
/* this should fail unless the test itself is run with "memtest" prefix */
@@ -994,12 +979,6 @@ test_file_prefix(void)
const char *argv2[] = {prgname, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
"--file-prefix=" memtest2 };
- char prefix[32];
- if (get_current_prefix(prefix, sizeof(prefix)) == NULL) {
- printf("Error - unable to get current prefix!\n");
- return -1;
- }
-
/* check if files for current prefix are present */
if (process_hugefiles(prefix, HUGEPAGE_CHECK_EXISTS) != 1) {
printf("Error - hugepage files for %s were not created!\n", prefix);
@@ -1138,10 +1117,11 @@ test_memory_flags(void)
#ifdef RTE_EXEC_ENV_BSDAPP
int i, num_sockets = 1;
#else
- int i, num_sockets = get_number_of_sockets();
+ int i, num_sockets = RTE_MIN(get_number_of_sockets(),
+ RTE_MAX_NUMA_NODES);
#endif
- if (num_sockets <= 0 || num_sockets > RTE_MAX_NUMA_NODES) {
+ if (num_sockets <= 0) {
printf("Error - cannot get number of sockets!\n");
return -1;
}
@@ -1151,11 +1131,12 @@ test_memory_flags(void)
/* add one extra socket */
for (i = 0; i < num_sockets + 1; i++) {
snprintf(buf, sizeof(buf), "%s%s", invalid_socket_mem, DEFAULT_MEM_SIZE);
- snprintf(invalid_socket_mem, sizeof(invalid_socket_mem), "%s", buf);
+ strlcpy(invalid_socket_mem, buf, sizeof(invalid_socket_mem));
if (num_sockets + 1 - i > 1) {
snprintf(buf, sizeof(buf), "%s,", invalid_socket_mem);
- snprintf(invalid_socket_mem, sizeof(invalid_socket_mem), "%s", buf);
+ strlcpy(invalid_socket_mem, buf,
+ sizeof(invalid_socket_mem));
}
}
@@ -1167,11 +1148,12 @@ test_memory_flags(void)
/* add one extra socket */
for (i = 0; i < num_sockets; i++) {
snprintf(buf, sizeof(buf), "%s%s", valid_socket_mem, DEFAULT_MEM_SIZE);
- snprintf(valid_socket_mem, sizeof(valid_socket_mem), "%s", buf);
+ strlcpy(valid_socket_mem, buf, sizeof(valid_socket_mem));
if (num_sockets - i > 1) {
snprintf(buf, sizeof(buf), "%s,", valid_socket_mem);
- snprintf(valid_socket_mem, sizeof(valid_socket_mem), "%s", buf);
+ strlcpy(valid_socket_mem, buf,
+ sizeof(valid_socket_mem));
}
}
diff --git a/test/test/test_event_crypto_adapter.c b/test/test/test_event_crypto_adapter.c
new file mode 100644
index 00000000..de258c34
--- /dev/null
+++ b/test/test/test_event_crypto_adapter.c
@@ -0,0 +1,928 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_cryptodev.h>
+#include <rte_eventdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_service.h>
+#include <rte_event_crypto_adapter.h>
+#include "test.h"
+
+#define PKT_TRACE 0
+#define NUM 1
+#define DEFAULT_NUM_XFORMS (2)
+#define NUM_MBUFS (8191)
+#define MBUF_CACHE_SIZE (256)
+#define MAXIMUM_IV_LENGTH (16)
+#define DEFAULT_NUM_OPS_INFLIGHT (128)
+#define MAX_NB_SESSIONS 4
+#define TEST_APP_PORT_ID 0
+#define TEST_APP_EV_QUEUE_ID 0
+#define TEST_APP_EV_PRIORITY 0
+#define TEST_APP_EV_FLOWID 0xAABB
+#define TEST_CRYPTO_EV_QUEUE_ID 1
+#define TEST_ADAPTER_ID 0
+#define TEST_CDEV_ID 0
+#define TEST_CDEV_QP_ID 0
+#define PACKET_LENGTH 64
+#define NB_TEST_PORTS 1
+#define NB_TEST_QUEUES 2
+#define NUM_CORES 1
+#define CRYPTODEV_NAME_NULL_PMD crypto_null
+
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + PACKET_LENGTH)
+#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
+ sizeof(struct rte_crypto_sym_op) + \
+ DEFAULT_NUM_XFORMS * \
+ sizeof(struct rte_crypto_sym_xform))
+
+/* Handle log statements in same manner as test macros */
+#define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__)
+
+static const uint8_t text_64B[] = {
+ 0x05, 0x15, 0x77, 0x32, 0xc9, 0x66, 0x91, 0x50,
+ 0x93, 0x9f, 0xbb, 0x4e, 0x2e, 0x5a, 0x02, 0xd0,
+ 0x2d, 0x9d, 0x31, 0x5d, 0xc8, 0x9e, 0x86, 0x36,
+ 0x54, 0x5c, 0x50, 0xe8, 0x75, 0x54, 0x74, 0x5e,
+ 0xd5, 0xa2, 0x84, 0x21, 0x2d, 0xc5, 0xf8, 0x1c,
+ 0x55, 0x1a, 0xba, 0x91, 0xce, 0xb5, 0xa3, 0x1e,
+ 0x31, 0xbf, 0xe9, 0xa1, 0x97, 0x5c, 0x2b, 0xd6,
+ 0x57, 0xa5, 0x9f, 0xab, 0xbd, 0xb0, 0x9b, 0x9c
+};
+
+struct event_crypto_adapter_test_params {
+ struct rte_mempool *mbuf_pool;
+ struct rte_mempool *op_mpool;
+ struct rte_mempool *session_mpool;
+ struct rte_cryptodev_config *config;
+ uint8_t crypto_event_port_id;
+};
+
+struct rte_event response_info = {
+ .queue_id = TEST_APP_EV_QUEUE_ID,
+ .sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .flow_id = TEST_APP_EV_FLOWID,
+ .priority = TEST_APP_EV_PRIORITY
+};
+
+struct rte_event_crypto_request request_info = {
+ .cdev_id = TEST_CDEV_ID,
+ .queue_pair_id = TEST_CDEV_QP_ID
+};
+
+static struct event_crypto_adapter_test_params params;
+static uint8_t crypto_adapter_setup_done;
+static uint32_t slcore_id;
+static int evdev;
+
+static struct rte_mbuf *
+alloc_fill_mbuf(struct rte_mempool *mpool, const uint8_t *data,
+ size_t len, uint8_t blocksize)
+{
+ struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
+ size_t t_len = len - (blocksize ? (len % blocksize) : 0);
+
+ if (m) {
+ char *dst = rte_pktmbuf_append(m, t_len);
+
+ if (!dst) {
+ rte_pktmbuf_free(m);
+ return NULL;
+ }
+
+ rte_memcpy(dst, (const void *)data, t_len);
+ }
+ return m;
+}
+
+static int
+send_recv_ev(struct rte_event *ev)
+{
+ struct rte_crypto_op *op;
+ struct rte_event recv_ev;
+ int ret;
+
+ ret = rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID, ev, NUM);
+ TEST_ASSERT_EQUAL(ret, NUM,
+ "Failed to send event to crypto adapter\n");
+
+ while (rte_event_dequeue_burst(evdev,
+ TEST_APP_PORT_ID, &recv_ev, NUM, 0) == 0)
+ rte_pause();
+
+ op = recv_ev.event_ptr;
+#if PKT_TRACE
+ struct rte_mbuf *m = op->sym->m_src;
+ rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
+#endif
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_crypto_adapter_stats(void)
+{
+ struct rte_event_crypto_adapter_stats stats;
+
+ rte_event_crypto_adapter_stats_get(TEST_ADAPTER_ID, &stats);
+ printf(" +------------------------------------------------------+\n");
+ printf(" + Crypto adapter stats for instance %u:\n", TEST_ADAPTER_ID);
+ printf(" + Event port poll count %" PRIx64 "\n",
+ stats.event_poll_count);
+ printf(" + Event dequeue count %" PRIx64 "\n",
+ stats.event_deq_count);
+ printf(" + Cryptodev enqueue count %" PRIx64 "\n",
+ stats.crypto_enq_count);
+ printf(" + Cryptodev enqueue failed count %" PRIx64 "\n",
+ stats.crypto_enq_fail);
+ printf(" + Cryptodev dequeue count %" PRIx64 "\n",
+ stats.crypto_deq_count);
+ printf(" + Event enqueue count %" PRIx64 "\n",
+ stats.event_enq_count);
+ printf(" + Event enqueue retry count %" PRIx64 "\n",
+ stats.event_enq_retry_count);
+ printf(" + Event enqueue fail count %" PRIx64 "\n",
+ stats.event_enq_fail_count);
+ printf(" +------------------------------------------------------+\n");
+
+ rte_event_crypto_adapter_stats_reset(TEST_ADAPTER_ID);
+ return TEST_SUCCESS;
+}
+
+static int
+test_op_forward_mode(uint8_t session_less)
+{
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_cryptodev_sym_session *sess;
+ union rte_event_crypto_metadata m_data;
+ struct rte_crypto_sym_op *sym_op;
+ struct rte_crypto_op *op;
+ struct rte_mbuf *m;
+ struct rte_event ev;
+ uint32_t cap;
+ int ret;
+
+ memset(&m_data, 0, sizeof(m_data));
+
+ m = alloc_fill_mbuf(params.mbuf_pool, text_64B, PACKET_LENGTH, 0);
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf!\n");
+#if PKT_TRACE
+ rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
+#endif
+ /* Setup Cipher Parameters */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ op = rte_crypto_op_alloc(params.op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(op,
+ "Failed to allocate symmetric crypto operation struct\n");
+
+ sym_op = op->sym;
+
+ if (!session_less) {
+ sess = rte_cryptodev_sym_session_create(params.session_mpool);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");
+
+ /* Create Crypto session*/
+ rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess,
+ &cipher_xform, params.session_mpool);
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
+ evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
+ /* Fill in private user data information */
+ rte_memcpy(&m_data.response_info, &response_info,
+ sizeof(response_info));
+ rte_memcpy(&m_data.request_info, &request_info,
+ sizeof(request_info));
+ rte_cryptodev_sym_session_set_user_data(sess,
+ &m_data, sizeof(m_data));
+ }
+
+ rte_crypto_op_attach_sym_session(op, sess);
+ } else {
+ struct rte_crypto_sym_xform *first_xform;
+
+ rte_crypto_op_sym_xforms_alloc(op, NUM);
+ op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
+ first_xform = &cipher_xform;
+ sym_op->xform = first_xform;
+ uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH +
+ (sizeof(struct rte_crypto_sym_xform) * 2);
+ op->private_data_offset = len;
+ /* Fill in private data information */
+ rte_memcpy(&m_data.response_info, &response_info,
+ sizeof(response_info));
+ rte_memcpy(&m_data.request_info, &request_info,
+ sizeof(request_info));
+ rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data));
+ }
+
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = PACKET_LENGTH;
+
+ /* Fill in event info and update event_ptr with rte_crypto_op */
+ memset(&ev, 0, sizeof(ev));
+ ev.queue_id = TEST_CRYPTO_EV_QUEUE_ID;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.flow_id = 0xAABB;
+ ev.event_ptr = op;
+
+ ret = send_recv_ev(&ev);
+ TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to "
+ "crypto adapter\n");
+
+ test_crypto_adapter_stats();
+
+ return TEST_SUCCESS;
+}
+
+static int
+map_adapter_service_core(void)
+{
+ uint32_t adapter_service_id;
+ int ret;
+
+ if (rte_event_crypto_adapter_service_id_get(TEST_ADAPTER_ID,
+ &adapter_service_id) == 0) {
+ uint32_t core_list[NUM_CORES];
+
+ ret = rte_service_lcore_list(core_list, NUM_CORES);
+ TEST_ASSERT(ret >= 0, "Failed to get service core list!");
+
+ if (core_list[0] != slcore_id) {
+ TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
+ "Failed to add service core");
+ TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
+ "Failed to start service core");
+ }
+
+ TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
+ adapter_service_id, slcore_id, 1),
+ "Failed to map adapter service");
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_sessionless_with_op_forward_mode(void)
+{
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
+ map_adapter_service_core();
+
+ TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
+ "Failed to start event crypto adapter");
+
+ ret = test_op_forward_mode(1);
+ TEST_ASSERT_SUCCESS(ret, "Sessionless - FORWARD mode test failed\n");
+ return TEST_SUCCESS;
+}
+
+static int
+test_session_with_op_forward_mode(void)
+{
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
+ map_adapter_service_core();
+
+ TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID
+ ), "Failed to start event crypto adapter");
+
+ ret = test_op_forward_mode(0);
+ TEST_ASSERT_SUCCESS(ret, "Session based - FORWARD mode test failed\n");
+ return TEST_SUCCESS;
+}
+
+static int
+send_op_recv_ev(struct rte_crypto_op *op)
+{
+ struct rte_crypto_op *recv_op;
+ struct rte_event ev;
+ int ret;
+
+ ret = rte_cryptodev_enqueue_burst(TEST_CDEV_ID, TEST_CDEV_QP_ID,
+ &op, NUM);
+ TEST_ASSERT_EQUAL(ret, NUM, "Failed to enqueue to cryptodev\n");
+ memset(&ev, 0, sizeof(ev));
+
+ while (rte_event_dequeue_burst(evdev,
+ TEST_APP_PORT_ID, &ev, NUM, 0) == 0)
+ rte_pause();
+
+ recv_op = ev.event_ptr;
+#if PKT_TRACE
+ struct rte_mbuf *m = recv_op->sym->m_src;
+ rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
+#endif
+ rte_pktmbuf_free(recv_op->sym->m_src);
+ rte_crypto_op_free(recv_op);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_op_new_mode(uint8_t session_less)
+{
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_cryptodev_sym_session *sess;
+ union rte_event_crypto_metadata m_data;
+ struct rte_crypto_sym_op *sym_op;
+ struct rte_crypto_op *op;
+ struct rte_mbuf *m;
+ uint32_t cap;
+ int ret;
+
+ memset(&m_data, 0, sizeof(m_data));
+
+ m = alloc_fill_mbuf(params.mbuf_pool, text_64B, PACKET_LENGTH, 0);
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf!\n");
+#if PKT_TRACE
+ rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
+#endif
+ /* Setup Cipher Parameters */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ op = rte_crypto_op_alloc(params.op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(op, "Failed to allocate crypto_op!\n");
+
+ sym_op = op->sym;
+
+ if (!session_less) {
+ sess = rte_cryptodev_sym_session_create(params.session_mpool);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
+ evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
+ /* Fill in private user data information */
+ rte_memcpy(&m_data.response_info, &response_info,
+ sizeof(m_data));
+ rte_cryptodev_sym_session_set_user_data(sess,
+ &m_data, sizeof(m_data));
+ }
+ rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess,
+ &cipher_xform, params.session_mpool);
+ rte_crypto_op_attach_sym_session(op, sess);
+ } else {
+ struct rte_crypto_sym_xform *first_xform;
+
+ rte_crypto_op_sym_xforms_alloc(op, NUM);
+ op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
+ first_xform = &cipher_xform;
+ sym_op->xform = first_xform;
+ uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH +
+ (sizeof(struct rte_crypto_sym_xform) * 2);
+ op->private_data_offset = len;
+ /* Fill in private data information */
+ rte_memcpy(&m_data.response_info, &response_info,
+ sizeof(m_data));
+ rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data));
+ }
+
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = PACKET_LENGTH;
+
+ ret = send_op_recv_ev(op);
+ TEST_ASSERT_SUCCESS(ret, "Failed to enqueue op to cryptodev\n");
+
+ test_crypto_adapter_stats();
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_sessionless_with_op_new_mode(void)
+{
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
+ map_adapter_service_core();
+
+ /* start the event crypto adapter */
+ TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
+ "Failed to start event crypto adapter");
+
+ ret = test_op_new_mode(1);
+ TEST_ASSERT_SUCCESS(ret, "Sessionless - NEW mode test failed\n");
+ return TEST_SUCCESS;
+}
+
+static int
+test_session_with_op_new_mode(void)
+{
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
+ map_adapter_service_core();
+
+ TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
+ "Failed to start event crypto adapter");
+
+ ret = test_op_new_mode(0);
+ TEST_ASSERT_SUCCESS(ret, "Session based - NEW mode test failed\n");
+ return TEST_SUCCESS;
+}
+
+/* Set up the crypto side of the test: create the mbuf, crypto-op and
+ * session mempools, instantiate a null crypto PMD when no crypto device
+ * is present, then configure the device with a single queue pair.
+ * Returns TEST_SUCCESS or TEST_FAILED.
+ */
+static int
+configure_cryptodev(void)
+{
+	struct rte_cryptodev_qp_conf qp_conf;
+	struct rte_cryptodev_config conf;
+	struct rte_cryptodev_info info;
+	unsigned int session_size;
+	uint8_t nb_devs;
+	int ret;
+
+	params.mbuf_pool = rte_pktmbuf_pool_create(
+			"CRYPTO_ADAPTER_MBUFPOOL",
+			NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+			rte_socket_id());
+	if (params.mbuf_pool == NULL) {
+		RTE_LOG(ERR, USER1, "Can't create CRYPTO_MBUFPOOL\n");
+		return TEST_FAILED;
+	}
+
+	/* Each op element reserves room for the xform chain plus the IV. */
+	params.op_mpool = rte_crypto_op_pool_create(
+			"EVENT_CRYPTO_SYM_OP_POOL",
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			NUM_MBUFS, MBUF_CACHE_SIZE,
+			DEFAULT_NUM_XFORMS *
+			sizeof(struct rte_crypto_sym_xform) +
+			MAXIMUM_IV_LENGTH,
+			rte_socket_id());
+	if (params.op_mpool == NULL) {
+		RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
+		return TEST_FAILED;
+	}
+
+	/* Create a NULL crypto device */
+	nb_devs = rte_cryptodev_device_count_by_driver(
+			rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_NULL_PMD)));
+	if (!nb_devs) {
+		ret = rte_vdev_init(
+			RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL);
+
+		TEST_ASSERT(ret == 0, "Failed to create pmd:%s instance\n",
+			    RTE_STR(CRYPTODEV_NAME_NULL_PMD));
+	}
+
+	nb_devs = rte_cryptodev_count();
+	if (!nb_devs) {
+		RTE_LOG(ERR, USER1, "No crypto devices found!\n");
+		return TEST_FAILED;
+	}
+
+	/*
+	 * Create mempool with maximum number of sessions * 2,
+	 * to include the session headers & private data
+	 */
+	session_size = rte_cryptodev_sym_get_private_session_size(TEST_CDEV_ID);
+	/* extra room for the event metadata appended to each session */
+	session_size += sizeof(union rte_event_crypto_metadata);
+
+	params.session_mpool = rte_mempool_create(
+				"CRYPTO_ADAPTER_SESSION_MP",
+				MAX_NB_SESSIONS * 2,
+				session_size,
+				0, 0, NULL, NULL, NULL,
+				NULL, SOCKET_ID_ANY,
+				0);
+
+	TEST_ASSERT_NOT_NULL(params.session_mpool,
+			"session mempool allocation failed\n");
+
+	rte_cryptodev_info_get(TEST_CDEV_ID, &info);
+	conf.nb_queue_pairs = info.max_nb_queue_pairs;
+	conf.socket_id = SOCKET_ID_ANY;
+
+	TEST_ASSERT_SUCCESS(rte_cryptodev_configure(TEST_CDEV_ID, &conf),
+			"Failed to configure cryptodev %u with %u qps\n",
+			TEST_CDEV_ID, conf.nb_queue_pairs);
+
+	qp_conf.nb_descriptors = DEFAULT_NUM_OPS_INFLIGHT;
+
+	TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
+			TEST_CDEV_ID, TEST_CDEV_QP_ID, &qp_conf,
+			rte_cryptodev_socket_id(TEST_CDEV_ID),
+			params.session_mpool),
+			"Failed to setup queue pair %u on cryptodev %u\n",
+			TEST_CDEV_QP_ID, TEST_CDEV_ID);
+
+	return TEST_SUCCESS;
+}
+
+/* Fill an event device configuration from the device's advertised limits,
+ * using the test's fixed port and queue counts.
+ */
+static inline void
+evdev_set_conf_values(struct rte_event_dev_config *dev_conf,
+			struct rte_event_dev_info *info)
+{
+	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+	dev_conf->nb_event_ports = NB_TEST_PORTS;
+	dev_conf->nb_event_queues = NB_TEST_QUEUES;
+	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+	dev_conf->nb_event_port_dequeue_depth =
+			info->max_event_port_dequeue_depth;
+	/* Fixed: enqueue depth was assigned twice in the original. */
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_events_limit =
+			info->max_num_events;
+}
+
+/* Create (if needed) and configure the event device: one app queue plus a
+ * single-link crypto queue, and one application port linked to the app
+ * queue.  Asserts the device exposes exactly 2 queues and 1 port.
+ */
+static int
+configure_eventdev(void)
+{
+	struct rte_event_queue_conf queue_conf;
+	struct rte_event_dev_config devconf;
+	struct rte_event_dev_info info;
+	uint32_t queue_count;
+	uint32_t port_count;
+	int ret;
+	uint8_t qid;
+
+	if (!rte_event_dev_count()) {
+		/* If there is no hardware eventdev, or no software vdev was
+		 * specified on the command line, create an instance of
+		 * event_sw.
+		 */
+		LOG_DBG("Failed to find a valid event device... "
+			"testing with event_sw device\n");
+		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
+					"Error creating eventdev");
+		evdev = rte_event_dev_get_dev_id("event_sw0");
+	}
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");
+
+	evdev_set_conf_values(&devconf, &info);
+
+	ret = rte_event_dev_configure(evdev, &devconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n");
+
+	/* Set up event queue */
+	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			&queue_count);
+	TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n");
+	TEST_ASSERT_EQUAL(queue_count, 2, "Unexpected queue count\n");
+
+	/* App queue: default configuration. */
+	qid = TEST_APP_EV_QUEUE_ID;
+	ret = rte_event_queue_setup(evdev, qid, NULL);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid);
+
+	/* Crypto queue: atomic, highest priority, single-link. */
+	queue_conf.nb_atomic_flows = info.max_event_queue_flows;
+	queue_conf.nb_atomic_order_sequences = 32;
+	queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+	queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+
+	qid = TEST_CRYPTO_EV_QUEUE_ID;
+	ret = rte_event_queue_setup(evdev, qid, &queue_conf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid);
+
+	/* Set up event port */
+	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+			&port_count);
+	TEST_ASSERT_SUCCESS(ret, "Port count get failed\n");
+	TEST_ASSERT_EQUAL(port_count, 1, "Unexpected port count\n");
+
+	ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n",
+			TEST_APP_PORT_ID);
+
+	qid = TEST_APP_EV_QUEUE_ID;
+	ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1);
+	TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n",
+			TEST_APP_PORT_ID);
+
+	return TEST_SUCCESS;
+}
+
+/* Teardown helper: release the adapter created by the create test. */
+static void
+test_crypto_adapter_free(void)
+{
+	rte_event_crypto_adapter_free(TEST_ADAPTER_ID);
+}
+
+/* Create the crypto adapter via the default port-creation callback,
+ * using mode 0; paired with test_crypto_adapter_free() as teardown.
+ */
+static int
+test_crypto_adapter_create(void)
+{
+	struct rte_event_port_conf conf = {
+		.dequeue_depth = 8,
+		.enqueue_depth = 8,
+		.new_event_threshold = 1200,
+	};
+	int ret;
+
+	/* Create adapter with default port creation callback */
+	ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
+					      TEST_CDEV_ID,
+					      &conf, 0);
+	TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");
+
+	return TEST_SUCCESS;
+}
+
+/* Add the test queue pair to the adapter (passing event-response metadata
+ * when the PMD can bind an event per queue pair), then delete it again.
+ */
+static int
+test_crypto_adapter_qp_add_del(void)
+{
+	uint32_t cap;
+	int ret;
+
+	ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+	/* Braces added to both branches for consistency. */
+	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+	} else {
+		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
+	}
+
+	/* Assert messages de-garbled ("create add"/"delete add"). */
+	TEST_ASSERT_SUCCESS(ret, "Failed to add queue pair\n");
+
+	ret = rte_event_crypto_adapter_queue_pair_del(TEST_ADAPTER_ID,
+					TEST_CDEV_ID, TEST_CDEV_QP_ID);
+	TEST_ASSERT_SUCCESS(ret, "Failed to delete queue pair\n");
+
+	return TEST_SUCCESS;
+}
+
+/* Create the crypto adapter in the requested mode, add the test queue
+ * pair (with response metadata when the PMD supports per-QP event
+ * binding) and retrieve the adapter's event port id into params.
+ */
+static int
+configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
+{
+	struct rte_event_port_conf conf = {
+		.dequeue_depth = 8,
+		.enqueue_depth = 8,
+		.new_event_threshold = 1200,
+	};
+
+	uint32_t cap;
+	int ret;
+
+	/* Create adapter with default port creation callback */
+	ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
+					      TEST_CDEV_ID,
+					      &conf, mode);
+	TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");
+
+	ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+	/* Braces added to both branches for consistency. */
+	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+	} else {
+		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
+	}
+
+	TEST_ASSERT_SUCCESS(ret, "Failed to add queue pair\n");
+
+	ret = rte_event_crypto_adapter_event_port_get(TEST_ADAPTER_ID,
+				&params.crypto_event_port_id);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n");
+
+	return TEST_SUCCESS;
+}
+
+/* Stop the adapter and event device: set their services to stopped, tear
+ * down the shared service lcore, then stop the devices themselves.
+ */
+static void
+test_crypto_adapter_stop(void)
+{
+	uint32_t evdev_service_id, adapter_service_id;
+
+	/* retrieve service ids & stop services */
+	if (rte_event_crypto_adapter_service_id_get(TEST_ADAPTER_ID,
+				&adapter_service_id) == 0) {
+		rte_service_runstate_set(adapter_service_id, 0);
+		rte_service_lcore_stop(slcore_id);
+		rte_service_lcore_del(slcore_id);
+		rte_event_crypto_adapter_stop(TEST_ADAPTER_ID);
+	}
+
+	/* Same sequence for the eventdev's own service, if it has one. */
+	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
+		rte_service_runstate_set(evdev_service_id, 0);
+		rte_service_lcore_stop(slcore_id);
+		rte_service_lcore_del(slcore_id);
+		rte_event_dev_stop(evdev);
+	}
+}
+
+/* One-time adapter configuration (guarded by crypto_adapter_setup_done):
+ * create the adapter in the given mode and link its event port to the
+ * crypto queue, then map the eventdev service to a service core and start
+ * the event device.
+ */
+static int
+test_crypto_adapter_conf(enum rte_event_crypto_adapter_mode mode)
+{
+	uint32_t evdev_service_id;
+	uint8_t qid;
+	int ret;
+
+	if (!crypto_adapter_setup_done) {
+		ret = configure_event_crypto_adapter(mode);
+		if (!ret) {
+			qid = TEST_CRYPTO_EV_QUEUE_ID;
+			ret = rte_event_port_link(evdev,
+					params.crypto_event_port_id, &qid, NULL, 1);
+			TEST_ASSERT(ret >= 0, "Failed to link queue %d "
+					"port=%u\n", qid,
+					params.crypto_event_port_id);
+		}
+		/* NOTE(review): flag is set even when configure failed
+		 * above — confirm this is intended. */
+		crypto_adapter_setup_done = 1;
+	}
+
+	/* retrieve service ids */
+	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
+		/* add a service core and start it */
+		TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
+					"Failed to add service core");
+		TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
+					"Failed to start service core");
+
+		/* map services to it */
+		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,
+				slcore_id, 1), "Failed to map evdev service");
+
+		/* set services to running */
+		TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id,
+				1), "Failed to start evdev service");
+	}
+
+	/* start the eventdev */
+	TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),
+				"Failed to start event device");
+
+	return TEST_SUCCESS;
+}
+
+/* Suite setup: configure the adapter in OP_FORWARD mode.
+ * Fixed: the result of test_crypto_adapter_conf() was previously ignored
+ * and TEST_SUCCESS returned unconditionally; failures now propagate.
+ */
+static int
+test_crypto_adapter_conf_op_forward_mode(void)
+{
+	enum rte_event_crypto_adapter_mode mode;
+
+	mode = RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD;
+	return test_crypto_adapter_conf(mode);
+}
+
+/* Suite setup: configure the adapter in OP_NEW mode.
+ * Fixed: the result of test_crypto_adapter_conf() was previously ignored
+ * and TEST_SUCCESS returned unconditionally; failures now propagate.
+ */
+static int
+test_crypto_adapter_conf_op_new_mode(void)
+{
+	enum rte_event_crypto_adapter_mode mode;
+
+	mode = RTE_EVENT_CRYPTO_ADAPTER_OP_NEW;
+	return test_crypto_adapter_conf(mode);
+}
+
+
+/* Suite-wide setup: reserve a service lcore (the test needs at least two
+ * lcores), then bring up the event and crypto devices.
+ */
+static int
+testsuite_setup(void)
+{
+	int ret;
+
+	slcore_id = rte_get_next_lcore(-1, 1, 0);
+	TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores "
+			"are required to run this autotest\n");
+
+	/* Setup and start event device. */
+	ret = configure_eventdev();
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n");
+
+	/* Setup and start crypto device. */
+	ret = configure_cryptodev();
+	TEST_ASSERT_SUCCESS(ret, "cryptodev initialization failed\n");
+
+	return TEST_SUCCESS;
+}
+
+/* Release the three crypto-side mempools, logging the available count of
+ * each before freeing (a shortfall indicates leaked objects).
+ */
+static void
+crypto_teardown(void)
+{
+	/* Free mbuf mempool */
+	if (params.mbuf_pool != NULL) {
+		RTE_LOG(DEBUG, USER1, "CRYPTO_ADAPTER_MBUFPOOL count %u\n",
+		rte_mempool_avail_count(params.mbuf_pool));
+		rte_mempool_free(params.mbuf_pool);
+		params.mbuf_pool = NULL;
+	}
+
+	/* Free session mempool */
+	if (params.session_mpool != NULL) {
+		RTE_LOG(DEBUG, USER1, "CRYPTO_ADAPTER_SESSION_MP count %u\n",
+		rte_mempool_avail_count(params.session_mpool));
+		rte_mempool_free(params.session_mpool);
+		params.session_mpool = NULL;
+	}
+
+	/* Free ops mempool */
+	if (params.op_mpool != NULL) {
+		RTE_LOG(DEBUG, USER1, "EVENT_CRYPTO_SYM_OP_POOL count %u\n",
+		rte_mempool_avail_count(params.op_mpool));
+		rte_mempool_free(params.op_mpool);
+		params.op_mpool = NULL;
+	}
+}
+
+/* Stop the event device used by the suite. */
+static void
+eventdev_teardown(void)
+{
+	rte_event_dev_stop(evdev);
+}
+
+/* Suite-wide teardown: crypto resources first, then the event device. */
+static void
+testsuite_teardown(void)
+{
+	crypto_teardown();
+	eventdev_teardown();
+}
+
+/* Test suite definition: setup/teardown-wrapped cases covering adapter
+ * lifecycle, queue-pair add/delete, stats, and the session/sessionless
+ * datapaths in both FORWARD and NEW operation modes.
+ */
+static struct unit_test_suite functional_testsuite = {
+	.suite_name = "Event crypto adapter test suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+
+		TEST_CASE_ST(NULL, test_crypto_adapter_free,
+				test_crypto_adapter_create),
+
+		TEST_CASE_ST(test_crypto_adapter_create,
+				test_crypto_adapter_free,
+				test_crypto_adapter_qp_add_del),
+
+		TEST_CASE_ST(test_crypto_adapter_create,
+				test_crypto_adapter_free,
+				test_crypto_adapter_stats),
+
+		TEST_CASE_ST(test_crypto_adapter_conf_op_forward_mode,
+				test_crypto_adapter_stop,
+				test_session_with_op_forward_mode),
+
+		TEST_CASE_ST(test_crypto_adapter_conf_op_forward_mode,
+				test_crypto_adapter_stop,
+				test_sessionless_with_op_forward_mode),
+
+		TEST_CASE_ST(test_crypto_adapter_conf_op_new_mode,
+				test_crypto_adapter_stop,
+				test_session_with_op_new_mode),
+
+		TEST_CASE_ST(test_crypto_adapter_conf_op_new_mode,
+				test_crypto_adapter_stop,
+				test_sessionless_with_op_new_mode),
+
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+/* Autotest entry point: run the functional suite and register it under
+ * the "event_crypto_adapter_autotest" command.
+ */
+static int
+test_event_crypto_adapter(void)
+{
+	return unit_test_suite_runner(&functional_testsuite);
+}
+
+REGISTER_TEST_COMMAND(event_crypto_adapter_autotest,
+		test_event_crypto_adapter);
diff --git a/test/test/test_event_eth_rx_adapter.c b/test/test/test_event_eth_rx_adapter.c
index 006ed314..2337e543 100644
--- a/test/test/test_event_eth_rx_adapter.c
+++ b/test/test/test_event_eth_rx_adapter.c
@@ -25,36 +25,25 @@ struct event_eth_rx_adapter_test_params {
struct rte_mempool *mp;
uint16_t rx_rings, tx_rings;
uint32_t caps;
+ int rx_intr_port_inited;
+ uint16_t rx_intr_port;
};
static struct event_eth_rx_adapter_test_params default_params;
static inline int
-port_init(uint8_t port, struct rte_mempool *mp)
+port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
+ struct rte_mempool *mp)
{
- static const struct rte_eth_conf port_conf_default = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN
- },
- .rx_adv_conf = {
- .rss_conf = {
- .rss_hf = ETH_RSS_IP |
- ETH_RSS_TCP |
- ETH_RSS_UDP,
- }
- }
- };
const uint16_t rx_ring_size = 512, tx_ring_size = 512;
- struct rte_eth_conf port_conf = port_conf_default;
int retval;
uint16_t q;
struct rte_eth_dev_info dev_info;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
- retval = rte_eth_dev_configure(port, 0, 0, &port_conf);
+ retval = rte_eth_dev_configure(port, 0, 0, port_conf);
rte_eth_dev_info_get(port, &dev_info);
@@ -64,7 +53,7 @@ port_init(uint8_t port, struct rte_mempool *mp)
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, default_params.rx_rings,
- default_params.tx_rings, &port_conf);
+ default_params.tx_rings, port_conf);
if (retval != 0)
return retval;
@@ -104,22 +93,99 @@ port_init(uint8_t port, struct rte_mempool *mp)
return 0;
}
+/* Initialize an ethdev port with Rx-queue interrupts enabled (RSS mode);
+ * delegates the common ring/queue setup to port_init_common().
+ */
+static inline int
+port_init_rx_intr(uint8_t port, struct rte_mempool *mp)
+{
+	static const struct rte_eth_conf port_conf_default = {
+		.rxmode = {
+			.mq_mode = ETH_MQ_RX_RSS,
+			.max_rx_pkt_len = ETHER_MAX_LEN
+		},
+		.intr_conf = {
+			.rxq = 1,
+		},
+	};
+
+	return port_init_common(port, &port_conf_default, mp);
+}
+
+/* Initialize an ethdev port in polled RSS mode (IP/TCP/UDP hashing);
+ * delegates the common ring/queue setup to port_init_common().
+ */
+static inline int
+port_init(uint8_t port, struct rte_mempool *mp)
+{
+	static const struct rte_eth_conf port_conf_default = {
+		.rxmode = {
+			.mq_mode = ETH_MQ_RX_RSS,
+			.max_rx_pkt_len = ETHER_MAX_LEN
+		},
+		.rx_adv_conf = {
+			.rss_conf = {
+				.rss_hf = ETH_RSS_IP |
+					ETH_RSS_TCP |
+					ETH_RSS_UDP,
+			}
+		}
+	};
+
+	return port_init_common(port, &port_conf_default, mp);
+}
+
static int
-init_ports(int num_ports)
+init_port_rx_intr(int num_ports)
{
- uint8_t portid;
int retval;
+ uint16_t portid;
+ int err;
default_params.mp = rte_pktmbuf_pool_create("packet_pool",
+ NB_MBUFS,
+ MBUF_CACHE_SIZE,
+ MBUF_PRIV_SIZE,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+ if (!default_params.mp)
+ return -ENOMEM;
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ retval = port_init_rx_intr(portid, default_params.mp);
+ if (retval)
+ continue;
+ err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
+ &default_params.caps);
+ if (err)
+ continue;
+ if (!(default_params.caps &
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+ default_params.rx_intr_port_inited = 1;
+ default_params.rx_intr_port = portid;
+ return 0;
+ }
+ rte_eth_dev_stop(portid);
+ }
+ return 0;
+}
+
+static int
+init_ports(int num_ports)
+{
+ uint16_t portid;
+ int retval;
+
+ struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");
+
+ if (ptr == NULL)
+ default_params.mp = rte_pktmbuf_pool_create("packet_pool",
NB_MBUFS,
MBUF_CACHE_SIZE,
MBUF_PRIV_SIZE,
RTE_MBUF_DEFAULT_BUF_SIZE,
rte_socket_id());
+ else
+ default_params.mp = ptr;
+
if (!default_params.mp)
return -ENOMEM;
- for (portid = 0; portid < num_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
retval = port_init(portid, default_params.mp);
if (retval)
return retval;
@@ -164,7 +230,7 @@ testsuite_setup(void)
* so rte_eth_dev_start invokes rte_event_dev_start internally, so
* call init_ports after rte_event_dev_configure
*/
- err = init_ports(rte_eth_dev_count());
+ err = init_ports(rte_eth_dev_count_total());
TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
@@ -175,16 +241,77 @@ testsuite_setup(void)
return err;
}
+/* Suite setup for the Rx-interrupt tests: create an event device if none
+ * exists, configure it from its advertised limits, then initialize ports
+ * in interrupt mode and fetch the adapter capabilities for the selected
+ * interrupt-capable port.
+ */
+static int
+testsuite_setup_rx_intr(void)
+{
+	int err;
+	uint8_t count;
+	struct rte_event_dev_info dev_info;
+
+	count = rte_event_dev_count();
+	if (!count) {
+		printf("Failed to find a valid event device,"
+			" testing with event_skeleton device\n");
+		rte_vdev_init("event_skeleton", NULL);
+	}
+
+	struct rte_event_dev_config config = {
+			.nb_event_queues = 1,
+			.nb_event_ports = 1,
+	};
+
+	err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
+	/* Fixed: this return value was previously overwritten unchecked. */
+	TEST_ASSERT(err == 0, "Failed to get event dev info %d\n", err);
+	config.nb_event_queue_flows = dev_info.max_event_queue_flows;
+	config.nb_event_port_dequeue_depth =
+			dev_info.max_event_port_dequeue_depth;
+	config.nb_event_port_enqueue_depth =
+			dev_info.max_event_port_enqueue_depth;
+	config.nb_events_limit =
+			dev_info.max_num_events;
+
+	err = rte_event_dev_configure(TEST_DEV_ID, &config);
+	TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
+			err);
+
+	/*
+	 * eth devices like octeontx use event device to receive packets
+	 * so rte_eth_dev_start invokes rte_event_dev_start internally, so
+	 * call init_ports after rte_event_dev_configure
+	 */
+	err = init_port_rx_intr(rte_eth_dev_count_total());
+	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
+
+	if (!default_params.rx_intr_port_inited)
+		return 0;
+
+	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
+						default_params.rx_intr_port,
+						&default_params.caps);
+	TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
+
+	return err;
+}
+
static void
testsuite_teardown(void)
{
uint32_t i;
- for (i = 0; i < rte_eth_dev_count(); i++)
+ RTE_ETH_FOREACH_DEV(i)
rte_eth_dev_stop(i);
rte_mempool_free(default_params.mp);
}
+/* Teardown for the Rx-interrupt suite: stop the interrupt-mode port and
+ * free the packet mempool; a no-op when no such port was initialized.
+ */
+static void
+testsuite_teardown_rx_intr(void)
+{
+	if (!default_params.rx_intr_port_inited)
+		return;
+
+	rte_eth_dev_stop(default_params.rx_intr_port);
+	rte_mempool_free(default_params.mp);
+}
+
static int
adapter_create(void)
{
@@ -273,7 +400,7 @@ adapter_queue_add_del(void)
queue_config.servicing_weight = 1;
err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
- rte_eth_dev_count(),
+ rte_eth_dev_count_total(),
-1, &queue_config);
TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
@@ -333,6 +460,151 @@ adapter_queue_add_del(void)
}
static int
+adapter_multi_eth_add_del(void)
+{
+	int err;
+	struct rte_event ev;
+
+	uint16_t port_index, drv_id = 0;
+	char driver_name[50];
+
+	struct rte_event_eth_rx_adapter_queue_conf queue_config;
+
+	ev.queue_id = 0;
+	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+	ev.priority = 0;
+
+	queue_config.rx_queue_flags = 0;
+	queue_config.ev = ev;
+	queue_config.servicing_weight = 1;
+
+	/* stop eth devices for existing */
+	port_index = 0;
+	for (; port_index < rte_eth_dev_count_total(); port_index += 1)
+		rte_eth_dev_stop(port_index);
+
+	/* add the max port for rx_adapter */
+	port_index = rte_eth_dev_count_total();
+	for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
+		/* NOTE(review): sprintf into a fixed 50-byte buffer;
+		 * "net_null" + a uint16 id fits, but snprintf would be
+		 * safer here. */
+		sprintf(driver_name, "%s%u", "net_null", drv_id);
+		err = rte_vdev_init(driver_name, NULL);
+		TEST_ASSERT(err == 0, "Failed driver %s got %d",
+		driver_name, err);
+		drv_id += 1;
+	}
+
+	err = init_ports(rte_eth_dev_count_total());
+	TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
+
+	/* creating new instance for all newly added eth devices */
+	/* NOTE(review): adapter_create() return value is ignored —
+	 * confirm failure here should not abort the test. */
+	adapter_create();
+
+	/* eth_rx_adapter_queue_add for n ports */
+	port_index = 0;
+	for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
+		err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+				port_index, 0,
+				&queue_config);
+		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	}
+
+	/* eth_rx_adapter_queue_del n ports */
+	port_index = 0;
+	for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
+		err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+				port_index, 0);
+		TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+	}
+
+	adapter_free();
+
+	return TEST_SUCCESS;
+}
+
+/* Exercise interrupt-mode queue add/delete on the interrupt-capable port:
+ * servicing_weight == 0 selects interrupt mode, non-zero selects polling.
+ * Covers single-queue, all-queues (-1) and intr->poll transitions.
+ */
+static int
+adapter_intr_queue_add_del(void)
+{
+	int err;
+	struct rte_event ev;
+	uint32_t cap;
+	uint16_t eth_port;
+	struct rte_event_eth_rx_adapter_queue_conf queue_config;
+
+	if (!default_params.rx_intr_port_inited)
+		return 0;
+
+	eth_port = default_params.rx_intr_port;
+	err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	ev.queue_id = 0;
+	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+	ev.priority = 0;
+
+	queue_config.rx_queue_flags = 0;
+	queue_config.ev = ev;
+
+	/* weight = 0 => interrupt mode */
+	queue_config.servicing_weight = 0;
+
+	/* add queue 0 */
+	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+						TEST_ETHDEV_ID, 0,
+						&queue_config);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	/* add all queues */
+	queue_config.servicing_weight = 0;
+	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						-1,
+						&queue_config);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	/* del queue 0 */
+	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						0);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	/* del remaining queues */
+	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						-1);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	/* add all queues */
+	queue_config.servicing_weight = 0;
+	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						-1,
+						&queue_config);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	/* intr -> poll mode queue */
+	queue_config.servicing_weight = 1;
+	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						0,
+						&queue_config);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						-1,
+						&queue_config);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	/* del queues */
+	err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+						TEST_ETHDEV_ID,
+						-1);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	return TEST_SUCCESS;
+}
+
+static int
adapter_start_stop(void)
{
int err;
@@ -402,7 +674,7 @@ adapter_stats(void)
return TEST_SUCCESS;
}
-static struct unit_test_suite service_tests = {
+static struct unit_test_suite event_eth_rx_tests = {
.suite_name = "rx event eth adapter test suite",
.setup = testsuite_setup,
.teardown = testsuite_teardown,
@@ -410,17 +682,37 @@ static struct unit_test_suite service_tests = {
TEST_CASE_ST(NULL, NULL, adapter_create_free),
TEST_CASE_ST(adapter_create, adapter_free,
adapter_queue_add_del),
+ TEST_CASE_ST(NULL, NULL, adapter_multi_eth_add_del),
TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
+/* Separate suite for interrupt-mode Rx: uses the intr-specific setup and
+ * teardown so the poll-mode suite's state is not disturbed.
+ */
+static struct unit_test_suite event_eth_rx_intr_tests = {
+	.suite_name = "rx event eth adapter test suite",
+	.setup = testsuite_setup_rx_intr,
+	.teardown = testsuite_teardown_rx_intr,
+	.unit_test_cases = {
+		TEST_CASE_ST(adapter_create, adapter_free,
+			adapter_intr_queue_add_del),
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
static int
test_event_eth_rx_adapter_common(void)
{
- return unit_test_suite_runner(&service_tests);
+ return unit_test_suite_runner(&event_eth_rx_tests);
+}
+
+static int
+test_event_eth_rx_intr_adapter_common(void)
+{
+ return unit_test_suite_runner(&event_eth_rx_intr_tests);
}
REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
test_event_eth_rx_adapter_common);
+REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
+ test_event_eth_rx_intr_adapter_common);
diff --git a/test/test/test_event_timer_adapter.c b/test/test/test_event_timer_adapter.c
new file mode 100644
index 00000000..93471db1
--- /dev/null
+++ b/test/test/test_event_timer_adapter.c
@@ -0,0 +1,1830 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ * Copyright(c) 2017-2018 Intel Corporation.
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_timer_adapter.h>
+#include <rte_mempool.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_bus_vdev.h>
+#include <rte_service.h>
+#include <stdbool.h>
+
+#include "test.h"
+
+/* 4K timers corresponds to sw evdev max inflight events */
+#define MAX_TIMERS  (4 * 1024)
+/* NOTE(review): BKT_TCK_NSEC is defined empty and appears unused in this
+ * file — confirm and consider removing. */
+#define BKT_TCK_NSEC
+
+#define NSECPERSEC 1E9
+#define BATCH_SIZE 16
+/* Both the app lcore and adapter ports are linked to this queue */
+#define TEST_QUEUE_ID 0
+/* Port the application dequeues from */
+#define TEST_PORT_ID 0
+#define TEST_ADAPTER_ID 0
+
+/* Handle log statements in same manner as test macros */
+#define LOG_DBG(...)	RTE_LOG(DEBUG, EAL, __VA_ARGS__)
+
+/* File-scope test state shared by setup, workers and teardown. */
+static int evdev;
+static struct rte_event_timer_adapter *timdev;
+static struct rte_mempool *eventdev_test_mempool;
+static struct rte_ring *timer_producer_ring;
+static uint64_t global_bkt_tck_ns;
+static volatile uint8_t arm_done;
+
+/* Lcore assignments made in testsuite_setup(). */
+static bool using_services;
+static uint32_t test_lcore1;
+static uint32_t test_lcore2;
+static uint32_t test_lcore3;
+static uint32_t sw_evdev_slcore;
+static uint32_t sw_adptr_slcore;
+
+/* Fill an event device configuration with one port/queue and the
+ * device's advertised maxima.
+ */
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+		struct rte_event_dev_info *info)
+{
+	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+	dev_conf->nb_event_ports = 1;
+	dev_conf->nb_event_queues = 1;
+	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+	dev_conf->nb_event_port_dequeue_depth =
+			info->max_event_port_dequeue_depth;
+	/* Fixed: enqueue depth was assigned twice in the original. */
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_events_limit =
+			info->max_num_events;
+}
+
+/* Configure and start the event device with one queue and one port linked
+ * to all queues; if the device is software-based, map and start its
+ * service on sw_evdev_slcore.
+ */
+static inline int
+eventdev_setup(void)
+{
+	int ret;
+	struct rte_event_dev_config dev_conf;
+	struct rte_event_dev_info info;
+	uint32_t service_id;
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	TEST_ASSERT(info.max_num_events >= (int32_t)MAX_TIMERS,
+			"ERROR max_num_events=%d < max_events=%d",
+			info.max_num_events, MAX_TIMERS);
+
+	devconf_set_default_sane_values(&dev_conf, &info);
+	ret = rte_event_dev_configure(evdev, &dev_conf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+	ret = rte_event_queue_setup(evdev, 0, NULL);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0);
+
+	/* Configure event port */
+	ret = rte_event_port_setup(evdev, 0, NULL);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0);
+	ret = rte_event_port_link(evdev, 0, NULL, NULL, 0);
+	TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0);
+
+	/* If this is a software event device, map and start its service */
+	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
+		TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore),
+				"Failed to add service core");
+		TEST_ASSERT_SUCCESS(rte_service_lcore_start(
+				sw_evdev_slcore),
+				"Failed to start service core");
+		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
+				service_id, sw_evdev_slcore, 1),
+				"Failed to map evdev service");
+		TEST_ASSERT_SUCCESS(rte_service_runstate_set(
+				service_id, 1),
+				"Failed to start evdev service");
+	}
+
+	ret = rte_event_dev_start(evdev);
+	TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+	return TEST_SUCCESS;
+}
+
+/* Suite setup: verify enough lcores are available (4, plus 2 service
+ * cores when a software eventdev/adapter is used), create a sw eventdev
+ * when none exists, assign worker/service lcores, then bring up the
+ * event device.
+ */
+static int
+testsuite_setup(void)
+{
+	/* Some of the multithreaded tests require 3 other lcores to run */
+	unsigned int required_lcore_count = 4;
+	uint32_t service_id;
+
+	/* To make it easier to map services later if needed, just reset
+	 * service core state.
+	 */
+	(void) rte_service_lcore_reset_all();
+
+	if (!rte_event_dev_count()) {
+		/* If there is no hardware eventdev, or no software vdev was
+		 * specified on the command line, create an instance of
+		 * event_sw.
+		 */
+		LOG_DBG("Failed to find a valid event device... testing with"
+			" event_sw device\n");
+		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
+				"Error creating eventdev");
+		evdev = rte_event_dev_get_dev_id("event_sw0");
+	}
+
+	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
+		/* A software event device will use a software event timer
+		 * adapter as well. 2 more cores required to convert to
+		 * service cores.
+		 */
+		required_lcore_count += 2;
+		using_services = true;
+	}
+
+	if (rte_lcore_count() < required_lcore_count) {
+		printf("%d lcores needed to run tests", required_lcore_count);
+		return TEST_FAILED;
+	}
+
+	/* Assign lcores for various tasks */
+	test_lcore1 = rte_get_next_lcore(-1, 1, 0);
+	test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0);
+	test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0);
+	if (using_services) {
+		sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0);
+		sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0);
+	}
+
+	return eventdev_setup();
+}
+
+/* Suite teardown: stop and close the event device. */
+static void
+testsuite_teardown(void)
+{
+	rte_event_dev_stop(evdev);
+	rte_event_dev_close(evdev);
+}
+
+/* Map the timer adapter's service to sw_adptr_slcore and set it running.
+ * -EALREADY from lcore add/start is tolerated since the core may already
+ * have been registered by a previous setup.
+ */
+static int
+setup_adapter_service(struct rte_event_timer_adapter *adptr)
+{
+	uint32_t adapter_service_id;
+	int ret;
+
+	/* retrieve service ids */
+	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr,
+			&adapter_service_id), "Failed to get event timer "
+			"adapter service id");
+	/* add a service core and start it */
+	ret = rte_service_lcore_add(sw_adptr_slcore);
+	TEST_ASSERT(ret == 0 || ret == -EALREADY,
+			"Failed to add service core");
+	ret = rte_service_lcore_start(sw_adptr_slcore);
+	TEST_ASSERT(ret == 0 || ret == -EALREADY,
+			"Failed to start service core");
+
+	/* map services to it */
+	TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id,
+			sw_adptr_slcore, 1),
+			"Failed to map adapter service");
+
+	/* set services to running */
+	TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1),
+			"Failed to start event timer adapter service");
+
+	return TEST_SUCCESS;
+}
+
+/* Port-configuration callback for rte_event_timer_adapter_create_ext():
+ * grows the device configuration by one port for the adapter's use,
+ * stopping/restarting the device around the reconfigure if it was
+ * running.  The allocated port id is cached in function-static state so
+ * repeated invocations reuse it rather than allocating again.
+ */
+static int
+test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
+		  void *conf_arg)
+{
+	struct rte_event_dev_config dev_conf;
+	struct rte_event_dev_info info;
+	struct rte_event_port_conf *port_conf, def_port_conf = {0};
+	uint32_t started;
+	static int port_allocated;
+	static uint8_t port_id;
+	int ret;
+
+	/* Fast path: hand back the port allocated by a previous call. */
+	if (port_allocated) {
+		*event_port_id = port_id;
+		return 0;
+	}
+
+	RTE_SET_USED(id);
+
+	ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED,
+				     &started);
+	if (ret < 0)
+		return ret;
+
+	if (started)
+		rte_event_dev_stop(event_dev_id);
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	if (ret < 0)
+		return ret;
+
+	devconf_set_default_sane_values(&dev_conf, &info);
+
+	/* The new port gets the next free id. */
+	port_id = dev_conf.nb_event_ports;
+	dev_conf.nb_event_ports++;
+
+	ret = rte_event_dev_configure(event_dev_id, &dev_conf);
+	if (ret < 0) {
+		/* Restore the device's running state before bailing out. */
+		if (started)
+			rte_event_dev_start(event_dev_id);
+		return ret;
+	}
+
+	if (conf_arg != NULL)
+		port_conf = conf_arg;
+	else {
+		port_conf = &def_port_conf;
+		ret = rte_event_port_default_conf_get(event_dev_id, port_id,
+						      port_conf);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = rte_event_port_setup(event_dev_id, port_id, port_conf);
+	if (ret < 0)
+		return ret;
+
+	*event_port_id = port_id;
+
+	if (started)
+		rte_event_dev_start(event_dev_id);
+
+	/* Reuse this port number next time this is called */
+	port_allocated = 1;
+
+	return 0;
+}
+
+/* Common timer-adapter setup: create the adapter (via the ext-create
+ * path with a service core when the device lacks an internal port),
+ * start it, and create the event-timer mempool.
+ *
+ * Fixed: the adapter pointer is now NULL-checked before it is handed to
+ * setup_adapter_service(), which previously could dereference NULL.
+ */
+static int
+_timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns)
+{
+	struct rte_event_timer_adapter_conf config = {
+		.event_dev_id = evdev,
+		.timer_adapter_id = TEST_ADAPTER_ID,
+		.timer_tick_ns = bkt_tck_ns,
+		.max_tmo_ns = max_tmo_ns,
+		.nb_timers = MAX_TIMERS * 10,
+	};
+	uint32_t caps = 0;
+	const char *pool_name = "timdev_test_pool";
+
+	global_bkt_tck_ns = bkt_tck_ns;
+
+	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
+			"failed to get adapter capabilities");
+	if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+		timdev = rte_event_timer_adapter_create_ext(&config,
+							    test_port_conf_cb,
+							    NULL);
+		TEST_ASSERT_NOT_NULL(timdev,
+				"failed to create event timer ring");
+		setup_adapter_service(timdev);
+		using_services = true;
+	} else {
+		timdev = rte_event_timer_adapter_create(&config);
+		TEST_ASSERT_NOT_NULL(timdev,
+				"failed to create event timer ring");
+	}
+
+	TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0,
+			"failed to Start event timer adapter");
+
+	/* Create event timer mempool */
+	eventdev_test_mempool = rte_mempool_create(pool_name,
+			MAX_TIMERS * 2,
+			sizeof(struct rte_event_timer), /* element size*/
+			0, /* cache size*/
+			0, NULL, NULL, NULL, NULL,
+			rte_socket_id(), 0);
+	if (!eventdev_test_mempool) {
+		printf("ERROR creating mempool\n");
+		return TEST_FAILED;
+	}
+
+	return TEST_SUCCESS;
+}
+
+/* Microsecond-resolution fixture. Looser timings are used with service
+ * cores since software-driven expiry has more jitter.
+ */
+static int
+timdev_setup_usec(void)
+{
+ return using_services ?
+ /* Max timeout is 10,000us and bucket interval is 100us */
+ _timdev_setup(1E7, 1E5) :
+ /* Max timeout is 100us and bucket interval is 1us */
+ _timdev_setup(1E5, 1E3);
+}
+
+/* Microsecond-resolution fixture for multicore tests (currently the same
+ * parameters as timdev_setup_usec).
+ */
+static int
+timdev_setup_usec_multicore(void)
+{
+ return using_services ?
+ /* Max timeout is 10,000us and bucket interval is 100us */
+ _timdev_setup(1E7, 1E5) :
+ /* Max timeout is 100us and bucket interval is 1us */
+ _timdev_setup(1E5, 1E3);
+}
+
+/* Millisecond-resolution fixture. */
+static int
+timdev_setup_msec(void)
+{
+ /* Max timeout is 3 mins (180 s), and bucket interval is 100 ms */
+ return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10);
+}
+
+/* Second-resolution fixture. */
+static int
+timdev_setup_sec(void)
+{
+ /* Max timeout is 100sec and bucket interval is 1sec */
+ return _timdev_setup(1E11, 1E9);
+}
+
+/* Second-resolution fixture for multicore tests. */
+static int
+timdev_setup_sec_multicore(void)
+{
+ /* Max timeout is 100sec and bucket interval is 1sec */
+ return _timdev_setup(1E11, 1E9);
+}
+
+/* Tear down the fixture created by _timdev_setup(): stop and free the
+ * adapter, then release the timer-object mempool.
+ */
+static void
+timdev_teardown(void)
+{
+ rte_event_timer_adapter_stop(timdev);
+ rte_event_timer_adapter_free(timdev);
+
+ rte_mempool_free(eventdev_test_mempool);
+}
+
+/*
+ * Walk an event timer through its state machine: arm past max_tmo
+ * (expect ERROR_TOOLATE), arm with a valid timeout (expect ARMED then
+ * expiry), and finally arm + cancel (expect CANCELED).
+ */
+static inline int
+test_timer_state(void)
+{
+ struct rte_event_timer *ev_tim;
+ struct rte_event ev;
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ };
+
+ /* NOTE(review): rte_mempool_get() return value is not checked here;
+ * a failed get would dereference an uninitialized pointer. Other
+ * tests in this file wrap the call in TEST_ASSERT_SUCCESS.
+ */
+ rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
+ *ev_tim = tim;
+ ev_tim->ev.event_ptr = ev_tim;
+ /* 120 ticks exceeds the configured max timeout for this fixture */
+ ev_tim->timeout_ticks = 120;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0,
+ "Armed timer exceeding max_timeout.");
+ TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
+ "Improper timer state set expected %d returned %d",
+ RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state);
+
+ ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
+ ev_tim->timeout_ticks = 10;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
+ "Failed to arm timer with proper timeout.");
+ TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
+ "Improper timer state set expected %d returned %d",
+ RTE_EVENT_TIMER_ARMED, ev_tim->state);
+
+ /* Wait past the 10-tick expiry; service-driven adapters use a
+ * coarser tick so wait longer for them.
+ */
+ if (!using_services)
+ rte_delay_us(20);
+ else
+ rte_delay_us(1000 + 200);
+ TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1,
+ "Armed timer failed to trigger.");
+
+ /* Re-arm with a long timeout, then cancel before it can fire */
+ ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
+ ev_tim->timeout_ticks = 90;
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
+ "Failed to arm timer with proper timeout.");
+ TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1),
+ 1, "Failed to cancel armed timer");
+ TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED,
+ "Improper timer state set expected %d returned %d",
+ RTE_EVENT_TIMER_CANCELED, ev_tim->state);
+
+ rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * Arm 'timers' event timers one at a time, each with the same timeout in
+ * ticks. Each timer's event_ptr points back at the timer object so the
+ * consumer can return it to the mempool.
+ */
+static inline int
+_arm_timers(uint64_t timeout_tcks, uint64_t timers)
+{
+ uint64_t i;
+ struct rte_event_timer *ev_tim;
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = timeout_tcks,
+ };
+
+ for (i = 0; i < timers; i++) {
+
+ TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
+ (void **)&ev_tim),
+ "mempool alloc failed");
+ *ev_tim = tim;
+ ev_tim->ev.event_ptr = ev_tim;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
+ 1), 1, "Failed to arm timer %d",
+ rte_errno);
+ }
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * Dequeue expiry events for up to 'wait_sec' seconds, returning each
+ * timer object to the mempool. Passes when (events + cancel_count) equals
+ * arm_count; fails if the deadline elapses with events still missing.
+ */
+static inline int
+_wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count,
+ uint64_t cancel_count)
+{
+ uint8_t valid_event;
+ uint64_t events = 0;
+ uint64_t wait_start, max_wait;
+ struct rte_event ev;
+
+ max_wait = rte_get_timer_hz() * wait_sec;
+ wait_start = rte_get_timer_cycles();
+ while (1) {
+ if (rte_get_timer_cycles() - wait_start > max_wait) {
+ /* Deadline hit: if counts don't balance, force a
+ * failure. max_wait is nonzero, so the SUCCESS
+ * assertion below deliberately fails.
+ */
+ if (events + cancel_count != arm_count)
+ TEST_ASSERT_SUCCESS(max_wait,
+ "Max time limit for timers exceeded.");
+ break;
+ }
+
+ valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ /* event_ptr was set to the timer object when it was armed */
+ rte_mempool_put(eventdev_test_mempool, ev.event_ptr);
+ events++;
+ }
+
+ return TEST_SUCCESS;
+}
+
+/* Arm MAX_TIMERS timers on one core and verify every expiry arrives. */
+static inline int
+test_timer_arm(void)
+{
+ TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
+ "Failed to arm timers");
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
+ "Timer triggered count doesn't match arm count");
+ return TEST_SUCCESS;
+}
+
+/* lcore entry point wrapping _arm_timers() for rte_eal_remote_launch. */
+static int
+_arm_wrapper(void *arg)
+{
+ RTE_SET_USED(arg);
+
+ TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
+ "Failed to arm timers");
+
+ return TEST_SUCCESS;
+}
+
+/* Arm timers concurrently from two worker lcores and verify that all
+ * 2 * MAX_TIMERS expiry events are delivered.
+ */
+static inline int
+test_timer_arm_multicore(void)
+{
+
+ uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0);
+ uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0);
+
+ rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1);
+ rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2);
+
+ rte_eal_mp_wait_lcore();
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
+ "Timer triggered count doesn't match arm count");
+
+ return TEST_SUCCESS;
+}
+
+/* Number of timers armed/cancelled per burst API call */
+#define MAX_BURST 16
+/*
+ * Arm 'timers' event timers MAX_BURST at a time using the common-timeout
+ * burst API. Assumes timers is a multiple of MAX_BURST (remainder is
+ * not armed).
+ */
+static inline int
+_arm_timers_burst(uint64_t timeout_tcks, uint64_t timers)
+{
+ uint64_t i;
+ int j;
+ struct rte_event_timer *ev_tim[MAX_BURST];
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = timeout_tcks,
+ };
+
+ for (i = 0; i < timers / MAX_BURST; i++) {
+ TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
+ eventdev_test_mempool,
+ (void **)ev_tim, MAX_BURST),
+ "mempool alloc failed");
+
+ for (j = 0; j < MAX_BURST; j++) {
+ *ev_tim[j] = tim;
+ ev_tim[j]->ev.event_ptr = ev_tim[j];
+ }
+
+ /* All timers in the burst share the same timeout */
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
+ ev_tim, tim.timeout_ticks, MAX_BURST),
+ MAX_BURST, "Failed to arm timer %d", rte_errno);
+ }
+
+ return TEST_SUCCESS;
+}
+
+/* Arm MAX_TIMERS timers via the burst API and verify all expiries. */
+static inline int
+test_timer_arm_burst(void)
+{
+ TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
+ "Failed to arm timers");
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
+ "Timer triggered count doesn't match arm count");
+
+ return TEST_SUCCESS;
+}
+
+/* lcore entry point wrapping _arm_timers_burst() for remote launch. */
+static int
+_arm_wrapper_burst(void *arg)
+{
+ RTE_SET_USED(arg);
+
+ TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
+ "Failed to arm timers");
+
+ return TEST_SUCCESS;
+}
+
+/* Burst-arm timers concurrently from two lcores and verify all
+ * 2 * MAX_TIMERS expiries arrive.
+ */
+static inline int
+test_timer_arm_burst_multicore(void)
+{
+ rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1);
+ rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2);
+
+ rte_eal_mp_wait_lcore();
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
+ "Timer triggered count doesn't match arm count");
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * Arm then immediately cancel MAX_TIMERS timers one at a time, and verify
+ * that no expiry events are delivered (arm count == cancel count).
+ */
+static inline int
+test_timer_cancel(void)
+{
+ uint64_t i;
+ struct rte_event_timer *ev_tim;
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 20,
+ };
+
+ for (i = 0; i < MAX_TIMERS; i++) {
+ TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
+ (void **)&ev_tim),
+ "mempool alloc failed");
+ *ev_tim = tim;
+ ev_tim->ev.event_ptr = ev_tim;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
+ 1), 1, "Failed to arm timer %d",
+ rte_errno);
+
+ /* Vary the arm-to-cancel delay across iterations */
+ rte_delay_us(100 + (i % 5000));
+
+ TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
+ &ev_tim, 1), 1,
+ "Failed to cancel event timer %d", rte_errno);
+ rte_mempool_put(eventdev_test_mempool, ev_tim);
+ }
+
+
+ /* All timers were cancelled, so zero expiry events are expected */
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
+ MAX_TIMERS),
+ "Timer triggered count doesn't match arm, cancel count");
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * Producer side of the multicore cancel test: arm 'timers' timers and
+ * hand each armed timer to the consumer thread via timer_producer_ring
+ * (busy-waiting if the ring is momentarily full).
+ */
+static int
+_cancel_producer(uint64_t timeout_tcks, uint64_t timers)
+{
+ uint64_t i;
+ struct rte_event_timer *ev_tim;
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = timeout_tcks,
+ };
+
+ for (i = 0; i < timers; i++) {
+ TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
+ (void **)&ev_tim),
+ "mempool alloc failed");
+
+ *ev_tim = tim;
+ ev_tim->ev.event_ptr = ev_tim;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
+ 1), 1, "Failed to arm timer %d",
+ rte_errno);
+
+ TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
+ "Failed to arm event timer");
+
+ /* Spin until the consumer makes room in the ring */
+ while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0)
+ ;
+ }
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * Burst variant of the cancel producer: arm MAX_BURST timers at a time
+ * with a shared timeout, verify each is ARMED, and enqueue the whole
+ * burst onto timer_producer_ring for the cancelling thread.
+ */
+static int
+_cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers)
+{
+
+ uint64_t i;
+ int j, ret;
+ struct rte_event_timer *ev_tim[MAX_BURST];
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = timeout_tcks,
+ };
+ int arm_count = 0;
+
+ for (i = 0; i < timers / MAX_BURST; i++) {
+ TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
+ eventdev_test_mempool,
+ (void **)ev_tim, MAX_BURST),
+ "mempool alloc failed");
+
+ for (j = 0; j < MAX_BURST; j++) {
+ *ev_tim[j] = tim;
+ ev_tim[j]->ev.event_ptr = ev_tim[j];
+ }
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
+ ev_tim, tim.timeout_ticks, MAX_BURST),
+ MAX_BURST, "Failed to arm timer %d", rte_errno);
+
+ for (j = 0; j < MAX_BURST; j++)
+ TEST_ASSERT_EQUAL(ev_tim[j]->state,
+ RTE_EVENT_TIMER_ARMED,
+ "Event timer not armed, state = %d",
+ ev_tim[j]->state);
+
+ /* Bulk enqueue is all-or-nothing: ret is MAX_BURST or 0 */
+ ret = rte_ring_enqueue_bulk(timer_producer_ring,
+ (void **)ev_tim, MAX_BURST, NULL);
+ TEST_ASSERT_EQUAL(ret, MAX_BURST,
+ "Failed to enqueue event timers to ring");
+ arm_count += ret;
+ }
+
+ TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS,
+ "Failed to arm expected number of event timers");
+
+ return TEST_SUCCESS;
+}
+
+/* lcore entry point wrapping _cancel_producer() for remote launch. */
+static int
+_cancel_producer_wrapper(void *args)
+{
+ RTE_SET_USED(args);
+
+ return _cancel_producer(20, MAX_TIMERS);
+}
+
+/* lcore entry point wrapping _cancel_producer_burst() for remote launch. */
+static int
+_cancel_producer_burst_wrapper(void *args)
+{
+ RTE_SET_USED(args);
+
+ return _cancel_producer_burst(100, MAX_TIMERS);
+}
+
+/*
+ * Consumer side of the multicore cancel test: dequeue armed timers from
+ * timer_producer_ring and cancel them, until the producers signal
+ * completion via arm_done AND the ring has been drained.
+ */
+static int
+_cancel_thread(void *args)
+{
+ RTE_SET_USED(args);
+ struct rte_event_timer *ev_tim = NULL;
+ uint64_t cancel_count = 0;
+ uint16_t ret;
+
+ while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
+ if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim))
+ continue;
+
+ ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer");
+ rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
+ cancel_count++;
+ }
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * Burst consumer for the multicore cancel test: drain bursts of armed
+ * timers from timer_producer_ring, cancel each burst, and verify that
+ * exactly MAX_TIMERS timers were cancelled in total.
+ */
+static int
+_cancel_burst_thread(void *args)
+{
+ RTE_SET_USED(args);
+
+ int ret, i, n;
+ struct rte_event_timer *ev_tim[MAX_BURST];
+ uint64_t cancel_count = 0;
+ uint64_t dequeue_count = 0;
+
+ while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
+ n = rte_ring_dequeue_burst(timer_producer_ring,
+ (void **)ev_tim, MAX_BURST, NULL);
+ if (!n)
+ continue;
+
+ dequeue_count += n;
+
+ for (i = 0; i < n; i++)
+ TEST_ASSERT_EQUAL(ev_tim[i]->state,
+ RTE_EVENT_TIMER_ARMED,
+ "Event timer not armed, state = %d",
+ ev_tim[i]->state);
+
+ ret = rte_event_timer_cancel_burst(timdev, ev_tim, n);
+ TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of "
+ "event timers");
+ rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim,
+ ret);
+
+ cancel_count += ret;
+ }
+
+ TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS,
+ "Failed to cancel expected number of timers: "
+ "expected = %d, cancel_count = %"PRIu64", "
+ "dequeue_count = %"PRIu64"\n", MAX_TIMERS,
+ cancel_count, dequeue_count);
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * Two producer lcores arm timers while a third cancels them via a shared
+ * ring; verify that no expiry events leak through (arm == cancel).
+ */
+static inline int
+test_timer_cancel_multicore(void)
+{
+ arm_done = 0;
+ timer_producer_ring = rte_ring_create("timer_cancel_queue",
+ MAX_TIMERS * 2, rte_socket_id(), 0);
+ TEST_ASSERT_NOT_NULL(timer_producer_ring,
+ "Unable to reserve memory for ring");
+
+ rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3);
+ rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1);
+ rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2);
+
+ rte_eal_wait_lcore(test_lcore1);
+ rte_eal_wait_lcore(test_lcore2);
+ /* Producers finished; tell the cancel thread to drain and exit */
+ arm_done = 1;
+ rte_eal_wait_lcore(test_lcore3);
+ rte_ring_free(timer_producer_ring);
+
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2,
+ MAX_TIMERS * 2),
+ "Timer triggered count doesn't match arm count");
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * One lcore burst-arms timers while another burst-cancels them via a
+ * shared ring; verify no expiry events are delivered.
+ */
+static inline int
+test_timer_cancel_burst_multicore(void)
+{
+ arm_done = 0;
+ timer_producer_ring = rte_ring_create("timer_cancel_queue",
+ MAX_TIMERS * 2, rte_socket_id(), 0);
+ TEST_ASSERT_NOT_NULL(timer_producer_ring,
+ "Unable to reserve memory for ring");
+
+ rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2);
+ rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL,
+ test_lcore1);
+
+ rte_eal_wait_lcore(test_lcore1);
+ /* Producer finished; tell the cancel thread to drain and exit */
+ arm_done = 1;
+ rte_eal_wait_lcore(test_lcore2);
+ rte_ring_free(timer_producer_ring);
+
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
+ MAX_TIMERS),
+ "Timer triggered count doesn't match arm count");
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * Arm MAX_TIMERS timers and cancel a random ~50% of them after a varying
+ * delay; the rest are left to expire. Verify that expiries plus cancels
+ * account for every armed timer.
+ */
+static inline int
+test_timer_cancel_random(void)
+{
+ uint64_t i;
+ uint64_t events_canceled = 0;
+ struct rte_event_timer *ev_tim;
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 20,
+ };
+
+ for (i = 0; i < MAX_TIMERS; i++) {
+
+ TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
+ (void **)&ev_tim),
+ "mempool alloc failed");
+ *ev_tim = tim;
+ ev_tim->ev.event_ptr = ev_tim;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
+ 1), 1, "Failed to arm timer %d",
+ rte_errno);
+
+ /* Coin flip: cancel this timer or let it expire */
+ if (rte_rand() & 1) {
+ rte_delay_us(100 + (i % 5000));
+ TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(
+ timdev,
+ &ev_tim, 1), 1,
+ "Failed to cancel event timer %d", rte_errno);
+ rte_mempool_put(eventdev_test_mempool, ev_tim);
+ events_canceled++;
+ }
+ }
+
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
+ events_canceled),
+ "Timer triggered count doesn't match arm, cancel count");
+
+ return TEST_SUCCESS;
+}
+
+/* Check that the adapter can be created correctly: invalid event device
+ * id must fail with EINVAL, a valid config must succeed, and reusing an
+ * existing adapter id must fail with EEXIST.
+ */
+static int
+adapter_create(void)
+{
+ int adapter_id = 0;
+ struct rte_event_timer_adapter *adapter, *adapter2;
+
+ struct rte_event_timer_adapter_conf conf = {
+ .event_dev_id = evdev + 1, // invalid event dev id
+ .timer_adapter_id = adapter_id,
+ .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ .timer_tick_ns = NSECPERSEC / 10,
+ .max_tmo_ns = 180 * NSECPERSEC,
+ .nb_timers = MAX_TIMERS,
+ .flags = 0,
+ };
+ uint32_t caps = 0;
+
+ /* Test invalid conf */
+ adapter = rte_event_timer_adapter_create(&conf);
+ TEST_ASSERT_NULL(adapter, "Created adapter with invalid "
+ "event device id");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for "
+ "invalid event device id");
+
+ /* Test valid conf */
+ conf.event_dev_id = evdev;
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
+ "failed to get adapter capabilities");
+ /* Without an internal port, a port-setup callback is required */
+ if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT))
+ adapter = rte_event_timer_adapter_create_ext(&conf,
+ test_port_conf_cb,
+ NULL);
+ else
+ adapter = rte_event_timer_adapter_create(&conf);
+ TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid "
+ "configuration");
+
+ /* Test existing id */
+ adapter2 = rte_event_timer_adapter_create(&conf);
+ TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id");
+ TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing "
+ "id");
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter),
+ "Failed to free adapter");
+
+ rte_mempool_free(eventdev_test_mempool);
+
+ return TEST_SUCCESS;
+}
+
+
+/* Test that adapter can be freed correctly; freeing an already-freed or
+ * NULL adapter must fail.
+ */
+static int
+adapter_free(void)
+{
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
+ "Failed to stop adapter");
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
+ "Failed to free valid adapter");
+
+ /* Test free of already freed adapter */
+ TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
+ "Freed adapter that was already freed");
+
+ /* Test free of null adapter */
+ timdev = NULL;
+ TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
+ "Freed null adapter");
+
+ rte_mempool_free(eventdev_test_mempool);
+
+ return TEST_SUCCESS;
+}
+
+/* Test that adapter info can be retrieved and is correct. The port id
+ * check only applies in service mode, where test_port_conf_cb allocated
+ * port 1 for the adapter.
+ */
+static int
+adapter_get_info(void)
+{
+ struct rte_event_timer_adapter_info info;
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info),
+ "Failed to get adapter info");
+
+ if (using_services)
+ TEST_ASSERT_EQUAL(info.event_dev_port_id, 1,
+ "Expected port id = 1, got port id = %d",
+ info.event_dev_port_id);
+
+ return TEST_SUCCESS;
+}
+
+/* Test adapter lookup via adapter ID. */
+static int
+adapter_lookup(void)
+{
+ struct rte_event_timer_adapter *adapter;
+
+ adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID);
+ TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter");
+
+ return TEST_SUCCESS;
+}
+
+/* Test that starting an already-started adapter succeeds (the fixture
+ * setup starts it once; the explicit start here is the repeat).
+ */
+static int
+adapter_start(void)
+{
+ TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC,
+ NSECPERSEC / 10),
+ "Failed to start adapter");
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_start(timdev),
+ "Failed to repeatedly start adapter");
+
+ return TEST_SUCCESS;
+}
+
+/* Test that adapter stops correctly; stopping a NULL adapter must fail. */
+static int
+adapter_stop(void)
+{
+ struct rte_event_timer_adapter *l_adapter = NULL;
+
+ /* Test adapter stop */
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
+ "Failed to stop event adapter");
+
+ TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter),
+ "Erroneously stopped null event adapter");
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
+ "Failed to free adapter");
+
+ rte_mempool_free(eventdev_test_mempool);
+
+ return TEST_SUCCESS;
+}
+
+/* Test increment and reset of ev_enq_count stat: arm MAX_TIMERS timers,
+ * dequeue all expiries, and check the enqueue counter matches and can be
+ * reset to zero.
+ */
+static int
+stat_inc_reset_ev_enq(void)
+{
+ int ret, i, n;
+ int num_evtims = MAX_TIMERS;
+ struct rte_event_timer *evtims[num_evtims];
+ struct rte_event evs[BATCH_SIZE];
+ struct rte_event_timer_adapter_stats stats;
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
+ num_evtims);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
+ ret);
+
+ for (i = 0; i < num_evtims; i++) {
+ *evtims[i] = init_tim;
+ evtims[i]->ev.event_ptr = evtims[i];
+ }
+
+ /* Counter must start at zero on a freshly set up adapter */
+ ret = rte_event_timer_adapter_stats_get(timdev, &stats);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
+ TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at "
+ "startup");
+
+ /* Test with the max value for the adapter */
+ ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
+ TEST_ASSERT_EQUAL(ret, num_evtims,
+ "Failed to arm all event timers: attempted = %d, "
+ "succeeded = %d, rte_errno = %s",
+ num_evtims, ret, rte_strerror(rte_errno));
+
+ rte_delay_ms(1000);
+
+ /* NOTE(review): this local macro is also defined (identically) in
+ * event_timer_arm_max() without an #undef in between.
+ */
+#define MAX_TRIES num_evtims
+ int sum = 0;
+ int tries = 0;
+ bool done = false;
+ /* Poll until every expiry event has been dequeued or we give up */
+ while (!done) {
+ sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
+ RTE_DIM(evs), 10);
+ if (sum >= num_evtims || ++tries >= MAX_TRIES)
+ done = true;
+
+ rte_delay_ms(10);
+ }
+
+ TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
+ "got %d", num_evtims, sum);
+
+ TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");
+
+ rte_delay_ms(100);
+
+ /* Make sure the eventdev is still empty */
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
+ 10);
+
+ TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
+ "events from event device");
+
+ /* Check stats again */
+ ret = rte_event_timer_adapter_stats_get(timdev, &stats);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
+ TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims,
+ "Expected enqueue stat = %d; got %d", num_evtims,
+ (int)stats.ev_enq_count);
+
+ /* Reset and check again */
+ ret = rte_event_timer_adapter_stats_reset(timdev);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats");
+
+ ret = rte_event_timer_adapter_stats_get(timdev, &stats);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
+ TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0,
+ "Expected enqueue stat = %d; got %d", 0,
+ (int)stats.ev_enq_count);
+
+ rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
+ num_evtims);
+
+ return TEST_SUCCESS;
+}
+
+/* Test various cases in arming timers: a single arm must succeed, arming
+ * an already-armed timer must fail with EALREADY, and the expiry event
+ * must be delivered.
+ */
+static int
+event_timer_arm(void)
+{
+ uint16_t n;
+ int ret;
+ struct rte_event_timer_adapter *adapter = timdev;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ /* Set up a timer */
+ *evtim = init_tim;
+ evtim->ev.event_ptr = evtim;
+
+ /* Test single timer arm succeeds */
+ ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
+ rte_strerror(rte_errno));
+ TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer "
+ "in incorrect state");
+
+ /* Test arm of armed timer fails */
+ ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 0, "expected return value from "
+ "rte_event_timer_arm_burst: 0, got: %d", ret);
+ TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
+ "after arming already armed timer");
+
+ /* Let timer expire */
+ rte_delay_ms(1000);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
+ "events from event device");
+
+ rte_mempool_put(eventdev_test_mempool, evtim);
+
+ return TEST_SUCCESS;
+}
+
+/* This test checks that repeated references to the same event timer in the
+ * arm request work as expected; only the first one through should succeed.
+ */
+static int
+event_timer_arm_double(void)
+{
+ uint16_t n;
+ int ret;
+ struct rte_event_timer_adapter *adapter = timdev;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ /* Set up a timer */
+ *evtim = init_tim;
+ evtim->ev.event_ptr = evtim;
+
+ /* Same timer listed twice: only the first slot should arm */
+ struct rte_event_timer *evtim_arr[] = {evtim, evtim};
+ ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr));
+ TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from "
+ "rte_event_timer_arm_burst");
+ TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
+ "after double-arm");
+
+ /* Let timer expire */
+ rte_delay_ms(600);
+
+ /* Exactly one expiry event should result from the double arm */
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - "
+ "expected: 1, actual: %d", n);
+
+ rte_mempool_put(eventdev_test_mempool, evtim);
+
+ return TEST_SUCCESS;
+}
+
+/* Test the timer expiry event is generated at the expected time: nothing
+ * dequeues just before the 3 s deadline, and exactly one expiry event of
+ * type RTE_EVENT_TYPE_TIMER (carrying the original timer) arrives within
+ * one adapter tick after it.
+ */
+static int
+event_timer_arm_expiry(void)
+{
+ uint16_t n;
+ int ret;
+ struct rte_event_timer_adapter *adapter = timdev;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_event_timer *evtim2 = NULL;
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ /* Set up an event timer.
+ * (Fixed: the first statement previously ended with an accidental
+ * comma operator instead of a semicolon.)
+ */
+ *evtim = init_tim;
+ evtim->timeout_ticks = 30; /* expire in 3 secs */
+ evtim->ev.event_ptr = evtim;
+
+ ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s",
+ rte_strerror(rte_errno));
+ TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event "
+ "timer in incorrect state");
+
+ /* Just shy of the deadline: no event may have fired yet */
+ rte_delay_ms(2999);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event");
+
+ /* Delay 100 ms to account for the adapter tick window - should let us
+ * dequeue one event
+ */
+ rte_delay_ms(100);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer "
+ "expiry events", n);
+ TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER,
+ "Dequeued unexpected type of event");
+
+ /* Check that we recover the original event timer and then free it */
+ evtim2 = evs[0].event_ptr;
+ TEST_ASSERT_EQUAL(evtim, evtim2,
+ "Failed to recover pointer to original event timer");
+ rte_mempool_put(eventdev_test_mempool, evtim2);
+
+ return TEST_SUCCESS;
+}
+
+/* Check that rearming a timer works as expected: arm, receive the expiry,
+ * reset the recovered timer's state, arm it again, and receive a second
+ * expiry.
+ */
+static int
+event_timer_arm_rearm(void)
+{
+ uint16_t n;
+ int ret;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_event_timer *evtim2 = NULL;
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ /* Set up a timer */
+ *evtim = init_tim;
+ evtim->timeout_ticks = 1; // expire in 0.1 sec
+ evtim->ev.event_ptr = evtim;
+
+ /* Arm it */
+ ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
+ rte_strerror(rte_errno));
+
+ /* Add 100ms to account for the adapter tick window */
+ rte_delay_ms(100 + 100);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
+ "events from event device");
+
+ /* Recover the timer through the event that was dequeued. */
+ evtim2 = evs[0].event_ptr;
+ TEST_ASSERT_EQUAL(evtim, evtim2,
+ "Failed to recover pointer to original event timer");
+
+ /* Need to reset state in case implementation can't do it */
+ evtim2->state = RTE_EVENT_TIMER_NOT_ARMED;
+
+ /* Rearm it */
+ ret = rte_event_timer_arm_burst(timdev, &evtim2, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
+ rte_strerror(rte_errno));
+
+ /* Add 100ms to account for the adapter tick window */
+ rte_delay_ms(100 + 100);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
+ "events from event device");
+
+ /* Free it */
+ evtim2 = evs[0].event_ptr;
+ TEST_ASSERT_EQUAL(evtim, evtim2,
+ "Failed to recover pointer to original event timer");
+ rte_mempool_put(eventdev_test_mempool, evtim2);
+
+ return TEST_SUCCESS;
+}
+
+/* Check that the adapter handles the max specified number of timers as
+ * expected.
+ */
+static int
+event_timer_arm_max(void)
+{
+ int ret, i, n;
+ int num_evtims = MAX_TIMERS;
+ struct rte_event_timer *evtims[num_evtims];
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
+ num_evtims);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
+ ret);
+
+ for (i = 0; i < num_evtims; i++) {
+ *evtims[i] = init_tim;
+ evtims[i]->ev.event_ptr = evtims[i];
+ }
+
+ /* Test with the max value for the adapter */
+ ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
+ TEST_ASSERT_EQUAL(ret, num_evtims,
+ "Failed to arm all event timers: attempted = %d, "
+ "succeeded = %d, rte_errno = %s",
+ num_evtims, ret, rte_strerror(rte_errno));
+
+ rte_delay_ms(1000);
+
+ /* NOTE(review): same local macro also defined in
+ * stat_inc_reset_ev_enq() without an intervening #undef.
+ */
+#define MAX_TRIES num_evtims
+ int sum = 0;
+ int tries = 0;
+ bool done = false;
+ /* Poll until every expiry event has been dequeued or we give up */
+ while (!done) {
+ sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
+ RTE_DIM(evs), 10);
+ if (sum >= num_evtims || ++tries >= MAX_TRIES)
+ done = true;
+
+ rte_delay_ms(10);
+ }
+
+ TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
+ "got %d", num_evtims, sum);
+
+ TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");
+
+ rte_delay_ms(100);
+
+ /* Make sure the eventdev is still empty */
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
+ 10);
+
+ TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
+ "events from event device");
+
+ rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
+ num_evtims);
+
+ return TEST_SUCCESS;
+}
+
+/* Check that creating an event timer with incorrect event sched type
+ * fails with EINVAL. Only meaningful for service-driven adapters, which
+ * validate the sched type in software; skipped (ENOTSUP) otherwise.
+ */
+static int
+event_timer_arm_invalid_sched_type(void)
+{
+ int ret;
+ struct rte_event_timer *evtim = NULL;
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ if (!using_services)
+ return -ENOTSUP;
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ *evtim = init_tim;
+ evtim->ev.event_ptr = evtim;
+ evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type
+
+ ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
+ "sched type, but didn't");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
+ " arm fail with invalid queue");
+
+ /* Fixed: previously passed &evtim (address of the local pointer)
+ * instead of the mempool element itself, corrupting the pool.
+ */
+ rte_mempool_put(eventdev_test_mempool, evtim);
+
+ return TEST_SUCCESS;
+}
+
+/* Check that creating an event timer with a timeout value that is too small or
+ * too big fails.
+ */
+static int
+event_timer_arm_invalid_timeout(void)
+{
+	int ret;
+	struct rte_event_timer *evtim = NULL;
+	const struct rte_event_timer init_tim = {
+		.ev.op = RTE_EVENT_OP_NEW,
+		.ev.queue_id = TEST_QUEUE_ID,
+		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.ev.event_type = RTE_EVENT_TYPE_TIMER,
+		.state = RTE_EVENT_TIMER_NOT_ARMED,
+		.timeout_ticks = 5, // expire in .5 sec
+	};
+
+	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+	if (evtim == NULL) {
+		/* Failed to get an event timer object */
+		return TEST_FAILED;
+	}
+
+	/* Case 1: a zero tick count must be rejected as expiring too early */
+	*evtim = init_tim;
+	evtim->ev.event_ptr = evtim;
+	evtim->timeout_ticks = 0; // timeout too small
+
+	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
+	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
+			  "timeout, but didn't");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
+			  " arm fail with invalid timeout");
+	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY,
+			  "Unexpected event timer state");
+
+	/* Case 2: a tick count past the adapter maximum must be rejected as
+	 * expiring too late (1801 ticks exceeds the configured max timeout)
+	 */
+	*evtim = init_tim;
+	evtim->ev.event_ptr = evtim;
+	evtim->timeout_ticks = 1801; // timeout too big
+
+	ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
+	TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
+			  "timeout, but didn't");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
+			  " arm fail with invalid timeout");
+	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
+			  "Unexpected event timer state");
+
+	rte_mempool_put(eventdev_test_mempool, evtim);
+
+	return TEST_SUCCESS;
+}
+
+/* Check event timer cancellation: cancelling uninited/unarmed timers must
+ * fail with EINVAL, cancelling an armed timer must succeed and suppress the
+ * expiry event.
+ */
+static int
+event_timer_cancel(void)
+{
+	uint16_t n;
+	int ret;
+	struct rte_event_timer_adapter *adapter = timdev;
+	struct rte_event_timer *evtim = NULL;
+	struct rte_event evs[BATCH_SIZE];
+	const struct rte_event_timer init_tim = {
+		.ev.op = RTE_EVENT_OP_NEW,
+		.ev.queue_id = TEST_QUEUE_ID,
+		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.ev.event_type = RTE_EVENT_TYPE_TIMER,
+		.state = RTE_EVENT_TIMER_NOT_ARMED,
+	};
+
+	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+	if (evtim == NULL) {
+		/* Failed to get an event timer object */
+		return TEST_FAILED;
+	}
+
+	/* Check that cancelling an uninited timer fails */
+	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
+	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
+			  "uninited timer");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
+			  "cancelling uninited timer");
+
+	/* Set up a timer */
+	*evtim = init_tim;
+	evtim->ev.event_ptr = evtim;
+	evtim->timeout_ticks = 30;  // expire in 3 sec
+
+	/* Check that cancelling an inited but unarmed timer fails */
+	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
+	TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
+			  "unarmed timer");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
+			  "cancelling unarmed timer");
+
+	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
+	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
+			  rte_strerror(rte_errno));
+	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
+			  "evtim in incorrect state");
+
+	/* Delay 1 sec; timer should still be pending (3 sec timeout) */
+	rte_delay_ms(1000);
+
+	ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
+	TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n",
+			  rte_strerror(rte_errno));
+	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED,
+			  "evtim in incorrect state");
+
+	/* Wait past the original expiry time */
+	rte_delay_ms(3000);
+
+	/* Make sure that no expiry event was generated */
+	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");
+
+	rte_mempool_put(eventdev_test_mempool, evtim);
+
+	return TEST_SUCCESS;
+}
+
+/* Check that cancelling the same timer twice in one burst only succeeds once
+ * and sets rte_errno to EALREADY for the duplicate request.
+ */
+static int
+event_timer_cancel_double(void)
+{
+	uint16_t n;
+	int ret;
+	struct rte_event_timer_adapter *adapter = timdev;
+	struct rte_event_timer *evtim = NULL;
+	struct rte_event evs[BATCH_SIZE];
+	const struct rte_event_timer init_tim = {
+		.ev.op = RTE_EVENT_OP_NEW,
+		.ev.queue_id = TEST_QUEUE_ID,
+		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.ev.event_type = RTE_EVENT_TYPE_TIMER,
+		.state = RTE_EVENT_TIMER_NOT_ARMED,
+		.timeout_ticks = 5, // expire in .5 sec
+	};
+
+	rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+	if (evtim == NULL) {
+		/* Failed to get an event timer object */
+		return TEST_FAILED;
+	}
+
+	/* Set up a timer */
+	*evtim = init_tim;
+	evtim->ev.event_ptr = evtim;
+	evtim->timeout_ticks = 30;  // expire in 3 sec
+
+	ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
+	TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
+			  rte_strerror(rte_errno));
+	TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
+			  "timer in unexpected state");
+
+	/* Now, test that referencing the same timer twice in the same call
+	 * fails
+	 */
+	struct rte_event_timer *evtim_arr[] = {evtim, evtim};
+	ret = rte_event_timer_cancel_burst(adapter, evtim_arr,
+					   RTE_DIM(evtim_arr));
+
+	/* Two requests to cancel same timer, only one should succeed */
+	TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer "
+			  "twice");
+
+	TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
+			  "after double-cancel: rte_errno = %d", rte_errno);
+
+	/* Wait past the original expiry time */
+	rte_delay_ms(3000);
+
+	/* Still make sure that no expiry event was generated */
+	n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+	TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");
+
+	rte_mempool_put(eventdev_test_mempool, evtim);
+
+	return TEST_SUCCESS;
+}
+
+/* Check that event timer adapter tick resolution works as expected by testing
+ * the number of adapter ticks that occur within a particular time interval.
+ */
+static int
+adapter_tick_resolution(void)
+{
+	struct rte_event_timer_adapter_stats stats;
+	uint64_t adapter_tick_count;
+
+	/* Only run this test in the software driver case */
+	if (!using_services)
+		return -ENOTSUP;
+
+	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev),
+				"Failed to reset stats");
+
+	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
+			&stats), "Failed to get adapter stats");
+	TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count "
+			"not zeroed out");
+
+	/* Delay 1 second; should let at least 10 ticks occur with the default
+	 * adapter configuration used by this test.
+	 */
+	rte_delay_ms(1000);
+
+	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
+			&stats), "Failed to get adapter stats");
+
+	/* Allow a small amount of scheduling jitter above the expected 10 */
+	adapter_tick_count = stats.adapter_tick_count;
+	TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12,
+			"Expected 10-12 adapter ticks, got %"PRIu64"\n",
+			adapter_tick_count);
+
+	return TEST_SUCCESS;
+}
+
+/* Check that exactly RTE_EVENT_TIMER_ADAPTER_NUM_MAX adapters can be created,
+ * that one more fails, and that freeing them releases all their services.
+ */
+static int
+adapter_create_max(void)
+{
+	int i;
+	uint32_t svc_start_count, svc_end_count;
+	struct rte_event_timer_adapter *adapters[
+					RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1];
+
+	struct rte_event_timer_adapter_conf conf = {
+		.event_dev_id = evdev,
+		// timer_adapter_id set in loop
+		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+		.timer_tick_ns = NSECPERSEC / 10,
+		.max_tmo_ns = 180 * NSECPERSEC,
+		.nb_timers = MAX_TIMERS,
+		.flags = 0,
+	};
+
+	if (!using_services)
+		return -ENOTSUP;
+
+	svc_start_count = rte_service_get_count();
+
+	/* This test expects that there are sufficient service IDs available
+	 * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to
+	 * be less than RTE_SERVICE_NUM_MAX if anything else uses a service
+	 * (the SW event device, for example).
+	 */
+	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) {
+		conf.timer_adapter_id = i;
+		adapters[i] = rte_event_timer_adapter_create_ext(&conf,
+				test_port_conf_cb, NULL);
+		TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter "
+				"%d", i);
+	}
+
+	/* One adapter beyond the maximum must be refused */
+	conf.timer_adapter_id = i;
+	adapters[i] = rte_event_timer_adapter_create(&conf);
+	TEST_ASSERT_NULL(adapters[i], "Created too many adapters");
+
+	/* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services
+	 * have been created
+	 */
+	svc_end_count = rte_service_get_count();
+	TEST_ASSERT_EQUAL(svc_end_count - svc_start_count,
+			RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+			"Failed to create expected number of services");
+
+	for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
+		TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]),
+				"Failed to free adapter %d", i);
+
+	/* Check that service count is back to where it was at start */
+	svc_end_count = rte_service_get_count();
+	TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release "
+			  "correct number of services");
+
+	return TEST_SUCCESS;
+}
+
+/* Test suite definition: each case pairs a setup (adapter resolution/mode)
+ * with the common teardown; cases run in declaration order.
+ */
+static struct unit_test_suite event_timer_adptr_functional_testsuite  = {
+	.suite_name = "event timer functional test suite",
+	.setup = testsuite_setup,
+	.teardown = testsuite_teardown,
+	.unit_test_cases = {
+		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
+				test_timer_state),
+		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
+				test_timer_arm),
+		TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
+				test_timer_arm_burst),
+		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
+				test_timer_cancel),
+		TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
+				test_timer_cancel_random),
+		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
+				test_timer_arm_multicore),
+		TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
+				test_timer_arm_burst_multicore),
+		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
+				test_timer_cancel_multicore),
+		TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
+				test_timer_cancel_burst_multicore),
+		TEST_CASE(adapter_create),
+		TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				adapter_get_info),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				adapter_lookup),
+		TEST_CASE_ST(NULL, timdev_teardown,
+				adapter_start),
+		TEST_CASE_ST(timdev_setup_msec, NULL,
+				adapter_stop),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				stat_inc_reset_ev_enq),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				event_timer_arm),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				event_timer_arm_double),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				event_timer_arm_expiry),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				event_timer_arm_rearm),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				event_timer_arm_max),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				event_timer_arm_invalid_sched_type),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				event_timer_arm_invalid_timeout),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				event_timer_cancel),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				event_timer_cancel_double),
+		TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+				adapter_tick_resolution),
+		TEST_CASE(adapter_create_max),
+		TEST_CASES_END() /**< NULL terminate unit test array */
+	}
+};
+
+/* Entry point invoked by the test framework for this suite */
+static int
+test_event_timer_adapter_func(void)
+{
+	return unit_test_suite_runner(&event_timer_adptr_functional_testsuite);
+}
+
+REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func);
diff --git a/test/test/test_fbarray.c b/test/test/test_fbarray.c
new file mode 100644
index 00000000..8c44e2c7
--- /dev/null
+++ b/test/test/test_fbarray.c
@@ -0,0 +1,576 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <limits.h>
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_errno.h>
+#include <rte_fbarray.h>
+
+#include "test.h"
+
+/* Shared state for the fbarray test suite: the array under test and the
+ * inclusive [start, end] index range that a setup function marked as used.
+ */
+struct fbarray_testsuite_params {
+	struct rte_fbarray arr;
+	int start;
+	int end;
+};
+
+static struct fbarray_testsuite_params param;
+
+#define FBARRAY_TEST_ARR_NAME "fbarray_autotest"
+#define FBARRAY_TEST_LEN 256
+#define FBARRAY_TEST_ELT_SZ (sizeof(int))
+
+/* Suite setup: create the shared fbarray used by every test case */
+static int autotest_setup(void)
+{
+	return rte_fbarray_init(&param.arr, FBARRAY_TEST_ARR_NAME,
+			FBARRAY_TEST_LEN, FBARRAY_TEST_ELT_SZ);
+}
+
+/* Suite teardown: destroy the shared fbarray */
+static void autotest_teardown(void)
+{
+	rte_fbarray_destroy(&param.arr);
+}
+
+/* Mark indices param.start..param.end (inclusive) as used; -1 on failure */
+static int init_array(void)
+{
+	int i;
+	for (i = param.start; i <= param.end; i++) {
+		if (rte_fbarray_set_used(&param.arr, i))
+			return -1;
+	}
+	return 0;
+}
+
+/* Return every index in the array to the free state */
+static void reset_array(void)
+{
+	int i;
+	for (i = 0; i < FBARRAY_TEST_LEN; i++)
+		rte_fbarray_set_free(&param.arr, i);
+}
+
+/* Each setup below picks a used-range layout relative to the 64-bit masks
+ * the fbarray uses internally, then populates the array via init_array().
+ */
+static int first_msk_test_setup(void)
+{
+	/* put all within first mask */
+	param.start = 3;
+	param.end = 10;
+	return init_array();
+}
+
+static int cross_msk_test_setup(void)
+{
+	/* put all within second and third mask */
+	param.start = 70;
+	param.end = 160;
+	return init_array();
+}
+
+static int multi_msk_test_setup(void)
+{
+	/* put all within first and last mask */
+	param.start = 3;
+	param.end = FBARRAY_TEST_LEN - 20;
+	return init_array();
+}
+
+static int last_msk_test_setup(void)
+{
+	/* put all within last mask */
+	param.start = FBARRAY_TEST_LEN - 20;
+	param.end = FBARRAY_TEST_LEN - 1;
+	return init_array();
+}
+
+static int full_msk_test_setup(void)
+{
+	/* fill entire mask */
+	param.start = 0;
+	param.end = FBARRAY_TEST_LEN - 1;
+	return init_array();
+}
+
+static int empty_msk_test_setup(void)
+{
+	/* do not fill anything in */
+	reset_array();
+	return 0;
+}
+
+/* Exercise every fbarray API with invalid parameters (NULL fbarray, NULL
+ * name, zero/overflowing sizes, out-of-range indices and lengths) and check
+ * that each call fails with rte_errno == EINVAL.
+ */
+static int test_invalid(void)
+{
+	struct rte_fbarray dummy;
+
+	/* invalid parameters */
+	TEST_ASSERT_FAIL(rte_fbarray_attach(NULL),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT_FAIL(rte_fbarray_detach(NULL),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT_FAIL(rte_fbarray_destroy(NULL),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT_FAIL(rte_fbarray_init(NULL, "fail", 16, 16),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT_FAIL(rte_fbarray_init(&dummy, NULL, 16, 16),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT_FAIL(rte_fbarray_init(&dummy, "fail", 0, 16),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT_FAIL(rte_fbarray_init(&dummy, "fail", 16, 0),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	/* len must not be greater than INT_MAX */
+	TEST_ASSERT_FAIL(rte_fbarray_init(&dummy, "fail", INT_MAX + 1U, 16),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT_NULL(rte_fbarray_get(NULL, 0),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_idx(NULL, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_set_free(NULL, 0),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_set_used(NULL, 0),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_contig_free(NULL, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_contig_used(NULL, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_rev_contig_free(NULL, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_rev_contig_used(NULL, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_next_free(NULL, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_next_used(NULL, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_prev_free(NULL, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_prev_used(NULL, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_next_n_free(NULL, 0, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_next_n_used(NULL, 0, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_prev_n_free(NULL, 0, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_prev_n_used(NULL, 0, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_is_used(NULL, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT_SUCCESS(rte_fbarray_init(&dummy, "success",
+			FBARRAY_TEST_LEN, 8),
+			"Failed to initialize valid fbarray\n");
+
+	/* test API for handling invalid parameters with a valid fbarray */
+	TEST_ASSERT_NULL(rte_fbarray_get(&dummy, FBARRAY_TEST_LEN),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_idx(&dummy, NULL) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_set_free(&dummy, FBARRAY_TEST_LEN),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_set_used(&dummy, FBARRAY_TEST_LEN),
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_contig_free(&dummy, FBARRAY_TEST_LEN) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_contig_used(&dummy, FBARRAY_TEST_LEN) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_rev_contig_free(&dummy,
+			FBARRAY_TEST_LEN) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_rev_contig_used(&dummy,
+			FBARRAY_TEST_LEN) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_next_free(&dummy, FBARRAY_TEST_LEN) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_next_used(&dummy, FBARRAY_TEST_LEN) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_prev_free(&dummy, FBARRAY_TEST_LEN) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_prev_used(&dummy, FBARRAY_TEST_LEN) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_next_n_free(&dummy,
+			FBARRAY_TEST_LEN, 1) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_next_n_free(&dummy, 0,
+			FBARRAY_TEST_LEN + 1) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_next_n_free(&dummy, 0, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_next_n_used(&dummy,
+			FBARRAY_TEST_LEN, 1) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_next_n_used(&dummy, 0,
+			FBARRAY_TEST_LEN + 1) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_next_n_used(&dummy, 0, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_prev_n_free(&dummy,
+			FBARRAY_TEST_LEN, 1) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_prev_n_free(&dummy, 0,
+			FBARRAY_TEST_LEN + 1) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_prev_n_free(&dummy, 0, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_find_prev_n_used(&dummy,
+			FBARRAY_TEST_LEN, 1) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_prev_n_used(&dummy, 0,
+			FBARRAY_TEST_LEN + 1) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_prev_n_used(&dummy, 0, 0) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT(rte_fbarray_is_used(&dummy, FBARRAY_TEST_LEN) < 0,
+			"Call succeeded with invalid parameters\n");
+	TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+	TEST_ASSERT_SUCCESS(rte_fbarray_destroy(&dummy),
+			"Failed to destroy valid fbarray\n");
+
+	return TEST_SUCCESS;
+}
+
+/* Verify that a fully-free array reports free space everywhere (forward and
+ * reverse searches) and that no used space can be found.
+ */
+static int check_free(void)
+{
+	const int idx = 0;
+	const int last_idx = FBARRAY_TEST_LEN - 1;
+
+	/* ensure we can find a free spot */
+	TEST_ASSERT_EQUAL(rte_fbarray_find_next_free(&param.arr, idx), idx,
+			"Free space not found where expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(&param.arr, idx, 1), idx,
+			"Free space not found where expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_contig_free(&param.arr, idx),
+			FBARRAY_TEST_LEN,
+			"Free space not found where expected\n");
+
+	TEST_ASSERT_EQUAL(rte_fbarray_find_prev_free(&param.arr, idx), idx,
+			"Free space not found where expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(&param.arr, idx, 1), idx,
+			"Free space not found where expected\n");
+	/* reverse contig from index 0 can only span that single index */
+	TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(&param.arr, idx), 1,
+			"Free space not found where expected\n");
+
+	TEST_ASSERT_EQUAL(rte_fbarray_find_prev_free(&param.arr, last_idx),
+			last_idx, "Free space not found where expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(&param.arr, last_idx, 1),
+			last_idx, "Free space not found where expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(&param.arr,
+			last_idx), FBARRAY_TEST_LEN,
+			"Free space not found where expected\n");
+
+	/* ensure we can't find any used spots */
+	TEST_ASSERT(rte_fbarray_find_next_used(&param.arr, idx) < 0,
+			"Used space found where none was expected\n");
+	TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_next_n_used(&param.arr, idx, 1) < 0,
+			"Used space found where none was expected\n");
+	TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_contig_used(&param.arr, idx), 0,
+			"Used space found where none was expected\n");
+
+	TEST_ASSERT(rte_fbarray_find_prev_used(&param.arr, last_idx) < 0,
+			"Used space found where none was expected\n");
+	TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_prev_n_used(&param.arr, last_idx, 1) < 0,
+			"Used space found where none was expected\n");
+	TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(&param.arr,
+			last_idx), 0,
+			"Used space found where none was expected\n");
+
+	return 0;
+}
+
+/* Verify search behavior when exactly index 0 is marked used: that single
+ * slot is found from both directions, and every other index reads as free.
+ */
+static int check_used_one(void)
+{
+	const int idx = 0;
+	const int last_idx = FBARRAY_TEST_LEN - 1;
+
+	/* check that we can find used spots now */
+	TEST_ASSERT_EQUAL(rte_fbarray_find_next_used(&param.arr, idx), idx,
+			"Used space not found where expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_used(&param.arr, idx, 1), idx,
+			"Used space not found where expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_contig_used(&param.arr, idx), 1,
+			"Used space not found where expected\n");
+
+	TEST_ASSERT_EQUAL(rte_fbarray_find_prev_used(&param.arr, last_idx), idx,
+			"Used space not found where expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_used(&param.arr, last_idx, 1),
+			idx, "Used space not found where expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(&param.arr, idx), 1,
+			"Used space not found where expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(&param.arr,
+			last_idx), idx,
+			"Used space not found where expected\n");
+
+	/* check if further indices are still free */
+	TEST_ASSERT(rte_fbarray_find_next_used(&param.arr, idx + 1) < 0,
+			"Used space not found where none was expected\n");
+	TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+	TEST_ASSERT(rte_fbarray_find_next_n_used(&param.arr, idx + 1, 1) < 0,
+			"Used space not found where none was expected\n");
+	TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_contig_used(&param.arr, idx + 1), 0,
+			"Used space not found where none was expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_contig_free(&param.arr, idx + 1),
+			FBARRAY_TEST_LEN - 1,
+			"Used space not found where none was expected\n");
+
+	TEST_ASSERT_EQUAL(rte_fbarray_find_prev_used(&param.arr, last_idx), 0,
+			"Used space not found where none was expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_used(&param.arr, last_idx, 1),
+			0, "Used space not found where none was expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(&param.arr,
+			last_idx), 0,
+			"Used space not found where none was expected\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(&param.arr,
+			last_idx), FBARRAY_TEST_LEN - 1,
+			"Used space not found where none was expected\n");
+
+	return 0;
+}
+
+/* Basic lifecycle test: mark one element used, verify counts and searches,
+ * verify get/find_idx round-trips for every slot, then free and re-verify.
+ */
+static int test_basic(void)
+{
+	const int idx = 0;
+	int i;
+
+	/* check array count */
+	TEST_ASSERT_EQUAL(param.arr.count, 0, "Wrong element count\n");
+
+	/* ensure we can find a free spot */
+	if (check_free())
+		return TEST_FAILED;
+
+	/* check if used */
+	TEST_ASSERT_EQUAL(rte_fbarray_is_used(&param.arr, idx), 0,
+			"Used space found where not expected\n");
+
+	/* mark as used */
+	TEST_ASSERT_SUCCESS(rte_fbarray_set_used(&param.arr, idx),
+			"Failed to set as used\n");
+
+	/* check if used again */
+	TEST_ASSERT_NOT_EQUAL(rte_fbarray_is_used(&param.arr, idx), 0,
+			"Used space not found where expected\n");
+
+	if (check_used_one())
+		return TEST_FAILED;
+
+	/* check array count */
+	TEST_ASSERT_EQUAL(param.arr.count, 1, "Wrong element count\n");
+
+	/* check if getting pointers works for every element */
+	for (i = 0; i < FBARRAY_TEST_LEN; i++) {
+		void *td = rte_fbarray_get(&param.arr, i);
+		TEST_ASSERT_NOT_NULL(td, "Invalid pointer returned\n");
+		TEST_ASSERT_EQUAL(rte_fbarray_find_idx(&param.arr, td), i,
+				"Wrong index returned\n");
+	}
+
+	/* mark as free */
+	TEST_ASSERT_SUCCESS(rte_fbarray_set_free(&param.arr, idx),
+			"Failed to set as free\n");
+
+	/* check array count */
+	TEST_ASSERT_EQUAL(param.arr.count, 0, "Wrong element count\n");
+
+	/* check if used */
+	TEST_ASSERT_EQUAL(rte_fbarray_is_used(&param.arr, idx), 0,
+			"Used space found where not expected\n");
+
+	if (check_free())
+		return TEST_FAILED;
+
+	reset_array();
+
+	return TEST_SUCCESS;
+}
+
+/* For every index in [first, last], verify that the contiguous-run and
+ * next/prev search APIs report the range as entirely used (used == true) or
+ * entirely free (used == false). Returns 0 on success; asserts on mismatch.
+ */
+static int ensure_correct(struct rte_fbarray *arr, int first, int last,
+		bool used)
+{
+	int i, len = last - first + 1;
+	for (i = 0; i < len; i++) {
+		int cur = first + i;
+		int cur_len = len - i;
+
+		if (used) {
+			TEST_ASSERT_EQUAL(rte_fbarray_find_contig_used(arr,
+					cur), cur_len,
+					"Used space length is wrong\n");
+			TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(arr,
+					last), len,
+					"Used space length is wrong\n");
+			TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(arr,
+					cur), i + 1,
+					"Used space length is wrong\n");
+
+			TEST_ASSERT_EQUAL(rte_fbarray_find_next_used(arr, cur),
+					cur,
+					"Used space not found where expected\n");
+			TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_used(arr,
+					cur, 1), cur,
+					"Used space not found where expected\n");
+			TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_used(arr, cur,
+					cur_len), cur,
+					"Used space not found where expected\n");
+
+			TEST_ASSERT_EQUAL(rte_fbarray_find_prev_used(arr, cur),
+					cur,
+					"Used space not found where expected\n");
+			TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_used(arr,
+					last, cur_len), cur,
+					"Used space not found where expected\n");
+		} else {
+			TEST_ASSERT_EQUAL(rte_fbarray_find_contig_free(arr,
+					cur), cur_len,
+					"Free space length is wrong\n");
+			TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(arr,
+					last), len,
+					"Free space length is wrong\n");
+			TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(arr,
+					cur), i + 1,
+					"Free space length is wrong\n");
+
+			TEST_ASSERT_EQUAL(rte_fbarray_find_next_free(arr, cur),
+					cur,
+					"Free space not found where expected\n");
+			TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(arr, cur,
+					1), cur,
+					"Free space not found where expected\n");
+			TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(arr, cur,
+					cur_len), cur,
+					"Free space not found where expected\n");
+
+			TEST_ASSERT_EQUAL(rte_fbarray_find_prev_free(arr, cur),
+					cur,
+					"Free space not found where expected\n");
+			TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(arr,
+					last, cur_len), cur,
+					"Free space not found where expected\n");
+		}
+	}
+	return 0;
+}
+
+/* Verify the layout set up by the msk setup functions: free before start,
+ * used from start to end, free after end.
+ */
+static int test_find(void)
+{
+	TEST_ASSERT_EQUAL((int)param.arr.count, param.end - param.start + 1,
+			"Wrong element count\n");
+	/* ensure space is free before start */
+	if (ensure_correct(&param.arr, 0, param.start - 1, false))
+		return TEST_FAILED;
+	/* ensure space is occupied where it's supposed to be */
+	if (ensure_correct(&param.arr, param.start, param.end, true))
+		return TEST_FAILED;
+	/* ensure space after end is free as well */
+	if (ensure_correct(&param.arr, param.end + 1, FBARRAY_TEST_LEN - 1,
+			false))
+		return TEST_FAILED;
+	return TEST_SUCCESS;
+}
+
+/* Verify that an untouched array is completely free */
+static int test_empty(void)
+{
+	TEST_ASSERT_EQUAL((int)param.arr.count, 0, "Wrong element count\n");
+	/* ensure space is free */
+	if (ensure_correct(&param.arr, 0, FBARRAY_TEST_LEN - 1, false))
+		return TEST_FAILED;
+	return TEST_SUCCESS;
+}
+
+
+/* Suite definition: each msk setup arranges a different used-range layout,
+ * reset_array() restores a clean array between cases.
+ */
+static struct unit_test_suite fbarray_test_suite = {
+	.suite_name = "fbarray autotest",
+	.setup = autotest_setup,
+	.teardown = autotest_teardown,
+	.unit_test_cases = {
+		TEST_CASE(test_invalid),
+		TEST_CASE(test_basic),
+		TEST_CASE_ST(first_msk_test_setup, reset_array, test_find),
+		TEST_CASE_ST(cross_msk_test_setup, reset_array, test_find),
+		TEST_CASE_ST(multi_msk_test_setup, reset_array, test_find),
+		TEST_CASE_ST(last_msk_test_setup, reset_array, test_find),
+		TEST_CASE_ST(full_msk_test_setup, reset_array, test_find),
+		TEST_CASE_ST(empty_msk_test_setup, reset_array, test_empty),
+		TEST_CASES_END()
+	}
+};
+
+/* Entry point invoked by the test framework for this suite */
+static int
+test_fbarray(void)
+{
+	return unit_test_suite_runner(&fbarray_test_suite);
+}
+
+REGISTER_TEST_COMMAND(fbarray_autotest, test_fbarray);
diff --git a/test/test/test_flow_classify.c b/test/test/test_flow_classify.c
index fc83b69a..5f5beeee 100644
--- a/test/test/test_flow_classify.c
+++ b/test/test/test_flow_classify.c
@@ -871,32 +871,32 @@ test_flow_classify(void)
printf("Line %i: f_create has failed!\n", __LINE__);
rte_flow_classifier_free(cls->cls);
rte_free(cls);
- return -1;
+ return TEST_FAILED;
}
printf("Created table_acl for for IPv4 five tuple packets\n");
ret = init_mbufpool();
if (ret) {
printf("Line %i: init_mbufpool has failed!\n", __LINE__);
- return -1;
+ return TEST_FAILED;
}
if (test_invalid_parameters() < 0)
- return -1;
+ return TEST_FAILED;
if (test_valid_parameters() < 0)
- return -1;
+ return TEST_FAILED;
if (test_invalid_patterns() < 0)
- return -1;
+ return TEST_FAILED;
if (test_invalid_actions() < 0)
- return -1;
+ return TEST_FAILED;
if (test_query_udp() < 0)
- return -1;
+ return TEST_FAILED;
if (test_query_tcp() < 0)
- return -1;
+ return TEST_FAILED;
if (test_query_sctp() < 0)
- return -1;
+ return TEST_FAILED;
- return 0;
+ return TEST_SUCCESS;
}
REGISTER_TEST_COMMAND(flow_classify_autotest, test_flow_classify);
diff --git a/test/test/test_hash.c b/test/test/test_hash.c
index edf41f5a..b3db9fd1 100644
--- a/test/test/test_hash.c
+++ b/test/test/test_hash.c
@@ -1103,6 +1103,7 @@ static int test_average_table_utilization(void)
unsigned i, j;
unsigned added_keys, average_keys_added = 0;
int ret;
+ unsigned int cnt;
printf("\n# Running test to determine average utilization"
"\n before adding elements begins to fail\n");
@@ -1121,13 +1122,24 @@ static int test_average_table_utilization(void)
for (i = 0; i < ut_params.key_len; i++)
simple_key[i] = rte_rand() % 255;
ret = rte_hash_add_key(handle, simple_key);
+ if (ret < 0)
+ break;
}
+
if (ret != -ENOSPC) {
printf("Unexpected error when adding keys\n");
rte_hash_free(handle);
return -1;
}
+ cnt = rte_hash_count(handle);
+ if (cnt != added_keys) {
+ printf("rte_hash_count returned wrong value %u, %u,"
+ "%u\n", j, added_keys, cnt);
+ rte_hash_free(handle);
+ return -1;
+ }
+
average_keys_added += added_keys;
/* Reset the table */
diff --git a/test/test/test_hash_multiwriter.c b/test/test/test_hash_multiwriter.c
index ef5fce3d..6a3eb10b 100644
--- a/test/test/test_hash_multiwriter.c
+++ b/test/test/test_hash_multiwriter.c
@@ -48,18 +48,29 @@ static rte_atomic64_t ginsertions;
static int use_htm;
static int
-test_hash_multiwriter_worker(__attribute__((unused)) void *arg)
+test_hash_multiwriter_worker(void *arg)
{
uint64_t i, offset;
+ uint16_t pos_core;
uint32_t lcore_id = rte_lcore_id();
uint64_t begin, cycles;
+ uint16_t *enabled_core_ids = (uint16_t *)arg;
- offset = (lcore_id - rte_get_master_lcore())
- * tbl_multiwriter_test_params.nb_tsx_insertion;
+ for (pos_core = 0; pos_core < rte_lcore_count(); pos_core++) {
+ if (enabled_core_ids[pos_core] == lcore_id)
+ break;
+ }
+
+ /*
+ * Calculate offset for entries based on the position of the
+ * logical core, from the master core (not counting not enabled cores)
+ */
+ offset = pos_core * tbl_multiwriter_test_params.nb_tsx_insertion;
printf("Core #%d inserting %d: %'"PRId64" - %'"PRId64"\n",
lcore_id, tbl_multiwriter_test_params.nb_tsx_insertion,
- offset, offset + tbl_multiwriter_test_params.nb_tsx_insertion);
+ offset,
+ offset + tbl_multiwriter_test_params.nb_tsx_insertion - 1);
begin = rte_rdtsc_precise();
@@ -88,6 +99,8 @@ test_hash_multiwriter(void)
{
unsigned int i, rounded_nb_total_tsx_insertion;
static unsigned calledCount = 1;
+ uint16_t enabled_core_ids[RTE_MAX_LCORE];
+ uint16_t core_id;
uint32_t *keys;
uint32_t *found;
@@ -116,6 +129,7 @@ test_hash_multiwriter(void)
uint32_t duplicated_keys = 0;
uint32_t lost_keys = 0;
+ uint32_t count;
snprintf(name, 32, "test%u", calledCount++);
hash_params.name = name;
@@ -140,16 +154,17 @@ test_hash_multiwriter(void)
goto err1;
}
+ for (i = 0; i < nb_entries; i++)
+ keys[i] = i;
+
+ tbl_multiwriter_test_params.keys = keys;
+
found = rte_zmalloc(NULL, sizeof(uint32_t) * nb_entries, 0);
if (found == NULL) {
printf("RTE_ZMALLOC failed\n");
goto err2;
}
- for (i = 0; i < nb_entries; i++)
- keys[i] = i;
-
- tbl_multiwriter_test_params.keys = keys;
tbl_multiwriter_test_params.found = found;
rte_atomic64_init(&gcycles);
@@ -158,11 +173,36 @@ test_hash_multiwriter(void)
rte_atomic64_init(&ginsertions);
rte_atomic64_clear(&ginsertions);
+ /* Get list of enabled cores */
+ i = 0;
+ for (core_id = 0; core_id < RTE_MAX_LCORE; core_id++) {
+ if (i == rte_lcore_count())
+ break;
+
+ if (rte_lcore_is_enabled(core_id)) {
+ enabled_core_ids[i] = core_id;
+ i++;
+ }
+ }
+
+ if (i != rte_lcore_count()) {
+ printf("Number of enabled cores in list is different from "
+ "number given by rte_lcore_count()\n");
+ goto err3;
+ }
+
/* Fire all threads. */
rte_eal_mp_remote_launch(test_hash_multiwriter_worker,
- NULL, CALL_MASTER);
+ enabled_core_ids, CALL_MASTER);
rte_eal_mp_wait_lcore();
+ count = rte_hash_count(handle);
+ if (count != rounded_nb_total_tsx_insertion) {
+ printf("rte_hash_count returned wrong value %u, %d\n",
+ rounded_nb_total_tsx_insertion, count);
+ goto err3;
+ }
+
while (rte_hash_iterate(handle, &next_key, &next_data, &iter) >= 0) {
/* Search for the key in the list of keys added .*/
i = *(const uint32_t *)next_key;
diff --git a/test/test/test_hash_perf.c b/test/test/test_hash_perf.c
index a81d0c79..33dcb9fc 100644
--- a/test/test/test_hash_perf.c
+++ b/test/test/test_hash_perf.c
@@ -76,7 +76,8 @@ static struct rte_hash_parameters ut_params = {
};
static int
-create_table(unsigned with_data, unsigned table_index)
+create_table(unsigned int with_data, unsigned int table_index,
+ unsigned int with_locks)
{
char name[RTE_HASH_NAMESIZE];
@@ -86,6 +87,14 @@ create_table(unsigned with_data, unsigned table_index)
else
sprintf(name, "test_hash%d", hashtest_key_lens[table_index]);
+
+ if (with_locks)
+ ut_params.extra_flag =
+ RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT
+ | RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY;
+ else
+ ut_params.extra_flag = 0;
+
ut_params.name = name;
ut_params.key_len = hashtest_key_lens[table_index];
ut_params.socket_id = rte_socket_id();
@@ -459,7 +468,7 @@ reset_table(unsigned table_index)
}
static int
-run_all_tbl_perf_tests(unsigned with_pushes)
+run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks)
{
unsigned i, j, with_data, with_hash;
@@ -468,7 +477,7 @@ run_all_tbl_perf_tests(unsigned with_pushes)
for (with_data = 0; with_data <= 1; with_data++) {
for (i = 0; i < NUM_KEYSIZES; i++) {
- if (create_table(with_data, i) < 0)
+ if (create_table(with_data, i, with_locks) < 0)
return -1;
if (get_input_keys(with_pushes, i) < 0)
@@ -611,15 +620,20 @@ fbk_hash_perf_test(void)
static int
test_hash_perf(void)
{
- unsigned with_pushes;
-
- for (with_pushes = 0; with_pushes <= 1; with_pushes++) {
- if (with_pushes == 0)
- printf("\nALL ELEMENTS IN PRIMARY LOCATION\n");
+ unsigned int with_pushes, with_locks;
+ for (with_locks = 0; with_locks <= 1; with_locks++) {
+ if (with_locks)
+ printf("\nWith locks in the code\n");
else
- printf("\nELEMENTS IN PRIMARY OR SECONDARY LOCATION\n");
- if (run_all_tbl_perf_tests(with_pushes) < 0)
- return -1;
+ printf("\nWithout locks in the code\n");
+ for (with_pushes = 0; with_pushes <= 1; with_pushes++) {
+ if (with_pushes == 0)
+ printf("\nALL ELEMENTS IN PRIMARY LOCATION\n");
+ else
+ printf("\nELEMENTS IN PRIMARY OR SECONDARY LOCATION\n");
+ if (run_all_tbl_perf_tests(with_pushes, with_locks) < 0)
+ return -1;
+ }
}
if (fbk_hash_perf_test() < 0)
return -1;
diff --git a/test/test/test_hash_readwrite.c b/test/test/test_hash_readwrite.c
new file mode 100644
index 00000000..55ae33d8
--- /dev/null
+++ b/test/test/test_hash_readwrite.c
@@ -0,0 +1,637 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <locale.h>
+
+#include <rte_cycles.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
+#include <rte_jhash.h>
+#include <rte_launch.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_spinlock.h>
+
+#include "test.h"
+
+#define RTE_RWTEST_FAIL 0
+
+#define TOTAL_ENTRY (16*1024*1024)
+#define TOTAL_INSERT (15*1024*1024)
+
+#define NUM_TEST 3
+unsigned int core_cnt[NUM_TEST] = {2, 4, 8};
+
+struct perf {
+ uint32_t single_read;
+ uint32_t single_write;
+ uint32_t read_only[NUM_TEST];
+ uint32_t write_only[NUM_TEST];
+ uint32_t read_write_r[NUM_TEST];
+ uint32_t read_write_w[NUM_TEST];
+};
+
+static struct perf htm_results, non_htm_results;
+
+struct {
+ uint32_t *keys;
+ uint32_t *found;
+ uint32_t num_insert;
+ uint32_t rounded_tot_insert;
+ struct rte_hash *h;
+} tbl_rw_test_param;
+
+static rte_atomic64_t gcycles;
+static rte_atomic64_t ginsertions;
+
+static rte_atomic64_t gread_cycles;
+static rte_atomic64_t gwrite_cycles;
+
+static rte_atomic64_t greads;
+static rte_atomic64_t gwrites;
+
+/*
+ * Per-lcore worker for the functional read-write test: each core inserts
+ * (and immediately looks up) its own disjoint slice of the key array,
+ * accumulating cycle/insertion counters into the global atomics.
+ * NOTE(review): the slice offset assumes lcore ids are contiguous starting
+ * at the master lcore — holes in the coremask would overlap slices; confirm.
+ */
+static int
+test_hash_readwrite_worker(__attribute__((unused)) void *arg)
+{
+ uint64_t i, offset;
+ uint32_t lcore_id = rte_lcore_id();
+ uint64_t begin, cycles;
+ int ret;
+
+ offset = (lcore_id - rte_get_master_lcore())
+ * tbl_rw_test_param.num_insert;
+
+ printf("Core #%d inserting and reading %d: %'"PRId64" - %'"PRId64"\n",
+ lcore_id, tbl_rw_test_param.num_insert,
+ offset, offset + tbl_rw_test_param.num_insert);
+
+ begin = rte_rdtsc_precise();
+
+ for (i = offset; i < offset + tbl_rw_test_param.num_insert; i++) {
+
+ /*
+ * Bail out if the key is unexpectedly already present.
+ * NOTE(review): rte_hash_lookup() returns the key index (>= 0)
+ * on hit, so `> 0` misses a hit at index 0 — verify intent.
+ */
+ if (rte_hash_lookup(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i) > 0)
+ break;
+
+ ret = rte_hash_add_key(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i);
+ if (ret < 0)
+ break;
+
+ /* read-back check: lookup must return the index add reported */
+ if (rte_hash_lookup(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i) != ret)
+ break;
+ }
+
+ cycles = rte_rdtsc_precise() - begin;
+ rte_atomic64_add(&gcycles, cycles);
+ rte_atomic64_add(&ginsertions, i - offset);
+
+ /* mark the keys this core failed to insert so the verifier skips them */
+ for (; i < offset + tbl_rw_test_param.num_insert; i++)
+ tbl_rw_test_param.keys[i] = RTE_RWTEST_FAIL;
+
+ return 0;
+}
+
+/*
+ * Create the shared hash table and the keys/found arrays used by all
+ * read-write tests. use_htm selects the TRANS_MEM_SUPPORT extra flag
+ * (RW_CONCURRENCY is always set); use_jhash selects rte_jhash over
+ * rte_hash_crc. Returns 0 on success, -1 on failure (partial allocations
+ * are released on the err path; rte_free(NULL) is a no-op).
+ */
+static int
+init_params(int use_htm, int use_jhash)
+{
+ unsigned int i;
+
+ uint32_t *keys = NULL;
+ uint32_t *found = NULL;
+ struct rte_hash *handle;
+
+ struct rte_hash_parameters hash_params = {
+ .entries = TOTAL_ENTRY,
+ .key_len = sizeof(uint32_t),
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+ if (use_jhash)
+ hash_params.hash_func = rte_jhash;
+ else
+ hash_params.hash_func = rte_hash_crc;
+
+ if (use_htm)
+ hash_params.extra_flag =
+ RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT |
+ RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY;
+ else
+ hash_params.extra_flag =
+ RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY;
+
+ hash_params.name = "tests";
+
+ handle = rte_hash_create(&hash_params);
+ if (handle == NULL) {
+ printf("hash creation failed");
+ return -1;
+ }
+
+ tbl_rw_test_param.h = handle;
+ keys = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_ENTRY, 0);
+
+ if (keys == NULL) {
+ printf("RTE_MALLOC failed\n");
+ goto err;
+ }
+
+ found = rte_zmalloc(NULL, sizeof(uint32_t) * TOTAL_ENTRY, 0);
+ if (found == NULL) {
+ printf("RTE_ZMALLOC failed\n");
+ goto err;
+ }
+
+ tbl_rw_test_param.keys = keys;
+ tbl_rw_test_param.found = found;
+
+ /* keys are simply 0..TOTAL_ENTRY-1; key i maps to found[i] */
+ for (i = 0; i < TOTAL_ENTRY; i++)
+ keys[i] = i;
+
+ return 0;
+
+err:
+ rte_free(keys);
+ rte_hash_free(handle);
+
+ return -1;
+}
+
+/*
+ * Functional test: all lcores concurrently insert disjoint key slices
+ * (test_hash_readwrite_worker), then the table is iterated and every key
+ * that a worker successfully inserted must be found exactly once — any
+ * duplicate or lost key fails the test. Also reports cycles per
+ * insertion+lookup. Returns 0 on success, -1 on failure.
+ */
+static int
+test_hash_readwrite_functional(int use_htm)
+{
+ unsigned int i;
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+
+ uint32_t duplicated_keys = 0;
+ uint32_t lost_keys = 0;
+ int use_jhash = 1;
+
+ rte_atomic64_init(&gcycles);
+ rte_atomic64_clear(&gcycles);
+
+ rte_atomic64_init(&ginsertions);
+ rte_atomic64_clear(&ginsertions);
+
+ if (init_params(use_htm, use_jhash) != 0)
+ goto err;
+
+ /* split TOTAL_INSERT evenly; the remainder (if any) is not inserted */
+ tbl_rw_test_param.num_insert =
+ TOTAL_INSERT / rte_lcore_count();
+
+ tbl_rw_test_param.rounded_tot_insert =
+ tbl_rw_test_param.num_insert
+ * rte_lcore_count();
+
+ printf("++++++++Start function tests:+++++++++\n");
+
+ /* Fire all threads. */
+ rte_eal_mp_remote_launch(test_hash_readwrite_worker,
+ NULL, CALL_MASTER);
+ rte_eal_mp_wait_lcore();
+
+ /* tally how many times each key shows up during full iteration */
+ while (rte_hash_iterate(tbl_rw_test_param.h, &next_key,
+ &next_data, &iter) >= 0) {
+ /* Search for the key in the list of keys added .*/
+ i = *(const uint32_t *)next_key;
+ tbl_rw_test_param.found[i]++;
+ }
+
+ /* keys marked RTE_RWTEST_FAIL were never inserted — skip those */
+ for (i = 0; i < tbl_rw_test_param.rounded_tot_insert; i++) {
+ if (tbl_rw_test_param.keys[i] != RTE_RWTEST_FAIL) {
+ if (tbl_rw_test_param.found[i] > 1) {
+ duplicated_keys++;
+ break;
+ }
+ if (tbl_rw_test_param.found[i] == 0) {
+ lost_keys++;
+ printf("key %d is lost\n", i);
+ break;
+ }
+ }
+ }
+
+ if (duplicated_keys > 0) {
+ printf("%d key duplicated\n", duplicated_keys);
+ goto err_free;
+ }
+
+ if (lost_keys > 0) {
+ printf("%d key lost\n", lost_keys);
+ goto err_free;
+ }
+
+ printf("No key corrupted during read-write test.\n");
+
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gcycles) /
+ rte_atomic64_read(&ginsertions);
+
+ printf("cycles per insertion and lookup: %llu\n", cycles_per_insertion);
+
+ rte_free(tbl_rw_test_param.found);
+ rte_free(tbl_rw_test_param.keys);
+ rte_hash_free(tbl_rw_test_param.h);
+ printf("+++++++++Complete function tests+++++++++\n");
+ return 0;
+
+err_free:
+ rte_free(tbl_rw_test_param.found);
+ rte_free(tbl_rw_test_param.keys);
+ rte_hash_free(tbl_rw_test_param.h);
+err:
+ return -1;
+}
+
+/*
+ * Per-lcore reader for the perf test: look up keys 0..read_cnt-1 (which the
+ * caller pre-inserted with data == key index) and time the loop. arg carries
+ * read_cnt as an integer smuggled through the pointer.
+ * NOTE(review): arg is marked unused but is in fact read; also the return
+ * value of rte_hash_lookup_data() is not checked, so on a miss `data` is
+ * read without being written — worth confirming upstream.
+ */
+static int
+test_rw_reader(__attribute__((unused)) void *arg)
+{
+ uint64_t i;
+ uint64_t begin, cycles;
+ uint64_t read_cnt = (uint64_t)((uintptr_t)arg);
+
+ begin = rte_rdtsc_precise();
+ for (i = 0; i < read_cnt; i++) {
+ void *data;
+ rte_hash_lookup_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ &data);
+ if (i != (uint64_t)(uintptr_t)data) {
+ printf("lookup find wrong value %"PRIu64","
+ "%"PRIu64"\n", i,
+ (uint64_t)(uintptr_t)data);
+ break;
+ }
+ }
+
+ cycles = rte_rdtsc_precise() - begin;
+ rte_atomic64_add(&gread_cycles, cycles);
+ rte_atomic64_add(&greads, i);
+ return 0;
+}
+
+/*
+ * Per-lcore writer for the perf test: insert this core's slice of the upper
+ * half of the key space (keys >= TOTAL_INSERT/2) with data == key index,
+ * timing the loop. arg carries the first writer lcore id so each writer can
+ * compute its slice relative to it.
+ * NOTE(review): arg is marked unused but is in fact read; gwrites is
+ * incremented by the full slice size even if the loop broke early.
+ */
+static int
+test_rw_writer(__attribute__((unused)) void *arg)
+{
+ uint64_t i;
+ uint32_t lcore_id = rte_lcore_id();
+ uint64_t begin, cycles;
+ int ret;
+ uint64_t start_coreid = (uint64_t)(uintptr_t)arg;
+ uint64_t offset;
+
+ offset = TOTAL_INSERT / 2 + (lcore_id - start_coreid)
+ * tbl_rw_test_param.num_insert;
+ begin = rte_rdtsc_precise();
+ for (i = offset; i < offset + tbl_rw_test_param.num_insert; i++) {
+ ret = rte_hash_add_key_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ (void *)((uintptr_t)i));
+ if (ret < 0) {
+ printf("writer failed %"PRIu64"\n", i);
+ break;
+ }
+ }
+
+ cycles = rte_rdtsc_precise() - begin;
+ rte_atomic64_add(&gwrite_cycles, cycles);
+ rte_atomic64_add(&gwrites, tbl_rw_test_param.num_insert);
+ return 0;
+}
+
+/*
+ * Performance test. Phases, for each core count in core_cnt[] (skipped when
+ * fewer than 2*cnt+1 lcores are available):
+ *   1. single-threaded baseline: insert half the keys, then look them up;
+ *   2. reader-only and writer-only multi-core runs;
+ *   3. concurrent readers+writers, then a full-iteration duplicate/lost-key
+ *      consistency check (same scheme as the functional test).
+ * reader_faster chooses which side is timed (readers get the lighter
+ * workload so they finish first, and vice versa). Returns 0 on success.
+ */
+static int
+test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
+ int reader_faster)
+{
+ unsigned int n;
+ int ret;
+ int start_coreid;
+ uint64_t i, read_cnt;
+
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+ int use_jhash = 0;
+
+ uint32_t duplicated_keys = 0;
+ uint32_t lost_keys = 0;
+
+ uint64_t start = 0, end = 0;
+
+ rte_atomic64_init(&greads);
+ rte_atomic64_init(&gwrites);
+ rte_atomic64_clear(&gwrites);
+ rte_atomic64_clear(&greads);
+
+ rte_atomic64_init(&gread_cycles);
+ rte_atomic64_clear(&gread_cycles);
+ rte_atomic64_init(&gwrite_cycles);
+ rte_atomic64_clear(&gwrite_cycles);
+
+ if (init_params(use_htm, use_jhash) != 0)
+ goto err;
+
+ /*
+ * Do a readers finish faster or writers finish faster test.
+ * When readers finish faster, we timing the readers, and when writers
+ * finish faster, we timing the writers.
+ * Divided by 10 or 2 is just experimental values to vary the workload
+ * of readers.
+ */
+ if (reader_faster) {
+ printf("++++++Start perf test: reader++++++++\n");
+ read_cnt = TOTAL_INSERT / 10;
+ } else {
+ printf("++++++Start perf test: writer++++++++\n");
+ read_cnt = TOTAL_INSERT / 2;
+ }
+
+ /* We first test single thread performance */
+ start = rte_rdtsc_precise();
+ /* Insert half of the keys */
+ for (i = 0; i < TOTAL_INSERT / 2; i++) {
+ ret = rte_hash_add_key_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ (void *)((uintptr_t)i));
+ if (ret < 0) {
+ printf("Failed to insert half of keys\n");
+ goto err_free;
+ }
+ }
+ end = rte_rdtsc_precise() - start;
+ perf_results->single_write = end / i;
+
+ start = rte_rdtsc_precise();
+
+ for (i = 0; i < read_cnt; i++) {
+ void *data;
+ rte_hash_lookup_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ &data);
+ if (i != (uint64_t)(uintptr_t)data) {
+ printf("lookup find wrong value"
+ " %"PRIu64",%"PRIu64"\n", i,
+ (uint64_t)(uintptr_t)data);
+ break;
+ }
+ }
+ end = rte_rdtsc_precise() - start;
+ perf_results->single_read = end / i;
+
+ for (n = 0; n < NUM_TEST; n++) {
+ /* need core_cnt[n] readers + core_cnt[n] writers + master */
+ unsigned int tot_lcore = rte_lcore_count();
+ if (tot_lcore < core_cnt[n] * 2 + 1)
+ goto finish;
+
+ rte_atomic64_clear(&greads);
+ rte_atomic64_clear(&gread_cycles);
+ rte_atomic64_clear(&gwrites);
+ rte_atomic64_clear(&gwrite_cycles);
+
+ rte_hash_reset(tbl_rw_test_param.h);
+
+ tbl_rw_test_param.num_insert = TOTAL_INSERT / 2 / core_cnt[n];
+ tbl_rw_test_param.rounded_tot_insert = TOTAL_INSERT / 2 +
+ tbl_rw_test_param.num_insert *
+ core_cnt[n];
+
+ for (i = 0; i < TOTAL_INSERT / 2; i++) {
+ ret = rte_hash_add_key_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ (void *)((uintptr_t)i));
+ if (ret < 0) {
+ printf("Failed to insert half of keys\n");
+ goto err_free;
+ }
+ }
+
+ /* Then test multiple thread case but only all reads or
+ * all writes
+ */
+
+ /* Test only reader cases */
+ for (i = 1; i <= core_cnt[n]; i++)
+ rte_eal_remote_launch(test_rw_reader,
+ (void *)(uintptr_t)read_cnt, i);
+
+ rte_eal_mp_wait_lcore();
+
+ start_coreid = i;
+ /* Test only writer cases */
+ for (; i <= core_cnt[n] * 2; i++)
+ rte_eal_remote_launch(test_rw_writer,
+ (void *)((uintptr_t)start_coreid), i);
+
+ rte_eal_mp_wait_lcore();
+
+ if (reader_faster) {
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gread_cycles) /
+ rte_atomic64_read(&greads);
+ perf_results->read_only[n] = cycles_per_insertion;
+ printf("Reader only: cycles per lookup: %llu\n",
+ cycles_per_insertion);
+ }
+
+ else {
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gwrite_cycles) /
+ rte_atomic64_read(&gwrites);
+ perf_results->write_only[n] = cycles_per_insertion;
+ printf("Writer only: cycles per writes: %llu\n",
+ cycles_per_insertion);
+ }
+
+ rte_atomic64_clear(&greads);
+ rte_atomic64_clear(&gread_cycles);
+ rte_atomic64_clear(&gwrites);
+ rte_atomic64_clear(&gwrite_cycles);
+
+ rte_hash_reset(tbl_rw_test_param.h);
+
+ for (i = 0; i < TOTAL_INSERT / 2; i++) {
+ ret = rte_hash_add_key_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ (void *)((uintptr_t)i));
+ if (ret < 0) {
+ printf("Failed to insert half of keys\n");
+ goto err_free;
+ }
+ }
+
+ start_coreid = core_cnt[n] + 1;
+
+ /*
+ * Concurrent phase: launch the timed side last so it runs
+ * while the other side is still working.
+ */
+ if (reader_faster) {
+ for (i = core_cnt[n] + 1; i <= core_cnt[n] * 2; i++)
+ rte_eal_remote_launch(test_rw_writer,
+ (void *)((uintptr_t)start_coreid), i);
+ for (i = 1; i <= core_cnt[n]; i++)
+ rte_eal_remote_launch(test_rw_reader,
+ (void *)(uintptr_t)read_cnt, i);
+ } else {
+ for (i = 1; i <= core_cnt[n]; i++)
+ rte_eal_remote_launch(test_rw_reader,
+ (void *)(uintptr_t)read_cnt, i);
+ for (; i <= core_cnt[n] * 2; i++)
+ rte_eal_remote_launch(test_rw_writer,
+ (void *)((uintptr_t)start_coreid), i);
+ }
+
+ rte_eal_mp_wait_lcore();
+
+ while (rte_hash_iterate(tbl_rw_test_param.h,
+ &next_key, &next_data, &iter) >= 0) {
+ /* Search for the key in the list of keys added .*/
+ i = *(const uint32_t *)next_key;
+ tbl_rw_test_param.found[i]++;
+ }
+
+ for (i = 0; i < tbl_rw_test_param.rounded_tot_insert; i++) {
+ if (tbl_rw_test_param.keys[i] != RTE_RWTEST_FAIL) {
+ if (tbl_rw_test_param.found[i] > 1) {
+ duplicated_keys++;
+ break;
+ }
+ if (tbl_rw_test_param.found[i] == 0) {
+ lost_keys++;
+ printf("key %"PRIu64" is lost\n", i);
+ break;
+ }
+ }
+ }
+
+ if (duplicated_keys > 0) {
+ printf("%d key duplicated\n", duplicated_keys);
+ goto err_free;
+ }
+
+ if (lost_keys > 0) {
+ printf("%d key lost\n", lost_keys);
+ goto err_free;
+ }
+
+ printf("No key corrupted during read-write test.\n");
+
+ if (reader_faster) {
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gread_cycles) /
+ rte_atomic64_read(&greads);
+ perf_results->read_write_r[n] = cycles_per_insertion;
+ printf("Read-write cycles per lookup: %llu\n",
+ cycles_per_insertion);
+ }
+
+ else {
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gwrite_cycles) /
+ rte_atomic64_read(&gwrites);
+ perf_results->read_write_w[n] = cycles_per_insertion;
+ printf("Read-write cycles per writes: %llu\n",
+ cycles_per_insertion);
+ }
+ }
+
+finish:
+ rte_free(tbl_rw_test_param.found);
+ rte_free(tbl_rw_test_param.keys);
+ rte_hash_free(tbl_rw_test_param.h);
+ return 0;
+
+err_free:
+ rte_free(tbl_rw_test_param.found);
+ rte_free(tbl_rw_test_param.keys);
+ rte_hash_free(tbl_rw_test_param.h);
+
+err:
+ return -1;
+}
+
+/*
+ * Top-level driver: run the functional and both perf variants (readers
+ * timed, writers timed) with HTM when rte_tm_supported() reports it, and
+ * always without HTM, then print a summary of all collected numbers.
+ * Requires more than one lcore; with a single lcore it reports success
+ * without testing anything.
+ */
+static int
+test_hash_readwrite_main(void)
+{
+ /*
+ * Variables used to choose different tests.
+ * use_htm indicates if hardware transactional memory should be used.
+ * reader_faster indicates if the reader threads should finish earlier
+ * than writer threads. This is to timing either reader threads or
+ * writer threads for performance numbers.
+ */
+ int use_htm, reader_faster;
+
+ if (rte_lcore_count() == 1) {
+ printf("More than one lcore is required "
+ "to do read write test\n");
+ return 0;
+ }
+
+ /* enable thousands separators for the %'PRId64 progress output */
+ setlocale(LC_NUMERIC, "");
+
+ if (rte_tm_supported()) {
+ printf("Hardware transactional memory (lock elision) "
+ "is supported\n");
+
+ printf("Test read-write with Hardware transactional memory\n");
+
+ use_htm = 1;
+ if (test_hash_readwrite_functional(use_htm) < 0)
+ return -1;
+
+ reader_faster = 1;
+ if (test_hash_readwrite_perf(&htm_results, use_htm,
+ reader_faster) < 0)
+ return -1;
+
+ reader_faster = 0;
+ if (test_hash_readwrite_perf(&htm_results, use_htm,
+ reader_faster) < 0)
+ return -1;
+ } else {
+ printf("Hardware transactional memory (lock elision) "
+ "is NOT supported\n");
+ }
+
+ printf("Test read-write without Hardware transactional memory\n");
+ use_htm = 0;
+ if (test_hash_readwrite_functional(use_htm) < 0)
+ return -1;
+ reader_faster = 1;
+ if (test_hash_readwrite_perf(&non_htm_results, use_htm,
+ reader_faster) < 0)
+ return -1;
+ reader_faster = 0;
+ if (test_hash_readwrite_perf(&non_htm_results, use_htm,
+ reader_faster) < 0)
+ return -1;
+
+ printf("Results summary:\n");
+
+ int i;
+
+ /*
+ * NOTE(review): HTM results are printed even when HTM was not
+ * supported (htm_results left zeroed in that case).
+ */
+ printf("single read: %u\n", htm_results.single_read);
+ printf("single write: %u\n", htm_results.single_write);
+ for (i = 0; i < NUM_TEST; i++) {
+ printf("core_cnt: %u\n", core_cnt[i]);
+ printf("HTM:\n");
+ printf("read only: %u\n", htm_results.read_only[i]);
+ printf("write only: %u\n", htm_results.write_only[i]);
+ printf("read-write read: %u\n", htm_results.read_write_r[i]);
+ printf("read-write write: %u\n", htm_results.read_write_w[i]);
+
+ printf("non HTM:\n");
+ printf("read only: %u\n", non_htm_results.read_only[i]);
+ printf("write only: %u\n", non_htm_results.write_only[i]);
+ printf("read-write read: %u\n",
+ non_htm_results.read_write_r[i]);
+ printf("read-write write: %u\n",
+ non_htm_results.read_write_w[i]);
+ }
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(hash_readwrite_autotest, test_hash_readwrite_main);
diff --git a/test/test/test_interrupts.c b/test/test/test_interrupts.c
index 31a70a0c..dc19175d 100644
--- a/test/test/test_interrupts.c
+++ b/test/test/test_interrupts.c
@@ -20,6 +20,7 @@ enum test_interrupt_handle_type {
TEST_INTERRUPT_HANDLE_VALID,
TEST_INTERRUPT_HANDLE_VALID_UIO,
TEST_INTERRUPT_HANDLE_VALID_ALARM,
+ TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT,
TEST_INTERRUPT_HANDLE_CASE1,
TEST_INTERRUPT_HANDLE_MAX
};
@@ -80,6 +81,10 @@ test_interrupt_init(void)
intr_handles[TEST_INTERRUPT_HANDLE_VALID_ALARM].type =
RTE_INTR_HANDLE_ALARM;
+ intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT].fd = pfds.readfd;
+ intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT].type =
+ RTE_INTR_HANDLE_DEV_EVENT;
+
intr_handles[TEST_INTERRUPT_HANDLE_CASE1].fd = pfds.writefd;
intr_handles[TEST_INTERRUPT_HANDLE_CASE1].type = RTE_INTR_HANDLE_UIO;
@@ -250,6 +255,14 @@ test_interrupt_enable(void)
return -1;
}
+ /* check with specific valid intr_handle */
+ test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT];
+ if (rte_intr_enable(&test_intr_handle) == 0) {
+ printf("unexpectedly enable a specific intr_handle "
+ "successfully\n");
+ return -1;
+ }
+
/* check with valid handler and its type */
test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_CASE1];
if (rte_intr_enable(&test_intr_handle) < 0) {
@@ -306,6 +319,14 @@ test_interrupt_disable(void)
return -1;
}
+ /* check with specific valid intr_handle */
+ test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT];
+ if (rte_intr_disable(&test_intr_handle) == 0) {
+ printf("unexpectedly disable a specific intr_handle "
+ "successfully\n");
+ return -1;
+ }
+
/* check with valid handler and its type */
test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_CASE1];
if (rte_intr_disable(&test_intr_handle) < 0) {
@@ -393,9 +414,17 @@ test_interrupt(void)
goto out;
}
+ printf("Check valid device event interrupt full path\n");
+ if (test_interrupt_full_path_check(
+ TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT) < 0) {
+ printf("failure occurred during checking valid device event "
+ "interrupt full path\n");
+ goto out;
+ }
+
printf("Check valid alarm interrupt full path\n");
- if (test_interrupt_full_path_check(TEST_INTERRUPT_HANDLE_VALID_ALARM)
- < 0) {
+ if (test_interrupt_full_path_check(
+ TEST_INTERRUPT_HANDLE_VALID_ALARM) < 0) {
printf("failure occurred during checking valid alarm "
"interrupt full path\n");
goto out;
@@ -513,6 +542,12 @@ out:
rte_intr_callback_unregister(&test_intr_handle,
test_interrupt_callback_1, (void *)-1);
+ test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT];
+ rte_intr_callback_unregister(&test_intr_handle,
+ test_interrupt_callback, (void *)-1);
+ rte_intr_callback_unregister(&test_intr_handle,
+ test_interrupt_callback_1, (void *)-1);
+
rte_delay_ms(2 * TEST_INTERRUPT_CHECK_INTERVAL);
/* deinit */
test_interrupt_deinit();
diff --git a/test/test/test_kni.c b/test/test/test_kni.c
index e4839cdb..1b876719 100644
--- a/test/test/test_kni.c
+++ b/test/test/test_kni.c
@@ -71,11 +71,7 @@ static const struct rte_eth_txconf tx_conf = {
static const struct rte_eth_conf port_conf = {
.rxmode = {
- .header_split = 0,
- .hw_ip_checksum = 0,
- .hw_vlan_filter = 0,
- .jumbo_frame = 0,
- .hw_strip_crc = 1,
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_DCB_NONE,
@@ -357,6 +353,8 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
struct rte_kni_conf conf;
struct rte_eth_dev_info info;
struct rte_kni_ops ops;
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus = NULL;
if (!mp)
return -1;
@@ -366,8 +364,13 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
memset(&ops, 0, sizeof(ops));
rte_eth_dev_info_get(port_id, &info);
- conf.addr = info.pci_dev->addr;
- conf.id = info.pci_dev->id;
+ if (info.device)
+ bus = rte_bus_find_by_device(info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(info.device);
+ conf.addr = pci_dev->addr;
+ conf.id = pci_dev->id;
+ }
snprintf(conf.name, sizeof(conf.name), TEST_KNI_PORT);
/* core id 1 configured for kernel thread */
@@ -465,6 +468,8 @@ test_kni(void)
struct rte_kni_conf conf;
struct rte_eth_dev_info info;
struct rte_kni_ops ops;
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus;
/* Initialize KNI subsytem */
rte_kni_init(KNI_TEST_MAX_PORTS);
@@ -480,7 +485,7 @@ test_kni(void)
return -1;
}
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0) {
printf("no supported nic port found\n");
return -1;
@@ -523,8 +528,15 @@ test_kni(void)
memset(&conf, 0, sizeof(conf));
memset(&ops, 0, sizeof(ops));
rte_eth_dev_info_get(port_id, &info);
- conf.addr = info.pci_dev->addr;
- conf.id = info.pci_dev->id;
+ if (info.device)
+ bus = rte_bus_find_by_device(info.device);
+ else
+ bus = NULL;
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(info.device);
+ conf.addr = pci_dev->addr;
+ conf.id = pci_dev->id;
+ }
conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
@@ -552,8 +564,15 @@ test_kni(void)
memset(&info, 0, sizeof(info));
memset(&ops, 0, sizeof(ops));
rte_eth_dev_info_get(port_id, &info);
- conf.addr = info.pci_dev->addr;
- conf.id = info.pci_dev->id;
+ if (info.device)
+ bus = rte_bus_find_by_device(info.device);
+ else
+ bus = NULL;
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(info.device);
+ conf.addr = pci_dev->addr;
+ conf.id = pci_dev->id;
+ }
conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
diff --git a/test/test/test_link_bonding.c b/test/test/test_link_bonding.c
index 0ffd6509..0fe1d78e 100644
--- a/test/test/test_link_bonding.c
+++ b/test/test/test_link_bonding.c
@@ -43,9 +43,6 @@
#define TX_HTHRESH 0
#define TX_WTHRESH 0
#define TX_RSBIT_THRESH 32
-#define TX_Q_FLAGS (ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOVLANOFFL |\
- ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
- ETH_TXQ_FLAGS_NOXSUMTCP)
#define MBUF_CACHE_SIZE (250)
#define BURST_SIZE (32)
@@ -135,35 +132,11 @@ static uint16_t dst_port_1 = 2024;
static uint16_t vlan_id = 0x100;
-struct rte_eth_rxmode rx_mode = {
- .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
- .split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled. */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
- .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
- .hw_vlan_strip = 1, /**< VLAN strip enabled. */
- .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
- .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
-};
-
-struct rte_fdir_conf fdir_conf = {
- .mode = RTE_FDIR_MODE_NONE,
- .pballoc = RTE_FDIR_PBALLOC_64K,
- .status = RTE_FDIR_REPORT_STATUS,
- .drop_queue = 127,
-};
-
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
+ .max_rx_pkt_len = ETHER_MAX_LEN,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -189,8 +162,6 @@ static struct rte_eth_txconf tx_conf_default = {
},
.tx_free_thresh = TX_FREE_THRESH,
.tx_rs_thresh = TX_RSBIT_THRESH,
- .txq_flags = TX_Q_FLAGS
-
};
static void free_virtualpmd_tx_queue(void);
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 426877a2..9163f631 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -110,11 +110,7 @@ static struct rte_eth_conf default_pmd_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -425,7 +421,7 @@ test_setup(void)
TEST_ASSERT(retval >= 0,
"Failed to create ring ethdev '%s'\n", name);
- port->port_id = rte_eth_dev_count() - 1;
+ port->port_id = rte_eth_dev_count_avail() - 1;
}
retval = configure_ethdev(port->port_id, 1);
diff --git a/test/test/test_link_bonding_rssconf.c b/test/test/test_link_bonding_rssconf.c
index 4cc08f5a..d82de2ce 100644
--- a/test/test/test_link_bonding_rssconf.c
+++ b/test/test/test_link_bonding_rssconf.c
@@ -83,11 +83,6 @@ static struct rte_eth_conf default_pmd_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -100,11 +95,6 @@ static struct rte_eth_conf rss_pmd_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -521,7 +511,7 @@ test_setup(void)
FOR_EACH_PORT(n, port) {
port = &test_params.slave_ports[n];
- port_id = rte_eth_dev_count();
+ port_id = rte_eth_dev_count_avail();
snprintf(name, sizeof(name), SLAVE_DEV_NAME_FMT, port_id);
retval = rte_vdev_init(name, "size=64,copy=0");
diff --git a/test/test/test_malloc.c b/test/test/test_malloc.c
index d23192cf..4b5abb4e 100644
--- a/test/test/test_malloc.c
+++ b/test/test/test_malloc.c
@@ -12,6 +12,7 @@
#include <rte_common.h>
#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
@@ -378,7 +379,7 @@ test_realloc(void)
printf("NULL pointer returned from rte_zmalloc\n");
return -1;
}
- snprintf(ptr1, size1, "%s" ,hello_str);
+ strlcpy(ptr1, hello_str, size1);
char *ptr2 = rte_realloc(ptr1, size2, RTE_CACHE_LINE_SIZE);
if (!ptr2){
rte_free(ptr1);
@@ -705,20 +706,22 @@ err_return:
return -1;
}
+static int
+check_socket_mem(const struct rte_memseg_list *msl, void *arg)
+{
+ int32_t *socket = arg;
+
+ return *socket == msl->socket_id;
+}
+
/* Check if memory is available on a specific socket */
static int
is_mem_on_socket(int32_t socket)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- unsigned i;
-
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (socket == ms[i].socket_id)
- return 1;
- }
- return 0;
+ return rte_memseg_list_walk(check_socket_mem, &socket);
}
+
/*
* Find what socket a memory address is on. Only works for addresses within
* memsegs, not heap or stack...
@@ -726,16 +729,9 @@ is_mem_on_socket(int32_t socket)
static int32_t
addr_to_socket(void * addr)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- unsigned i;
+ const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);
+ return ms == NULL ? -1 : ms->socket_id;
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if ((ms[i].addr <= addr) &&
- ((uintptr_t)addr <
- ((uintptr_t)ms[i].addr + (uintptr_t)ms[i].len)))
- return ms[i].socket_id;
- }
- return -1;
}
/* Test using rte_[c|m|zm]alloc_socket() on a specific socket */
diff --git a/test/test/test_memory.c b/test/test/test_memory.c
index 972321f1..b96bca77 100644
--- a/test/test/test_memory.c
+++ b/test/test/test_memory.c
@@ -5,8 +5,11 @@
#include <stdio.h>
#include <stdint.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
#include <rte_memory.h>
#include <rte_common.h>
+#include <rte_memzone.h>
#include "test.h"
@@ -23,12 +26,21 @@
*/
static int
+check_mem(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg __rte_unused)
+{
+ volatile uint8_t *mem = (volatile uint8_t *) ms->addr;
+ size_t i, max = ms->len;
+
+ for (i = 0; i < max; i++, mem++)
+ *mem;
+ return 0;
+}
+
+static int
test_memory(void)
{
uint64_t s;
- unsigned i;
- size_t j;
- const struct rte_memseg *mem;
/*
* dump the mapped memory: the python-expect script checks
@@ -45,14 +57,7 @@ test_memory(void)
}
/* try to read memory (should not segfault) */
- mem = rte_eal_get_physmem_layout();
- for (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL ; i++) {
-
- /* check memory */
- for (j = 0; j<mem[i].len; j++) {
- *((volatile uint8_t *) mem[i].addr + j);
- }
- }
+ rte_memseg_walk(check_mem, NULL);
return 0;
}
diff --git a/test/test/test_mempool.c b/test/test/test_mempool.c
index 63f921e2..eebb1f24 100644
--- a/test/test/test_mempool.c
+++ b/test/test/test_mempool.c
@@ -327,17 +327,17 @@ test_mempool_sp_sc(void)
}
if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
printf("Cannot lookup mempool from its name\n");
- rte_mempool_free(mp_spsc);
- RET_ERR();
+ ret = -1;
+ goto err;
}
lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
if (lcore_next >= RTE_MAX_LCORE) {
- rte_mempool_free(mp_spsc);
- RET_ERR();
+ ret = -1;
+ goto err;
}
if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) {
- rte_mempool_free(mp_spsc);
- RET_ERR();
+ ret = -1;
+ goto err;
}
rte_spinlock_init(&scsp_spinlock);
memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
@@ -348,7 +348,10 @@ test_mempool_sp_sc(void)
if (rte_eal_wait_lcore(lcore_next) < 0)
ret = -1;
+
+err:
rte_mempool_free(mp_spsc);
+ mp_spsc = NULL;
return ret;
}
@@ -444,34 +447,6 @@ test_mempool_same_name_twice_creation(void)
return 0;
}
-/*
- * Basic test for mempool_xmem functions.
- */
-static int
-test_mempool_xmem_misc(void)
-{
- uint32_t elt_num, total_size;
- size_t sz;
- ssize_t usz;
-
- elt_num = MAX_KEEP;
- total_size = rte_mempool_calc_obj_size(MEMPOOL_ELT_SIZE, 0, NULL);
- sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX,
- 0);
-
- usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1,
- MEMPOOL_PG_SHIFT_MAX, 0);
-
- if (sz != (size_t)usz) {
- printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
- "returns: %#zx, while expected: %#zx;\n",
- __func__, elt_num, total_size, sz, (size_t)usz);
- return -1;
- }
-
- return 0;
-}
-
static void
walk_cb(struct rte_mempool *mp, void *userdata __rte_unused)
{
@@ -596,9 +571,6 @@ test_mempool(void)
if (test_mempool_same_name_twice_creation() < 0)
goto err;
- if (test_mempool_xmem_misc() < 0)
- goto err;
-
/* test the stack handler */
if (test_mempool_basic(mp_stack, 1) < 0)
goto err;
diff --git a/test/test/test_memzone.c b/test/test/test_memzone.c
index 8ece1ac8..452d7cc5 100644
--- a/test/test/test_memzone.c
+++ b/test/test/test_memzone.c
@@ -104,28 +104,47 @@ test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
return 0;
}
+struct walk_arg {
+ int hugepage_2MB_avail;
+ int hugepage_1GB_avail;
+ int hugepage_16MB_avail;
+ int hugepage_16GB_avail;
+};
+static int
+find_available_pagesz(const struct rte_memseg_list *msl, void *arg)
+{
+ struct walk_arg *wa = arg;
+
+ if (msl->page_sz == RTE_PGSIZE_2M)
+ wa->hugepage_2MB_avail = 1;
+ if (msl->page_sz == RTE_PGSIZE_1G)
+ wa->hugepage_1GB_avail = 1;
+ if (msl->page_sz == RTE_PGSIZE_16M)
+ wa->hugepage_16MB_avail = 1;
+ if (msl->page_sz == RTE_PGSIZE_16G)
+ wa->hugepage_16GB_avail = 1;
+
+ return 0;
+}
+
static int
test_memzone_reserve_flags(void)
{
const struct rte_memzone *mz;
- const struct rte_memseg *ms;
- int hugepage_2MB_avail = 0;
- int hugepage_1GB_avail = 0;
- int hugepage_16MB_avail = 0;
- int hugepage_16GB_avail = 0;
+ struct walk_arg wa;
+ int hugepage_2MB_avail, hugepage_1GB_avail;
+ int hugepage_16MB_avail, hugepage_16GB_avail;
const size_t size = 100;
- int i = 0;
- ms = rte_eal_get_physmem_layout();
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
- hugepage_2MB_avail = 1;
- if (ms[i].hugepage_sz == RTE_PGSIZE_1G)
- hugepage_1GB_avail = 1;
- if (ms[i].hugepage_sz == RTE_PGSIZE_16M)
- hugepage_16MB_avail = 1;
- if (ms[i].hugepage_sz == RTE_PGSIZE_16G)
- hugepage_16GB_avail = 1;
- }
+
+ memset(&wa, 0, sizeof(wa));
+
+ rte_memseg_list_walk(find_available_pagesz, &wa);
+
+ hugepage_2MB_avail = wa.hugepage_2MB_avail;
+ hugepage_1GB_avail = wa.hugepage_1GB_avail;
+ hugepage_16MB_avail = wa.hugepage_16MB_avail;
+ hugepage_16GB_avail = wa.hugepage_16GB_avail;
+
/* Display the availability of 2MB ,1GB, 16MB, 16GB pages */
if (hugepage_2MB_avail)
printf("2MB Huge pages available\n");
@@ -448,61 +467,69 @@ test_memzone_reserve_flags(void)
/* Find the heap with the greatest free block size */
static size_t
-find_max_block_free_size(const unsigned _align)
+find_max_block_free_size(unsigned int align, unsigned int socket_id)
{
struct rte_malloc_socket_stats stats;
- unsigned i, align = _align;
- size_t len = 0;
+ size_t len, overhead;
- for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
- rte_malloc_get_socket_stats(i, &stats);
- if (stats.greatest_free_size > len)
- len = stats.greatest_free_size;
- }
+ rte_malloc_get_socket_stats(socket_id, &stats);
- if (align < RTE_CACHE_LINE_SIZE)
- align = RTE_CACHE_LINE_ROUNDUP(align+1);
+ len = stats.greatest_free_size;
+ overhead = MALLOC_ELEM_OVERHEAD;
- if (len <= MALLOC_ELEM_OVERHEAD + align)
+ if (len == 0)
return 0;
- return len - MALLOC_ELEM_OVERHEAD - align;
+ align = RTE_CACHE_LINE_ROUNDUP(align);
+ overhead += align;
+
+ if (len < overhead)
+ return 0;
+
+ return len - overhead;
}
static int
test_memzone_reserve_max(void)
{
- const struct rte_memzone *mz;
- size_t maxlen;
+ unsigned int i;
- maxlen = find_max_block_free_size(0);
+ for (i = 0; i < rte_socket_count(); i++) {
+ const struct rte_memzone *mz;
+ size_t maxlen;
+ int socket;
- if (maxlen == 0) {
- printf("There is no space left!\n");
- return 0;
- }
+ socket = rte_socket_id_by_idx(i);
+ maxlen = find_max_block_free_size(0, socket);
- mz = rte_memzone_reserve(TEST_MEMZONE_NAME("max_zone"), 0,
- SOCKET_ID_ANY, 0);
- if (mz == NULL){
- printf("Failed to reserve a big chunk of memory - %s\n",
- rte_strerror(rte_errno));
- rte_dump_physmem_layout(stdout);
- rte_memzone_dump(stdout);
- return -1;
- }
+ if (maxlen == 0) {
+ printf("There is no space left!\n");
+ return 0;
+ }
- if (mz->len != maxlen) {
- printf("Memzone reserve with 0 size did not return bigest block\n");
- printf("Expected size = %zu, actual size = %zu\n", maxlen, mz->len);
- rte_dump_physmem_layout(stdout);
- rte_memzone_dump(stdout);
- return -1;
- }
+ mz = rte_memzone_reserve(TEST_MEMZONE_NAME("max_zone"), 0,
+ socket, 0);
+ if (mz == NULL) {
+ printf("Failed to reserve a big chunk of memory - %s\n",
+ rte_strerror(rte_errno));
+ rte_dump_physmem_layout(stdout);
+ rte_memzone_dump(stdout);
+ return -1;
+ }
- if (rte_memzone_free(mz)) {
- printf("Fail memzone free\n");
- return -1;
+ if (mz->len != maxlen) {
+ printf("Memzone reserve with 0 size did not return bigest block\n");
+ printf("Expected size = %zu, actual size = %zu\n",
+ maxlen, mz->len);
+ rte_dump_physmem_layout(stdout);
+ rte_memzone_dump(stdout);
+ return -1;
+ }
+
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
}
return 0;
@@ -511,45 +538,62 @@ test_memzone_reserve_max(void)
static int
test_memzone_reserve_max_aligned(void)
{
- const struct rte_memzone *mz;
- size_t maxlen = 0;
+ unsigned int i;
- /* random alignment */
- rte_srand((unsigned)rte_rdtsc());
- const unsigned align = 1 << ((rte_rand() % 8) + 5); /* from 128 up to 4k alignment */
+ for (i = 0; i < rte_socket_count(); i++) {
+ const struct rte_memzone *mz;
+ size_t maxlen, minlen = 0;
+ int socket;
- maxlen = find_max_block_free_size(align);
+ socket = rte_socket_id_by_idx(i);
- if (maxlen == 0) {
- printf("There is no space left for biggest %u-aligned memzone!\n", align);
- return 0;
- }
+ /* random alignment */
+ rte_srand((unsigned int)rte_rdtsc());
+ const unsigned int align = 1 << ((rte_rand() % 8) + 5); /* from 128 up to 4k alignment */
- mz = rte_memzone_reserve_aligned(TEST_MEMZONE_NAME("max_zone_aligned"),
- 0, SOCKET_ID_ANY, 0, align);
- if (mz == NULL){
- printf("Failed to reserve a big chunk of memory - %s\n",
- rte_strerror(rte_errno));
- rte_dump_physmem_layout(stdout);
- rte_memzone_dump(stdout);
- return -1;
- }
+ /* memzone size may be between size and size - align */
+ minlen = find_max_block_free_size(align, socket);
+ maxlen = find_max_block_free_size(0, socket);
- if (mz->len != maxlen) {
- printf("Memzone reserve with 0 size and alignment %u did not return"
- " bigest block\n", align);
- printf("Expected size = %zu, actual size = %zu\n",
- maxlen, mz->len);
- rte_dump_physmem_layout(stdout);
- rte_memzone_dump(stdout);
- return -1;
- }
+ if (minlen == 0 || maxlen == 0) {
+ printf("There is no space left for biggest %u-aligned memzone!\n",
+ align);
+ return 0;
+ }
- if (rte_memzone_free(mz)) {
- printf("Fail memzone free\n");
- return -1;
- }
+ mz = rte_memzone_reserve_aligned(
+ TEST_MEMZONE_NAME("max_zone_aligned"),
+ 0, socket, 0, align);
+ if (mz == NULL) {
+ printf("Failed to reserve a big chunk of memory - %s\n",
+ rte_strerror(rte_errno));
+ rte_dump_physmem_layout(stdout);
+ rte_memzone_dump(stdout);
+ return -1;
+ }
+ if (mz->addr != RTE_PTR_ALIGN(mz->addr, align)) {
+ printf("Memzone reserve with 0 size and alignment %u did not return aligned block\n",
+ align);
+ rte_dump_physmem_layout(stdout);
+ rte_memzone_dump(stdout);
+ return -1;
+ }
+ if (mz->len < minlen || mz->len > maxlen) {
+ printf("Memzone reserve with 0 size and alignment %u did not return"
+ " bigest block\n", align);
+ printf("Expected size = %zu-%zu, actual size = %zu\n",
+ minlen, maxlen, mz->len);
+ rte_dump_physmem_layout(stdout);
+ rte_memzone_dump(stdout);
+ return -1;
+ }
+
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
+ }
return 0;
}
@@ -890,7 +934,7 @@ test_memzone_basic(void)
const struct rte_memzone *mz;
int memzone_cnt_after, memzone_cnt_expected;
int memzone_cnt_before =
- rte_eal_get_configuration()->mem_config->memzone_cnt;
+ rte_eal_get_configuration()->mem_config->memzones.count;
memzone1 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
SOCKET_ID_ANY, 0);
@@ -914,7 +958,7 @@ test_memzone_basic(void)
(memzone3 != NULL) + (memzone4 != NULL);
memzone_cnt_after =
- rte_eal_get_configuration()->mem_config->memzone_cnt;
+ rte_eal_get_configuration()->mem_config->memzones.count;
if (memzone_cnt_after != memzone_cnt_expected)
return -1;
@@ -993,7 +1037,7 @@ test_memzone_basic(void)
}
memzone_cnt_after =
- rte_eal_get_configuration()->mem_config->memzone_cnt;
+ rte_eal_get_configuration()->mem_config->memzones.count;
if (memzone_cnt_after != memzone_cnt_before)
return -1;
@@ -1014,7 +1058,8 @@ static int
test_memzone(void)
{
/* take note of how many memzones were allocated before running */
- int memzone_cnt = rte_eal_get_configuration()->mem_config->memzone_cnt;
+ int memzone_cnt =
+ rte_eal_get_configuration()->mem_config->memzones.count;
printf("test basic memzone API\n");
if (test_memzone_basic() < 0)
diff --git a/test/test/test_meter.c b/test/test/test_meter.c
index 9f6abf99..8bb47e75 100644
--- a/test/test/test_meter.c
+++ b/test/test/test_meter.c
@@ -53,43 +53,43 @@ static inline int
tm_test_srtcm_config(void)
{
#define SRTCM_CFG_MSG "srtcm_config"
- struct rte_meter_srtcm sm;
+ struct rte_meter_srtcm_profile sp;
struct rte_meter_srtcm_params sparams1;
/* invalid parameter test */
- if(rte_meter_srtcm_config(NULL, NULL) == 0)
+ if (rte_meter_srtcm_profile_config(NULL, NULL) == 0)
melog(SRTCM_CFG_MSG);
- if(rte_meter_srtcm_config(&sm, NULL) == 0)
+ if (rte_meter_srtcm_profile_config(&sp, NULL) == 0)
melog(SRTCM_CFG_MSG);
- if(rte_meter_srtcm_config(NULL, &sparams) == 0)
+ if (rte_meter_srtcm_profile_config(NULL, &sparams) == 0)
melog(SRTCM_CFG_MSG);
/* cbs and ebs can't both be zero */
sparams1 = sparams;
sparams1.cbs = 0;
sparams1.ebs = 0;
- if(rte_meter_srtcm_config(&sm, &sparams1) == 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams1) == 0)
melog(SRTCM_CFG_MSG);
/* cir should never be 0 */
sparams1 = sparams;
sparams1.cir = 0;
- if(rte_meter_srtcm_config(&sm, &sparams1) == 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams1) == 0)
melog(SRTCM_CFG_MSG);
/* one of ebs and cbs can be zero, should be successful */
sparams1 = sparams;
sparams1.ebs = 0;
- if(rte_meter_srtcm_config(&sm, &sparams1) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams1) != 0)
melog(SRTCM_CFG_MSG);
sparams1 = sparams;
sparams1.cbs = 0;
- if(rte_meter_srtcm_config(&sm, &sparams1) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams1) != 0)
melog(SRTCM_CFG_MSG);
/* usual parameter, should be successful */
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
melog(SRTCM_CFG_MSG);
return 0;
@@ -102,47 +102,47 @@ tm_test_srtcm_config(void)
static inline int
tm_test_trtcm_config(void)
{
- struct rte_meter_trtcm tm;
+ struct rte_meter_trtcm_profile tp;
struct rte_meter_trtcm_params tparams1;
#define TRTCM_CFG_MSG "trtcm_config"
/* invalid parameter test */
- if(rte_meter_trtcm_config(NULL, NULL) == 0)
+ if (rte_meter_trtcm_profile_config(NULL, NULL) == 0)
melog(TRTCM_CFG_MSG);
- if(rte_meter_trtcm_config(&tm, NULL) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, NULL) == 0)
melog(TRTCM_CFG_MSG);
- if(rte_meter_trtcm_config(NULL, &tparams) == 0)
+ if (rte_meter_trtcm_profile_config(NULL, &tparams) == 0)
melog(TRTCM_CFG_MSG);
/* cir, cbs, pir and pbs never be zero */
tparams1 = tparams;
tparams1.cir = 0;
- if(rte_meter_trtcm_config(&tm, &tparams1) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0)
melog(TRTCM_CFG_MSG);
tparams1 = tparams;
tparams1.cbs = 0;
- if(rte_meter_trtcm_config(&tm, &tparams1) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0)
melog(TRTCM_CFG_MSG);
tparams1 = tparams;
tparams1.pbs = 0;
- if(rte_meter_trtcm_config(&tm, &tparams1) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0)
melog(TRTCM_CFG_MSG);
tparams1 = tparams;
tparams1.pir = 0;
- if(rte_meter_trtcm_config(&tm, &tparams1) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0)
melog(TRTCM_CFG_MSG);
/* pir should be greater or equal to cir */
tparams1 = tparams;
tparams1.pir = tparams1.cir - 1;
- if(rte_meter_trtcm_config(&tm, &tparams1) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0)
melog(TRTCM_CFG_MSG" pir < cir test");
/* usual parameter, should be successful */
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
melog(TRTCM_CFG_MSG);
return 0;
@@ -155,41 +155,50 @@ static inline int
tm_test_srtcm_color_blind_check(void)
{
#define SRTCM_BLIND_CHECK_MSG "srtcm_blind_check"
+ struct rte_meter_srtcm_profile sp;
struct rte_meter_srtcm sm;
uint64_t time;
uint64_t hz = rte_get_tsc_hz();
/* Test green */
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_blind_check(
- &sm, time, TM_TEST_SRTCM_CBS_DF - 1)
+ if (rte_meter_srtcm_color_blind_check(
+ &sm, &sp, time, TM_TEST_SRTCM_CBS_DF - 1)
!= e_RTE_METER_GREEN)
melog(SRTCM_BLIND_CHECK_MSG" GREEN");
/* Test yellow */
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_blind_check(
- &sm, time, TM_TEST_SRTCM_CBS_DF + 1)
+ if (rte_meter_srtcm_color_blind_check(
+ &sm, &sp, time, TM_TEST_SRTCM_CBS_DF + 1)
!= e_RTE_METER_YELLOW)
melog(SRTCM_BLIND_CHECK_MSG" YELLOW");
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_blind_check(
- &sm, time, (uint32_t)sm.ebs - 1) != e_RTE_METER_YELLOW)
+ if (rte_meter_srtcm_color_blind_check(
+ &sm, &sp, time, (uint32_t)sp.ebs - 1) != e_RTE_METER_YELLOW)
melog(SRTCM_BLIND_CHECK_MSG" YELLOW");
/* Test red */
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_blind_check(
- &sm, time, TM_TEST_SRTCM_EBS_DF + 1)
+ if (rte_meter_srtcm_color_blind_check(
+ &sm, &sp, time, TM_TEST_SRTCM_EBS_DF + 1)
!= e_RTE_METER_RED)
melog(SRTCM_BLIND_CHECK_MSG" RED");
@@ -206,41 +215,50 @@ tm_test_trtcm_color_blind_check(void)
#define TRTCM_BLIND_CHECK_MSG "trtcm_blind_check"
uint64_t time;
+ struct rte_meter_trtcm_profile tp;
struct rte_meter_trtcm tm;
uint64_t hz = rte_get_tsc_hz();
/* Test green */
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_blind_check(
- &tm, time, TM_TEST_TRTCM_CBS_DF - 1)
+ if (rte_meter_trtcm_color_blind_check(
+ &tm, &tp, time, TM_TEST_TRTCM_CBS_DF - 1)
!= e_RTE_METER_GREEN)
melog(TRTCM_BLIND_CHECK_MSG" GREEN");
/* Test yellow */
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_blind_check(
- &tm, time, TM_TEST_TRTCM_CBS_DF + 1)
+ if (rte_meter_trtcm_color_blind_check(
+ &tm, &tp, time, TM_TEST_TRTCM_CBS_DF + 1)
!= e_RTE_METER_YELLOW)
melog(TRTCM_BLIND_CHECK_MSG" YELLOW");
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_blind_check(
- &tm, time, TM_TEST_TRTCM_PBS_DF - 1)
+ if (rte_meter_trtcm_color_blind_check(
+ &tm, &tp, time, TM_TEST_TRTCM_PBS_DF - 1)
!= e_RTE_METER_YELLOW)
melog(TRTCM_BLIND_CHECK_MSG" YELLOW");
/* Test red */
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_blind_check(
- &tm, time, TM_TEST_TRTCM_PBS_DF + 1)
+ if (rte_meter_trtcm_color_blind_check(
+ &tm, &tp, time, TM_TEST_TRTCM_PBS_DF + 1)
!= e_RTE_METER_RED)
melog(TRTCM_BLIND_CHECK_MSG" RED");
@@ -262,36 +280,45 @@ tm_test_srtcm_aware_check
(enum rte_meter_color in[4], enum rte_meter_color out[4])
{
#define SRTCM_AWARE_CHECK_MSG "srtcm_aware_check"
+ struct rte_meter_srtcm_profile sp;
struct rte_meter_srtcm sm;
uint64_t time;
uint64_t hz = rte_get_tsc_hz();
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_aware_check(
- &sm, time, TM_TEST_SRTCM_CBS_DF - 1, in[0]) != out[0])
+ if (rte_meter_srtcm_color_aware_check(
+ &sm, &sp, time, TM_TEST_SRTCM_CBS_DF - 1, in[0]) != out[0])
melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[0], out[0]);
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_aware_check(
- &sm, time, TM_TEST_SRTCM_CBS_DF + 1, in[1]) != out[1])
+ if (rte_meter_srtcm_color_aware_check(
+ &sm, &sp, time, TM_TEST_SRTCM_CBS_DF + 1, in[1]) != out[1])
melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[1], out[1]);
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_aware_check(
- &sm, time, TM_TEST_SRTCM_EBS_DF - 1, in[2]) != out[2])
+ if (rte_meter_srtcm_color_aware_check(
+ &sm, &sp, time, TM_TEST_SRTCM_EBS_DF - 1, in[2]) != out[2])
melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[2], out[2]);
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_aware_check(
- &sm, time, TM_TEST_SRTCM_EBS_DF + 1, in[3]) != out[3])
+ if (rte_meter_srtcm_color_aware_check(
+ &sm, &sp, time, TM_TEST_SRTCM_EBS_DF + 1, in[3]) != out[3])
melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[3], out[3]);
return 0;
@@ -317,7 +344,7 @@ tm_test_srtcm_color_aware_check(void)
out[1] = e_RTE_METER_YELLOW;
out[2] = e_RTE_METER_YELLOW;
out[3] = e_RTE_METER_RED;
- if(tm_test_srtcm_aware_check(in, out) != 0)
+ if (tm_test_srtcm_aware_check(in, out) != 0)
return -1;
/**
@@ -329,7 +356,7 @@ tm_test_srtcm_color_aware_check(void)
out[1] = e_RTE_METER_YELLOW;
out[2] = e_RTE_METER_YELLOW;
out[3] = e_RTE_METER_RED;
- if(tm_test_srtcm_aware_check(in, out) != 0)
+ if (tm_test_srtcm_aware_check(in, out) != 0)
return -1;
/**
@@ -341,7 +368,7 @@ tm_test_srtcm_color_aware_check(void)
out[1] = e_RTE_METER_RED;
out[2] = e_RTE_METER_RED;
out[3] = e_RTE_METER_RED;
- if(tm_test_srtcm_aware_check(in, out) != 0)
+ if (tm_test_srtcm_aware_check(in, out) != 0)
return -1;
return 0;
@@ -360,36 +387,45 @@ tm_test_trtcm_aware_check
(enum rte_meter_color in[4], enum rte_meter_color out[4])
{
#define TRTCM_AWARE_CHECK_MSG "trtcm_aware_check"
+ struct rte_meter_trtcm_profile tp;
struct rte_meter_trtcm tm;
uint64_t time;
uint64_t hz = rte_get_tsc_hz();
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_aware_check(
- &tm, time, TM_TEST_TRTCM_CBS_DF - 1, in[0]) != out[0])
+ if (rte_meter_trtcm_color_aware_check(
+ &tm, &tp, time, TM_TEST_TRTCM_CBS_DF - 1, in[0]) != out[0])
melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[0], out[0]);
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_aware_check(
- &tm, time, TM_TEST_TRTCM_CBS_DF + 1, in[1]) != out[1])
+ if (rte_meter_trtcm_color_aware_check(
+ &tm, &tp, time, TM_TEST_TRTCM_CBS_DF + 1, in[1]) != out[1])
melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[1], out[1]);
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_aware_check(
- &tm, time, TM_TEST_TRTCM_PBS_DF - 1, in[2]) != out[2])
+ if (rte_meter_trtcm_color_aware_check(
+ &tm, &tp, time, TM_TEST_TRTCM_PBS_DF - 1, in[2]) != out[2])
melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[2], out[2]);
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_aware_check(
- &tm, time, TM_TEST_TRTCM_PBS_DF + 1, in[3]) != out[3])
+ if (rte_meter_trtcm_color_aware_check(
+ &tm, &tp, time, TM_TEST_TRTCM_PBS_DF + 1, in[3]) != out[3])
melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[3], out[3]);
return 0;
@@ -415,7 +451,7 @@ tm_test_trtcm_color_aware_check(void)
out[1] = e_RTE_METER_YELLOW;
out[2] = e_RTE_METER_YELLOW;
out[3] = e_RTE_METER_RED;
- if(tm_test_trtcm_aware_check(in, out) != 0)
+ if (tm_test_trtcm_aware_check(in, out) != 0)
return -1;
in[0] = in[1] = in[2] = in[3] = e_RTE_METER_YELLOW;
@@ -423,7 +459,7 @@ tm_test_trtcm_color_aware_check(void)
out[1] = e_RTE_METER_YELLOW;
out[2] = e_RTE_METER_YELLOW;
out[3] = e_RTE_METER_RED;
- if(tm_test_trtcm_aware_check(in, out) != 0)
+ if (tm_test_trtcm_aware_check(in, out) != 0)
return -1;
in[0] = in[1] = in[2] = in[3] = e_RTE_METER_RED;
@@ -431,7 +467,7 @@ tm_test_trtcm_color_aware_check(void)
out[1] = e_RTE_METER_RED;
out[2] = e_RTE_METER_RED;
out[3] = e_RTE_METER_RED;
- if(tm_test_trtcm_aware_check(in, out) != 0)
+ if (tm_test_trtcm_aware_check(in, out) != 0)
return -1;
return 0;
@@ -443,22 +479,22 @@ tm_test_trtcm_color_aware_check(void)
static int
test_meter(void)
{
- if(tm_test_srtcm_config() != 0 )
+ if (tm_test_srtcm_config() != 0)
return -1;
- if(tm_test_trtcm_config() != 0 )
+ if (tm_test_trtcm_config() != 0)
return -1;
- if(tm_test_srtcm_color_blind_check() != 0)
+ if (tm_test_srtcm_color_blind_check() != 0)
return -1;
- if(tm_test_trtcm_color_blind_check()!= 0)
+ if (tm_test_trtcm_color_blind_check() != 0)
return -1;
- if(tm_test_srtcm_color_aware_check()!= 0)
+ if (tm_test_srtcm_color_aware_check() != 0)
return -1;
- if(tm_test_trtcm_color_aware_check()!= 0)
+ if (tm_test_trtcm_color_aware_check() != 0)
return -1;
return 0;
diff --git a/test/test/test_mp_secondary.c b/test/test/test_mp_secondary.c
index cc46cf4d..b597dfcd 100644
--- a/test/test/test_mp_secondary.c
+++ b/test/test/test_mp_secondary.c
@@ -50,32 +50,6 @@
#define launch_proc(ARGV) process_dup(ARGV, \
sizeof(ARGV)/(sizeof(ARGV[0])), __func__)
-#ifdef RTE_EXEC_ENV_LINUXAPP
-static char*
-get_current_prefix(char * prefix, int size)
-{
- char path[PATH_MAX] = {0};
- char buf[PATH_MAX] = {0};
-
- /* get file for config (fd is always 3) */
- snprintf(path, sizeof(path), "/proc/self/fd/%d", 3);
-
- /* return NULL on error */
- if (readlink(path, buf, sizeof(buf)) == -1)
- return NULL;
-
- /* get the basename */
- snprintf(buf, sizeof(buf), "%s", basename(buf));
-
- /* copy string all the way from second char up to start of _config */
- snprintf(prefix, size, "%.*s",
- (int)(strnlen(buf, sizeof(buf)) - sizeof("_config")),
- &buf[1]);
-
- return prefix;
-}
-#endif
-
/*
* This function is called in the primary i.e. main test, to spawn off secondary
* processes to run actual mp tests. Uses fork() and exec pair
diff --git a/test/test/test_pmd_perf.c b/test/test/test_pmd_perf.c
index 911dd762..4549965f 100644
--- a/test/test/test_pmd_perf.c
+++ b/test/test/test_pmd_perf.c
@@ -65,14 +65,7 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .hw_vlan_strip = 0, /**< VLAN strip enabled. */
- .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
- .enable_scatter = 0, /**< scatter rx disabled */
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -97,11 +90,6 @@ static struct rte_eth_txconf tx_conf = {
},
.tx_free_thresh = 32, /* Use PMD default values */
.tx_rs_thresh = 32, /* Use PMD default values */
- .txq_flags = (ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOVLANOFFL |
- ETH_TXQ_FLAGS_NOXSUMSCTP |
- ETH_TXQ_FLAGS_NOXSUMUDP |
- ETH_TXQ_FLAGS_NOXSUMTCP)
};
enum {
@@ -676,7 +664,7 @@ test_pmd_perf(void)
signal(SIGUSR1, signal_handler);
signal(SIGUSR2, signal_handler);
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports < NB_ETHPORTS_USED) {
printf("At least %u port(s) used for perf. test\n",
NB_ETHPORTS_USED);
@@ -698,7 +686,7 @@ test_pmd_perf(void)
reset_count();
num = 0;
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if (socketid == -1) {
socketid = rte_eth_dev_socket_id(portid);
slave_id = alloc_lcore(socketid);
@@ -791,7 +779,7 @@ test_pmd_perf(void)
return -1;
/* port tear down */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if (socketid != rte_eth_dev_socket_id(portid))
continue;
@@ -808,38 +796,29 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
if (!strcmp(mode, "vector")) {
/* vector rx, tx */
- tx_conf.txq_flags = 0xf01;
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.hw_ip_checksum = 0;
- port_conf.rxmode.enable_scatter = 0;
return 0;
} else if (!strcmp(mode, "scalar")) {
/* bulk alloc rx, full-featured tx */
- tx_conf.txq_flags = 0;
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.hw_ip_checksum = 1;
- port_conf.rxmode.enable_scatter = 0;
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "hybrid")) {
/* bulk alloc rx, vector tx
* when vec macro not define,
* using the same rx/tx as scalar
*/
- tx_conf.txq_flags = 0xf01;
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.hw_ip_checksum = 1;
- port_conf.rxmode.enable_scatter = 0;
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "full")) {
/* full feature rx,tx pair */
- tx_conf.txq_flags = 0x0; /* must condition */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.hw_ip_checksum = 0;
- port_conf.rxmode.enable_scatter = 1; /* must condition */
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
return 0;
}
diff --git a/test/test/test_pmd_ring.c b/test/test/test_pmd_ring.c
index 4b891014..19d7d20a 100644
--- a/test/test/test_pmd_ring.c
+++ b/test/test/test_pmd_ring.c
@@ -218,6 +218,8 @@ test_pmd_ring_pair_create_attach(int portd, int porte)
struct rte_mbuf buf, *pbuf = &buf;
struct rte_eth_conf null_conf;
+ memset(&null_conf, 0, sizeof(struct rte_eth_conf));
+
if ((rte_eth_dev_configure(portd, 1, 1, &null_conf) < 0)
|| (rte_eth_dev_configure(porte, 1, 1, &null_conf) < 0)) {
printf("Configure failed for port\n");
@@ -399,7 +401,7 @@ test_pmd_ring(void)
int port, cmdl_port0 = -1;
uint8_t nb_ports;
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
printf("nb_ports=%d\n", (int)nb_ports);
/* create the rings and eth_rings in the test code.
@@ -473,7 +475,7 @@ test_pmd_ring(void)
return -1;
/* find a port created with the --vdev=net_ring0 command line option */
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
struct rte_eth_dev_info dev_info;
rte_eth_dev_info_get(port, &dev_info);
diff --git a/test/test/test_power_acpi_cpufreq.c b/test/test/test_power_acpi_cpufreq.c
index 3bfd0335..22e541d6 100644
--- a/test/test/test_power_acpi_cpufreq.c
+++ b/test/test/test_power_acpi_cpufreq.c
@@ -7,6 +7,7 @@
#include <unistd.h>
#include <limits.h>
#include <string.h>
+#include <inttypes.h>
#include "test.h"
@@ -19,6 +20,13 @@ test_power_acpi_cpufreq(void)
return TEST_SKIPPED;
}
+static int
+test_power_acpi_caps(void)
+{
+ printf("Power management library not supported, skipping test\n");
+ return TEST_SKIPPED;
+}
+
#else
#include <rte_power.h>
@@ -27,7 +35,7 @@ test_power_acpi_cpufreq(void)
#define TEST_POWER_FREQS_NUM_MAX ((unsigned)RTE_MAX_LCORE_FREQS)
#define TEST_POWER_SYSFILE_CUR_FREQ \
- "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq"
+ "/sys/devices/system/cpu/cpu%u/cpufreq/cpuinfo_cur_freq"
static uint32_t total_freq_num;
static uint32_t freqs[TEST_POWER_FREQS_NUM_MAX];
@@ -517,6 +525,42 @@ fail_all:
rte_power_unset_env();
return -1;
}
+
+static int
+test_power_acpi_caps(void)
+{
+ struct rte_power_core_capabilities caps;
+ int ret;
+
+ ret = rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
+ if (ret) {
+ printf("Error setting ACPI environment\n");
+ return -1;
+ }
+
+ ret = rte_power_init(TEST_POWER_LCORE_ID);
+ if (ret < 0) {
+ printf("Cannot initialise power management for lcore %u, this "
+ "may occur if environment is not configured "
+ "correctly(APCI cpufreq) or operating in another valid "
+ "Power management environment\n", TEST_POWER_LCORE_ID);
+ rte_power_unset_env();
+ return -1;
+ }
+
+ ret = rte_power_get_capabilities(TEST_POWER_LCORE_ID, &caps);
+ if (ret) {
+ printf("ACPI: Error getting capabilities\n");
+ return -1;
+ }
+
+ printf("ACPI: Capabilities %"PRIx64"\n", caps.capabilities);
+
+ rte_power_unset_env();
+ return 0;
+}
+
#endif
REGISTER_TEST_COMMAND(power_acpi_cpufreq_autotest, test_power_acpi_cpufreq);
+REGISTER_TEST_COMMAND(power_acpi_caps_autotest, test_power_acpi_caps);
diff --git a/test/test/test_power_kvm_vm.c b/test/test/test_power_kvm_vm.c
index 91b31c44..bce706de 100644
--- a/test/test/test_power_kvm_vm.c
+++ b/test/test/test_power_kvm_vm.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
#include <stdio.h>
@@ -98,7 +98,8 @@ test_power_kvm_vm(void)
printf("Cannot initialise power management for lcore %u, this "
"may occur if environment is not configured "
"correctly(KVM VM) or operating in another valid "
- "Power management environment\n", TEST_POWER_VM_LCORE_ID);
+ "Power management environment\n",
+ TEST_POWER_VM_LCORE_ID);
rte_power_unset_env();
return -1;
}
@@ -175,6 +176,22 @@ test_power_kvm_vm(void)
goto fail_all;
}
+ /* Test KVM_VM Enable Turbo of valid core */
+ ret = rte_power_freq_enable_turbo(TEST_POWER_VM_LCORE_ID);
+ if (ret == -1) {
+ printf("rte_power_freq_enable_turbo failed on valid lcore "
+ "%u\n", TEST_POWER_VM_LCORE_ID);
+ goto fail_all;
+ }
+
+ /* Test KVM_VM Disable Turbo of valid core */
+ ret = rte_power_freq_disable_turbo(TEST_POWER_VM_LCORE_ID);
+ if (ret == -1) {
+ printf("rte_power_freq_disable_turbo failed on valid lcore "
+ "%u\n", TEST_POWER_VM_LCORE_ID);
+ goto fail_all;
+ }
+
/* Test frequency up of valid lcore */
ret = rte_power_freq_up(TEST_POWER_VM_LCORE_ID);
if (ret != 1) {
diff --git a/test/test/test_reorder.c b/test/test/test_reorder.c
index 65e4f38b..ccee4d08 100644
--- a/test/test/test_reorder.c
+++ b/test/test/test_reorder.c
@@ -146,11 +146,11 @@ test_reorder_insert(void)
b = rte_reorder_create("test_insert", rte_socket_id(), size);
TEST_ASSERT_NOT_NULL(b, "Failed to create reorder buffer");
- ret = rte_mempool_get_bulk(p, (void *)bufs, num_bufs);
- TEST_ASSERT_SUCCESS(ret, "Error getting mbuf from pool");
-
- for (i = 0; i < num_bufs; i++)
+ for (i = 0; i < num_bufs; i++) {
+ bufs[i] = rte_pktmbuf_alloc(p);
+ TEST_ASSERT_NOT_NULL(bufs[i], "Packet allocation failed\n");
bufs[i]->seqn = i;
+ }
/* This should fill up order buffer:
* reorder_seq = 0
@@ -165,6 +165,7 @@ test_reorder_insert(void)
ret = -1;
goto exit;
}
+ bufs[i] = NULL;
}
/* early packet - should move mbufs to ready buf and move sequence window
@@ -179,6 +180,7 @@ test_reorder_insert(void)
ret = -1;
goto exit;
}
+ bufs[4] = NULL;
/* early packet from current sequence window - full ready buffer */
bufs[5]->seqn = 2 * size;
@@ -189,6 +191,7 @@ test_reorder_insert(void)
ret = -1;
goto exit;
}
+ bufs[5] = NULL;
/* late packet */
bufs[6]->seqn = 3 * size;
@@ -199,11 +202,15 @@ test_reorder_insert(void)
ret = -1;
goto exit;
}
+ bufs[6] = NULL;
ret = 0;
exit:
- rte_mempool_put_bulk(p, (void *)bufs, num_bufs);
rte_reorder_free(b);
+ for (i = 0; i < num_bufs; i++) {
+ if (bufs[i] != NULL)
+ rte_pktmbuf_free(bufs[i]);
+ }
return ret;
}
@@ -219,6 +226,10 @@ test_reorder_drain(void)
int ret = 0;
unsigned i, cnt;
+ /* initialize all robufs to NULL */
+ for (i = 0; i < num_bufs; i++)
+ robufs[i] = NULL;
+
/* This would create a reorder buffer instance consisting of:
* reorder_seq = 0
* ready_buf: RB[size] = {NULL, NULL, NULL, NULL}
@@ -227,9 +238,6 @@ test_reorder_drain(void)
b = rte_reorder_create("test_drain", rte_socket_id(), size);
TEST_ASSERT_NOT_NULL(b, "Failed to create reorder buffer");
- ret = rte_mempool_get_bulk(p, (void *)bufs, num_bufs);
- TEST_ASSERT_SUCCESS(ret, "Error getting mbuf from pool");
-
/* Check no drained packets if reorder is empty */
cnt = rte_reorder_drain(b, robufs, 1);
if (cnt != 0) {
@@ -239,8 +247,11 @@ test_reorder_drain(void)
goto exit;
}
- for (i = 0; i < num_bufs; i++)
+ for (i = 0; i < num_bufs; i++) {
+ bufs[i] = rte_pktmbuf_alloc(p);
+ TEST_ASSERT_NOT_NULL(bufs[i], "Packet allocation failed\n");
bufs[i]->seqn = i;
+ }
/* Insert packet with seqn 1:
* reorder_seq = 0
@@ -248,6 +259,7 @@ test_reorder_drain(void)
* OB[] = {1, NULL, NULL, NULL}
*/
rte_reorder_insert(b, bufs[1]);
+ bufs[1] = NULL;
cnt = rte_reorder_drain(b, robufs, 1);
if (cnt != 1) {
@@ -256,6 +268,8 @@ test_reorder_drain(void)
ret = -1;
goto exit;
}
+ rte_pktmbuf_free(robufs[0]);
+ robufs[0] = NULL;
/* Insert more packets
* RB[] = {NULL, NULL, NULL, NULL}
@@ -263,18 +277,22 @@ test_reorder_drain(void)
*/
rte_reorder_insert(b, bufs[2]);
rte_reorder_insert(b, bufs[3]);
+ bufs[2] = NULL;
+ bufs[3] = NULL;
/* Insert more packets
* RB[] = {NULL, NULL, NULL, NULL}
* OB[] = {NULL, 2, 3, 4}
*/
rte_reorder_insert(b, bufs[4]);
+ bufs[4] = NULL;
/* Insert more packets
* RB[] = {2, 3, 4, NULL}
* OB[] = {NULL, NULL, 7, NULL}
*/
rte_reorder_insert(b, bufs[7]);
+ bufs[7] = NULL;
/* drained expected packets */
cnt = rte_reorder_drain(b, robufs, 4);
@@ -284,6 +302,10 @@ test_reorder_drain(void)
ret = -1;
goto exit;
}
+ for (i = 0; i < 3; i++) {
+ rte_pktmbuf_free(robufs[i]);
+ robufs[i] = NULL;
+ }
/*
* RB[] = {NULL, NULL, NULL, NULL}
@@ -298,8 +320,13 @@ test_reorder_drain(void)
}
ret = 0;
exit:
- rte_mempool_put_bulk(p, (void *)bufs, num_bufs);
rte_reorder_free(b);
+ for (i = 0; i < num_bufs; i++) {
+ if (bufs[i] != NULL)
+ rte_pktmbuf_free(bufs[i]);
+ if (robufs[i] != NULL)
+ rte_pktmbuf_free(robufs[i]);
+ }
return ret;
}
diff --git a/test/test/test_resource.c b/test/test/test_resource.c
index a3a82f13..8f41e3ba 100644
--- a/test/test/test_resource.c
+++ b/test/test/test_resource.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
*/
#include <stdio.h>
diff --git a/test/test/test_service_cores.c b/test/test/test_service_cores.c
index 86b4073d..ec31882d 100644
--- a/test/test/test_service_cores.c
+++ b/test/test/test_service_cores.c
@@ -325,6 +325,78 @@ service_attr_get(void)
return unregister_all();
}
+/* verify service lcore attr get */
+static int
+service_lcore_attr_get(void)
+{
+ /* ensure all services unregistered so cycle counts are zero */
+ unregister_all();
+
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ service.callback = dummy_cb;
+ snprintf(service.name, sizeof(service.name), DUMMY_SERVICE_NAME);
+ service.capabilities |= RTE_SERVICE_CAP_MT_SAFE;
+ uint32_t id;
+ TEST_ASSERT_EQUAL(0, rte_service_component_register(&service, &id),
+ "Register of service failed");
+ rte_service_component_runstate_set(id, 1);
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(id, 1),
+ "Error: Service start returned non-zero");
+ rte_service_set_stats_enable(id, 1);
+
+ uint64_t lcore_attr_value = 0xdead;
+ uint32_t lcore_attr_id = UINT32_MAX;
+
+ /* check error return values */
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_lcore_attr_get(UINT32_MAX,
+ lcore_attr_id, &lcore_attr_value),
+ "Invalid lcore_id didn't return -EINVAL");
+ TEST_ASSERT_EQUAL(-ENOTSUP, rte_service_lcore_attr_get(rte_lcore_id(),
+ lcore_attr_id, &lcore_attr_value),
+ "Non-service core didn't return -ENOTSUP");
+
+ /* Start service core to increment loop count */
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_id),
+ "Service core add did not return zero");
+ TEST_ASSERT_EQUAL(0, rte_service_map_lcore_set(id, slcore_id, 1),
+ "Enabling valid service and core failed");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_start(slcore_id),
+ "Starting service core failed");
+
+ /* wait for the service lcore to run */
+ rte_delay_ms(200);
+
+ lcore_attr_id = RTE_SERVICE_LCORE_ATTR_LOOPS;
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_attr_get(slcore_id,
+ lcore_attr_id, &lcore_attr_value),
+ "Valid lcore_attr_get() call didn't return success");
+ int loops_gt_zero = lcore_attr_value > 0;
+ TEST_ASSERT_EQUAL(1, loops_gt_zero,
+ "lcore_attr_get() failed to get loops "
+ "(expected > zero)");
+
+ lcore_attr_id++; // invalid lcore attr id
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_lcore_attr_get(slcore_id,
+ lcore_attr_id, &lcore_attr_value),
+ "Invalid lcore attr didn't return -EINVAL");
+
+ rte_service_lcore_stop(slcore_id);
+
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_attr_reset_all(slcore_id),
+ "Valid lcore_attr_reset_all() didn't return success");
+
+ lcore_attr_id = RTE_SERVICE_LCORE_ATTR_LOOPS;
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_attr_get(slcore_id,
+ lcore_attr_id, &lcore_attr_value),
+ "Valid lcore_attr_get() call didn't return success");
+ TEST_ASSERT_EQUAL(0, lcore_attr_value,
+ "lcore_attr_get() didn't get correct loop count "
+ "(zero)");
+
+ return unregister_all();
+}
+
/* verify service dump */
static int
service_dump(void)
@@ -771,6 +843,48 @@ service_lcore_start_stop(void)
return unregister_all();
}
+/* stop a service and wait for it to become inactive */
+static int
+service_may_be_active(void)
+{
+ const uint32_t sid = 0;
+ int i;
+
+ /* expected failure cases */
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_may_be_active(10000),
+ "Invalid service may be active check did not fail");
+
+ /* start the service */
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 1),
+ "Starting valid service failed");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_id),
+ "Add service core failed when not in use before");
+ TEST_ASSERT_EQUAL(0, rte_service_map_lcore_set(sid, slcore_id, 1),
+ "Enabling valid service on valid core failed");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_start(slcore_id),
+ "Service core start after add failed");
+
+ /* ensures core really is running the service function */
+ TEST_ASSERT_EQUAL(1, service_lcore_running_check(),
+ "Service core expected to poll service but it didn't");
+
+ /* stop the service */
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 0),
+ "Error: Service stop returned non-zero");
+
+ /* give the service 100ms to stop running */
+ for (i = 0; i < 100; i++) {
+ if (!rte_service_may_be_active(sid))
+ break;
+ rte_delay_ms(SERVICE_DELAY);
+ }
+
+ TEST_ASSERT_EQUAL(0, rte_service_may_be_active(sid),
+ "Error: Service not stopped after 100ms");
+
+ return unregister_all();
+}
+
static struct unit_test_suite service_tests = {
.suite_name = "service core test suite",
.setup = testsuite_setup,
@@ -781,6 +895,7 @@ static struct unit_test_suite service_tests = {
TEST_CASE_ST(dummy_register, NULL, service_get_by_name),
TEST_CASE_ST(dummy_register, NULL, service_dump),
TEST_CASE_ST(dummy_register, NULL, service_attr_get),
+ TEST_CASE_ST(dummy_register, NULL, service_lcore_attr_get),
TEST_CASE_ST(dummy_register, NULL, service_probe_capability),
TEST_CASE_ST(dummy_register, NULL, service_start_stop),
TEST_CASE_ST(dummy_register, NULL, service_lcore_add_del),
@@ -790,6 +905,7 @@ static struct unit_test_suite service_tests = {
TEST_CASE_ST(dummy_register, NULL, service_mt_safe_poll),
TEST_CASE_ST(dummy_register, NULL, service_app_lcore_mt_safe),
TEST_CASE_ST(dummy_register, NULL, service_app_lcore_mt_unsafe),
+ TEST_CASE_ST(dummy_register, NULL, service_may_be_active),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
diff --git a/test/test/test_table.c b/test/test/test_table.c
index f01652dc..a4b0ed65 100644
--- a/test/test/test_table.c
+++ b/test/test/test_table.c
@@ -54,6 +54,17 @@ uint64_t pipeline_test_hash(void *key,
return signature;
}
+uint32_t pipeline_test_hash_cuckoo(const void *key,
+ __attribute__((unused)) uint32_t key_size,
+ __attribute__((unused)) uint32_t seed)
+{
+ const uint32_t *k32 = key;
+ uint32_t ip_dst = rte_be_to_cpu_32(k32[0]);
+ uint32_t signature = ip_dst;
+
+ return signature;
+}
+
static void
app_free_resources(void) {
int i;
diff --git a/test/test/test_table.h b/test/test/test_table.h
index a4d3ca0c..a66342cb 100644
--- a/test/test/test_table.h
+++ b/test/test/test_table.h
@@ -6,6 +6,7 @@
#include <rte_table_lpm.h>
#include <rte_table_lpm_ipv6.h>
#include <rte_table_hash.h>
+#include <rte_table_hash_cuckoo.h>
#include <rte_table_array.h>
#include <rte_pipeline.h>
@@ -106,6 +107,11 @@ uint64_t pipeline_test_hash(
__attribute__((unused)) uint32_t key_size,
__attribute__((unused)) uint64_t seed);
+uint32_t pipeline_test_hash_cuckoo(
+ const void *key,
+ __attribute__((unused)) uint32_t key_size,
+ __attribute__((unused)) uint32_t seed);
+
/* Extern variables */
extern struct rte_pipeline *p;
extern struct rte_ring *rings_rx[N_PORTS];
diff --git a/test/test/test_table_combined.c b/test/test/test_table_combined.c
index 5e8e119a..73ad0155 100644
--- a/test/test/test_table_combined.c
+++ b/test/test/test_table_combined.c
@@ -778,14 +778,14 @@ test_table_hash_cuckoo_combined(void)
int status, i;
/* Traffic flow */
- struct rte_table_hash_params cuckoo_params = {
+ struct rte_table_hash_cuckoo_params cuckoo_params = {
.name = "TABLE",
.key_size = 32,
.key_offset = APP_METADATA_OFFSET(32),
.key_mask = NULL,
.n_keys = 1 << 16,
.n_buckets = 1 << 16,
- .f_hash = pipeline_test_hash,
+ .f_hash = pipeline_test_hash_cuckoo,
.seed = 0,
};
diff --git a/test/test/test_table_pipeline.c b/test/test/test_table_pipeline.c
index 055a1a4e..441338ac 100644
--- a/test/test/test_table_pipeline.c
+++ b/test/test/test_table_pipeline.c
@@ -69,9 +69,9 @@ rte_pipeline_table_action_handler_hit
table_action_stub_hit(struct rte_pipeline *p, struct rte_mbuf **pkts,
uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg);
-rte_pipeline_table_action_handler_miss
+static int
table_action_stub_miss(struct rte_pipeline *p, struct rte_mbuf **pkts,
- uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg);
+ uint64_t pkts_mask, struct rte_pipeline_table_entry *entry, void *arg);
rte_pipeline_table_action_handler_hit
table_action_0x00(__attribute__((unused)) struct rte_pipeline *p,
@@ -101,11 +101,11 @@ table_action_stub_hit(__attribute__((unused)) struct rte_pipeline *p,
return 0;
}
-rte_pipeline_table_action_handler_miss
+static int
table_action_stub_miss(struct rte_pipeline *p,
__attribute__((unused)) struct rte_mbuf **pkts,
uint64_t pkts_mask,
- __attribute__((unused)) struct rte_pipeline_table_entry **entry,
+ __attribute__((unused)) struct rte_pipeline_table_entry *entry,
__attribute__((unused)) void *arg)
{
printf("STUB Table Action Miss - setting mask to 0x%"PRIx64"\n",
@@ -517,8 +517,7 @@ test_table_pipeline(void)
/* TEST - one packet per port */
action_handler_hit = NULL;
- action_handler_miss =
- (rte_pipeline_table_action_handler_miss) table_action_stub_miss;
+ action_handler_miss = table_action_stub_miss;
table_entry_default_action = RTE_PIPELINE_ACTION_PORT;
override_miss_mask = 0x01; /* one packet per port */
setup_pipeline(e_TEST_STUB);
@@ -553,8 +552,7 @@ test_table_pipeline(void)
printf("TEST - two tables, hitmask override to 0x01\n");
connect_miss_action_to_table = 1;
- action_handler_miss =
- (rte_pipeline_table_action_handler_miss)table_action_stub_miss;
+ action_handler_miss = table_action_stub_miss;
override_miss_mask = 0x01;
setup_pipeline(e_TEST_STUB);
if (test_pipeline_single_filter(e_TEST_STUB, 2) < 0)
diff --git a/test/test/test_table_tables.c b/test/test/test_table_tables.c
index a7a69b85..20df2e92 100644
--- a/test/test/test_table_tables.c
+++ b/test/test/test_table_tables.c
@@ -903,14 +903,14 @@ test_table_hash_cuckoo(void)
uint32_t entry_size = 1;
/* Initialize params and create tables */
- struct rte_table_hash_params cuckoo_params = {
+ struct rte_table_hash_cuckoo_params cuckoo_params = {
.name = "TABLE",
.key_size = 32,
.key_offset = APP_METADATA_OFFSET(32),
.key_mask = NULL,
.n_keys = 1 << 16,
.n_buckets = 1 << 16,
- .f_hash = (rte_table_hash_op_hash)pipeline_test_hash,
+ .f_hash = pipeline_test_hash_cuckoo,
.seed = 0,
};
@@ -941,7 +941,7 @@ test_table_hash_cuckoo(void)
if (table != NULL)
return -4;
- cuckoo_params.f_hash = pipeline_test_hash;
+ cuckoo_params.f_hash = pipeline_test_hash_cuckoo;
cuckoo_params.name = NULL;
table = rte_table_hash_cuckoo_ops.f_create(&cuckoo_params,
diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 2f5b31db..591b3098 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -91,6 +91,7 @@ virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
dev_info->max_tx_queues = (uint16_t)512;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -216,10 +217,11 @@ static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}
-static void
+static int
virtual_ethdev_mac_address_set(__rte_unused struct rte_eth_dev *dev,
__rte_unused struct ether_addr *addr)
{
+ return 0;
}
static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
@@ -589,6 +591,8 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
+ rte_eth_dev_probing_finish(eth_dev);
+
return eth_dev->data->port_id;
err: