author    Christian Ehrhardt <christian.ehrhardt@canonical.com>  2018-06-01 09:09:08 +0200
committer Christian Ehrhardt <christian.ehrhardt@canonical.com>  2018-06-01 09:12:07 +0200
commit    1bd9b61222f3a81ffe770fc00b70ded6e760c42b (patch)
tree      0bf7d996cf0664796687c1be6d22958fcf6a8096 /test
parent    bb4e158029645f37809fcf81a3acddd6fa11f88a (diff)
New upstream version 18.05
Change-Id: Icd4170ddc4f63aeae5d0559490e5195b5349f9c2
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'test')
52 files changed, 7743 insertions, 554 deletions
diff --git a/test/bpf/dummy.c b/test/bpf/dummy.c
new file mode 100644
index 00000000..5851469e
--- /dev/null
+++ b/test/bpf/dummy.c
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+/*
+ * eBPF program sample.
+ * Does nothing, always returns success.
+ * Used to measure BPF infrastructure overhead.
+ * To compile:
+ * clang -O2 -target bpf -c dummy.c
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+
+uint64_t
+entry(void *arg)
+{
+	return 1;
+}
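As a point of reference for how these samples are meant to be consumed, here is a minimal sketch of a host-side loader built on the librte_bpf API that this release introduces (rte_bpf_elf_load(), rte_bpf_exec(), rte_bpf_destroy()). The ".text" section name, the rte_bpf_prm initialization, and the helper name run_dummy() are assumptions for illustration; ELF loading also requires DPDK to be built with libelf support.

#include <inttypes.h>
#include <stdio.h>
#include <rte_bpf.h>

/* Hypothetical helper: load the clang-compiled dummy.o and invoke it once.
 * Calling rte_bpf_exec() in a tight loop is what yields the infrastructure
 * overhead measurement the comment above refers to. */
static void
run_dummy(const char *obj_path)
{
	struct rte_bpf_prm prm = {
		/* entry() takes one pass-by-value argument */
		.prog_arg = {
			.type = RTE_BPF_ARG_RAW,
			.size = sizeof(uint64_t),
		},
	};
	struct rte_bpf *bpf;
	uint64_t rc;

	bpf = rte_bpf_elf_load(&prm, obj_path, ".text");
	if (bpf == NULL) {
		printf("failed to load %s\n", obj_path);
		return;
	}
	rc = rte_bpf_exec(bpf, NULL);	/* expected to return 1 */
	printf("entry() returned %" PRIu64 "\n", rc);
	rte_bpf_destroy(bpf);
}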
diff --git a/test/bpf/mbuf.h b/test/bpf/mbuf.h
new file mode 100644
index 00000000..f24f908d
--- /dev/null
+++ b/test/bpf/mbuf.h
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright 2014 6WIND S.A.
+ */
+
+/*
+ * Snippet from dpdk.org rte_mbuf.h,
+ * used to provide BPF programs with information about the rte_mbuf layout.
+ */
+
+#ifndef _MBUF_H_
+#define _MBUF_H_
+
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Packet Offload Features Flags. It also carries packet type information.
+ * Critical resources. Both RX/TX share these bits. Be cautious on any change.
+ *
+ * - RX flags start at bit position zero, and get added to the left of previous
+ *   flags.
+ * - The most-significant 3 bits are reserved for generic mbuf flags
+ * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
+ *   added to the right of the previously defined flags i.e. they should count
+ *   downwards, not upwards.
+ *
+ * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
+ * rte_get_tx_ol_flag_name().
+ */
+
+/**
+ * RX packet is a 802.1q VLAN packet. This flag was set by PMDs when
+ * the packet is recognized as a VLAN, but the behavior between PMDs
+ * was not the same. This flag is kept for some time to avoid breaking
+ * applications and should be replaced by PKT_RX_VLAN_STRIPPED.
+ */
+#define PKT_RX_VLAN_PKT (1ULL << 0)
+
+#define PKT_RX_RSS_HASH (1ULL << 1)
+/**< RX packet with RSS hash result. */
+#define PKT_RX_FDIR (1ULL << 2)
+/**< RX packet with FDIR match indication. */
+
+/**
+ * Deprecated.
+ * Checking this flag alone is deprecated: check the 2 bits of
+ * PKT_RX_L4_CKSUM_MASK.
+ * This flag was set when the L4 checksum of a packet was detected as
+ * wrong by the hardware.
+ */
+#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
+
+/**
+ * Deprecated.
+ * Checking this flag alone is deprecated: check the 2 bits of
+ * PKT_RX_IP_CKSUM_MASK.
+ * This flag was set when the IP checksum of a packet was detected as
+ * wrong by the hardware.
+ */
+#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
+
+#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
+/**< External IP header checksum error. */
+
+/**
+ * A vlan has been stripped by the hardware and its tci is saved in
+ * mbuf->vlan_tci. This can only happen if vlan stripping is enabled
+ * in the RX configuration of the PMD.
+ */
+#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
+
+/**
+ * Mask of bits used to determine the status of RX IP checksum.
+ * - PKT_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
+ * - PKT_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
+ * - PKT_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
+ * - PKT_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
+ *   data, but the integrity of the IP header is verified.
+ */
+#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
+
+#define PKT_RX_IP_CKSUM_UNKNOWN 0
+#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
+#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
+#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
+
+/**
+ * Mask of bits used to determine the status of RX L4 checksum.
+ * - PKT_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
+ * - PKT_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
+ * - PKT_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
+ * - PKT_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
+ *   data, but the integrity of the L4 data is verified.
+ */
+#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
+
+#define PKT_RX_L4_CKSUM_UNKNOWN 0
+#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
+#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
+#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
+
+#define PKT_RX_IEEE1588_PTP (1ULL << 9)
+/**< RX IEEE1588 L2 Ethernet PT Packet. */
+#define PKT_RX_IEEE1588_TMST (1ULL << 10)
+/**< RX IEEE1588 L2/L4 timestamped packet. */
+#define PKT_RX_FDIR_ID (1ULL << 13)
+/**< FD id reported if FDIR match. */
+#define PKT_RX_FDIR_FLX (1ULL << 14)
+/**< Flexible bytes reported if FDIR match. */
+
+/**
+ * The 2 vlans have been stripped by the hardware and their tci are
+ * saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
+ * This can only happen if vlan stripping is enabled in the RX
+ * configuration of the PMD. If this flag is set, PKT_RX_VLAN_STRIPPED
+ * must also be set.
+ */
+#define PKT_RX_QINQ_STRIPPED (1ULL << 15)
+
+/**
+ * Deprecated.
+ * RX packet with double VLAN stripped.
+ * This flag is replaced by PKT_RX_QINQ_STRIPPED.
+ */
+#define PKT_RX_QINQ_PKT PKT_RX_QINQ_STRIPPED
+
+/**
+ * When packets are coalesced by a hardware or virtual driver, this flag
+ * can be set in the RX mbuf, meaning that the m->tso_segsz field is
+ * valid and is set to the segment size of original packets.
+ */
+#define PKT_RX_LRO (1ULL << 16)
+
+/**
+ * Indicate that the timestamp field in the mbuf is valid.
+ */
+#define PKT_RX_TIMESTAMP (1ULL << 17)
+
+/* add new RX flags here */
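Since checking PKT_RX_IP_CKSUM_BAD alone is deprecated, a receive path is expected to classify the whole 2-bit field. A small sketch of that pattern, using only the macros defined above (rx_ip_cksum_status() is a hypothetical helper; the same structure applies to PKT_RX_L4_CKSUM_MASK):

#include <stdint.h>

/* Sketch: decode the 2-bit RX IP checksum status from mbuf ol_flags. */
static const char *
rx_ip_cksum_status(uint64_t ol_flags)
{
	uint64_t st = ol_flags & PKT_RX_IP_CKSUM_MASK;

	if (st == PKT_RX_IP_CKSUM_UNKNOWN)
		return "unknown";
	else if (st == PKT_RX_IP_CKSUM_BAD)
		return "bad";
	else if (st == PKT_RX_IP_CKSUM_GOOD)
		return "good";
	else	/* PKT_RX_IP_CKSUM_NONE: bad in packet data, header intact */
		return "not checked, header integrity verified";
}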
+
+/* add new TX flags here */
+
+/**
+ * Offload the MACsec. This flag must be set by the application to enable
+ * this offload feature for a packet to be transmitted.
+ */
+#define PKT_TX_MACSEC (1ULL << 44)
+
+/**
+ * Bits 45:48 used for the tunnel type.
+ * When doing Tx offload like TSO or checksum, the HW needs to configure the
+ * tunnel type into the HW descriptors.
+ */
+#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
+#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
+#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
+#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
+/**< TX packet with MPLS-in-UDP RFC 7510 header. */
+#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
+/* add new TX TUNNEL type here */
+#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
+
+/**
+ * Second VLAN insertion (QinQ) flag.
+ */
+#define PKT_TX_QINQ_PKT (1ULL << 49)
+/**< TX packet with double VLAN inserted. */
+
+/**
+ * TCP segmentation offload. To enable this offload feature for a
+ * packet to be transmitted on hardware supporting TSO:
+ *  - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
+ *    PKT_TX_TCP_CKSUM)
+ *  - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
+ *  - if it's IPv4, set the PKT_TX_IP_CKSUM flag and write the IP checksum
+ *    to 0 in the packet
+ *  - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
+ *  - calculate the pseudo header checksum without taking ip_len into
+ *    account, and set it in the TCP header. Refer to rte_ipv4_phdr_cksum()
+ *    and rte_ipv6_phdr_cksum(), which can be used as helpers.
+ */
+#define PKT_TX_TCP_SEG (1ULL << 50)
+
+#define PKT_TX_IEEE1588_TMST (1ULL << 51)
+/**< TX IEEE1588 packet to timestamp. */
+
+/**
+ * Bits 52+53 used for L4 packet type with checksum enabled: 00: Reserved,
+ * 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum. To use hardware
+ * L4 checksum offload, the user needs to:
+ *  - fill l2_len and l3_len in mbuf
+ *  - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM
+ *  - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
+ *  - calculate the pseudo header checksum and set it in the L4 header (only
+ *    for TCP or UDP). See rte_ipv4_phdr_cksum() and rte_ipv6_phdr_cksum().
+ *    For SCTP, set the crc field to 0.
+ */
+#define PKT_TX_L4_NO_CKSUM (0ULL << 52)
+/**< Disable L4 cksum of TX pkt. */
+#define PKT_TX_TCP_CKSUM (1ULL << 52)
+/**< TCP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_SCTP_CKSUM (2ULL << 52)
+/**< SCTP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_UDP_CKSUM (3ULL << 52)
+/**< UDP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_L4_MASK (3ULL << 52)
+/**< Mask for L4 cksum offload request. */
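The TSO checklist above translates into a handful of mbuf field assignments. A sketch for an IPv4/TCP packet, assuming the headers sit contiguously in the first segment; struct ipv4_hdr, struct tcp_hdr and rte_ipv4_phdr_cksum() are the DPDK 18.05 names from rte_ip.h/rte_tcp.h, and request_tso() itself is hypothetical:

#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>

/* Sketch: request TSO for an IPv4/TCP mbuf, following the checklist in
 * the PKT_TX_TCP_SEG comment. PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM,
 * so the latter is not set explicitly. */
static void
request_tso(struct rte_mbuf *m, uint16_t l2_len, uint16_t l3_len,
	uint16_t l4_len, uint16_t mss)
{
	struct ipv4_hdr *ip;
	struct tcp_hdr *tcp;

	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
	m->l2_len = l2_len;
	m->l3_len = l3_len;
	m->l4_len = l4_len;
	m->tso_segsz = mss;

	ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, l2_len);
	ip->hdr_checksum = 0;	/* HW recomputes the IP checksum */
	tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
	/* pseudo-header checksum; with PKT_TX_TCP_SEG set, the helper
	 * leaves ip_len out of the calculation, as required for TSO */
	tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}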
+
+/**
+ * Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should
+ * also be set by the application, although a PMD will only check
+ * PKT_TX_IP_CKSUM.
+ *  - set the IP checksum field in the packet to 0
+ *  - fill the mbuf offload information: l2_len, l3_len
+ */
+#define PKT_TX_IP_CKSUM (1ULL << 54)
+
+/**
+ * Packet is IPv4. This flag must be set when using any offload feature
+ * (TSO, L3 or L4 checksum) to tell the NIC that the packet is an IPv4
+ * packet. If the packet is a tunneled packet, this flag is related to
+ * the inner headers.
+ */
+#define PKT_TX_IPV4 (1ULL << 55)
+
+/**
+ * Packet is IPv6. This flag must be set when using an offload feature
+ * (TSO or L4 checksum) to tell the NIC that the packet is an IPv6
+ * packet. If the packet is a tunneled packet, this flag is related to
+ * the inner headers.
+ */
+#define PKT_TX_IPV6 (1ULL << 56)
+
+#define PKT_TX_VLAN_PKT (1ULL << 57)
+/**< TX packet is a 802.1q VLAN packet. */
+
+/**
+ * Offload the IP checksum of an external header in the hardware. The
+ * flag PKT_TX_OUTER_IPV4 should also be set by the application, although
+ * a PMD will only check PKT_TX_IP_CKSUM. The IP checksum field in the
+ * packet must be set to 0.
+ *  - set the outer IP checksum field in the packet to 0
+ *  - fill the mbuf offload information: outer_l2_len, outer_l3_len
+ */
+#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
+
+/**
+ * Packet outer header is IPv4. This flag must be set when using any
+ * outer offload feature (L3 or L4 checksum) to tell the NIC that the
+ * outer header of the tunneled packet is an IPv4 packet.
+ */
+#define PKT_TX_OUTER_IPV4 (1ULL << 59)
+
+/**
+ * Packet outer header is IPv6. This flag must be set when using any
+ * outer offload feature (L4 checksum) to tell the NIC that the outer
+ * header of the tunneled packet is an IPv6 packet.
+ */
+#define PKT_TX_OUTER_IPV6 (1ULL << 60)
+
+/**
+ * Bitmask of all supported packet Tx offload features flags,
+ * which can be set for a packet.
+ */
+#define PKT_TX_OFFLOAD_MASK ( \
+		PKT_TX_IP_CKSUM | \
+		PKT_TX_L4_MASK | \
+		PKT_TX_OUTER_IP_CKSUM | \
+		PKT_TX_TCP_SEG | \
+		PKT_TX_IEEE1588_TMST | \
+		PKT_TX_QINQ_PKT | \
+		PKT_TX_VLAN_PKT | \
+		PKT_TX_TUNNEL_MASK | \
+		PKT_TX_MACSEC)
+
+#define __RESERVED (1ULL << 61) /**< reserved for future mbuf use */
+
+#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
+
+/* Use final bit of flags to indicate a control mbuf */
+#define CTRL_MBUF_FLAG (1ULL << 63) /**< Mbuf contains control data */
+
+/** Alignment constraint of mbuf private area. */
+#define RTE_MBUF_PRIV_ALIGN 8
+
+/**
+ * Get the name of a RX offload flag
+ *
+ * @param mask
+ *   The mask describing the flag.
+ * @return
+ *   The name of this flag, or NULL if it's not a valid RX flag.
+ */
+const char *rte_get_rx_ol_flag_name(uint64_t mask);
+
+/**
+ * Dump the list of RX offload flags in a buffer
+ *
+ * @param mask
+ *   The mask describing the RX flags.
+ * @param buf
+ *   The output buffer.
+ * @param buflen
+ *   The length of the buffer.
+ * @return
+ *   0 on success, (-1) on error.
+ */
+int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
+
+/**
+ * Get the name of a TX offload flag
+ *
+ * @param mask
+ *   The mask describing the flag. Usually only one bit must be set.
+ *   Several bits can be given if they belong to the same mask.
+ *   Ex: PKT_TX_L4_MASK.
+ * @return
+ *   The name of this flag, or NULL if it's not a valid TX flag.
+ */
+const char *rte_get_tx_ol_flag_name(uint64_t mask);
+
+/**
+ * Dump the list of TX offload flags in a buffer
+ *
+ * @param mask
+ *   The mask describing the TX flags.
+ * @param buf
+ *   The output buffer.
+ * @param buflen
+ *   The length of the buffer.
+ * @return
+ *   0 on success, (-1) on error.
+ */
+int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
+
+/**
+ * Some NICs need at least 2KB buffer to RX standard Ethernet frame without
+ * splitting it into multiple segments.
+ * So, for mbufs that planned to be involved into RX/TX, the recommended
+ * minimal buffer length is 2KB + RTE_PKTMBUF_HEADROOM.
+ */
+#define RTE_MBUF_DEFAULT_DATAROOM 2048
+#define RTE_MBUF_DEFAULT_BUF_SIZE \
+	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
+
+/* define a set of marker types that can be used to refer to set points in the
+ * mbuf.
+ */
+__extension__
+typedef void *MARKER[0]; /**< generic marker for a point in a structure */
+__extension__
+typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
+__extension__
+typedef uint64_t MARKER64[0];
+/**< marker that allows us to overwrite 8 bytes with a single assignment */
+
+typedef struct {
+	volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * The generic rte_mbuf, containing a packet mbuf.
+ */
+struct rte_mbuf {
+	MARKER cacheline0;
+
+	void *buf_addr; /**< Virtual address of segment buffer. */
+	/**
+	 * Physical address of segment buffer.
+	 * Force alignment to 8-bytes, so as to ensure we have the exact
+	 * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
+	 * working on vector drivers easier.
+ */ + phys_addr_t buf_physaddr __rte_aligned(sizeof(phys_addr_t)); + + /* next 8 bytes are initialised on RX descriptor rearm */ + MARKER64 rearm_data; + uint16_t data_off; + + /** + * Reference counter. Its size should at least equal to the size + * of port field (16 bits), to support zero-copy broadcast. + * It should only be accessed using the following functions: + * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and + * rte_mbuf_refcnt_set(). The functionality of these functions (atomic, + * or non-atomic) is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC + * config option. + */ + RTE_STD_C11 + union { + rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */ + uint16_t refcnt; + /**< Non-atomically accessed refcnt */ + }; + uint16_t nb_segs; /**< Number of segments. */ + + /** Input port (16 bits to support more than 256 virtual ports). */ + uint16_t port; + + uint64_t ol_flags; /**< Offload features. */ + + /* remaining bytes are set on RX when pulling packet from descriptor */ + MARKER rx_descriptor_fields1; + + /* + * The packet type, which is the combination of outer/inner L2, L3, L4 + * and tunnel types. The packet_type is about data really present in the + * mbuf. Example: if vlan stripping is enabled, a received vlan packet + * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the + * vlan is stripped from the data. + */ + RTE_STD_C11 + union { + uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */ + struct { + uint32_t l2_type:4; /**< (Outer) L2 type. */ + uint32_t l3_type:4; /**< (Outer) L3 type. */ + uint32_t l4_type:4; /**< (Outer) L4 type. */ + uint32_t tun_type:4; /**< Tunnel type. */ + uint32_t inner_l2_type:4; /**< Inner L2 type. */ + uint32_t inner_l3_type:4; /**< Inner L3 type. */ + uint32_t inner_l4_type:4; /**< Inner L4 type. */ + }; + }; + + uint32_t pkt_len; /**< Total pkt len: sum of all segments. */ + uint16_t data_len; /**< Amount of data in segment buffer. */ + /** VLAN TCI (CPU order), valid if PKT_RX_VLAN_STRIPPED is set. */ + uint16_t vlan_tci; + + union { + uint32_t rss; /**< RSS hash result if RSS enabled */ + struct { + RTE_STD_C11 + union { + struct { + uint16_t hash; + uint16_t id; + }; + uint32_t lo; + /**< Second 4 flexible bytes */ + }; + uint32_t hi; + /**< First 4 flexible bytes or FD ID, dependent on + * PKT_RX_FDIR_* flag in ol_flags. + */ + } fdir; /**< Filter identifier if FDIR enabled */ + struct { + uint32_t lo; + uint32_t hi; + } sched; /**< Hierarchical scheduler */ + uint32_t usr; + /**< User defined tags. See rte_distributor_process() */ + } hash; /**< hash information */ + + /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */ + uint16_t vlan_tci_outer; + + uint16_t buf_len; /**< Length of segment buffer. */ + + /** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference + * are not normalized but are always the same for a given port. + */ + uint64_t timestamp; + + /* second cache line - fields only used in slow path or on TX */ + MARKER cacheline1 __rte_cache_min_aligned; + + RTE_STD_C11 + union { + void *userdata; /**< Can be used for external metadata */ + uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */ + }; + + struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */ + struct rte_mbuf *next; /**< Next segment of scattered packet. 
*/ + + /* fields to support TX offloads */ + RTE_STD_C11 + union { + uint64_t tx_offload; /**< combined for easy fetch */ + __extension__ + struct { + uint64_t l2_len:7; + /**< L2 (MAC) Header Length for non-tunneling pkt. + * Outer_L4_len + ... + Inner_L2_len for tunneling pkt. + */ + uint64_t l3_len:9; /**< L3 (IP) Header Length. */ + uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */ + uint64_t tso_segsz:16; /**< TCP TSO segment size */ + + /* fields for TX offloading of tunnels */ + uint64_t outer_l3_len:9; + /**< Outer L3 (IP) Hdr Length. */ + uint64_t outer_l2_len:7; + /**< Outer L2 (MAC) Hdr Length. */ + + /* uint64_t unused:8; */ + }; + }; + + /** Size of the application private data. In case of an indirect + * mbuf, it stores the direct mbuf private data size. + */ + uint16_t priv_size; + + /** Timesync flags for use with IEEE1588. */ + uint16_t timesync; + + /** Sequence number. See also rte_reorder_insert(). */ + uint32_t seqn; + +} __rte_cache_aligned; + + +/** + * Returns TRUE if given mbuf is indirect, or FALSE otherwise. + */ +#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF) + +/** + * Returns TRUE if given mbuf is direct, or FALSE otherwise. + */ +#define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb)) + +/** + * Private data in case of pktmbuf pool. + * + * A structure that contains some pktmbuf_pool-specific data that are + * appended after the mempool structure (in private data). + */ +struct rte_pktmbuf_pool_private { + uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */ + uint16_t mbuf_priv_size; /**< Size of private area in each mbuf. */ +}; + +/** + * A macro that points to an offset into the data in the mbuf. + * + * The returned pointer is cast to type t. Before using this + * function, the user must ensure that the first segment is large + * enough to accommodate its data. + * + * @param m + * The packet mbuf. + * @param o + * The offset into the mbuf data. + * @param t + * The type to cast the result into. + */ +#define rte_pktmbuf_mtod_offset(m, t, o) \ + ((t)((char *)(m)->buf_addr + (m)->data_off + (o))) + +/** + * A macro that points to the start of the data in the mbuf. + * + * The returned pointer is cast to type t. Before using this + * function, the user must ensure that the first segment is large + * enough to accommodate its data. + * + * @param m + * The packet mbuf. + * @param t + * The type to cast the result into. + */ +#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0) + +#ifdef __cplusplus +} +#endif + +#endif /* _MBUF_H_ */ diff --git a/test/bpf/t1.c b/test/bpf/t1.c new file mode 100644 index 00000000..60f9434a --- /dev/null +++ b/test/bpf/t1.c @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +/* + * eBPF program sample. + * Accepts pointer to first segment packet data as an input parameter. 
+ * analog of tcpdump -s 1 -d 'dst 1.2.3.4 && udp && dst port 5000' + * (000) ldh [12] + * (001) jeq #0x800 jt 2 jf 12 + * (002) ld [30] + * (003) jeq #0x1020304 jt 4 jf 12 + * (004) ldb [23] + * (005) jeq #0x11 jt 6 jf 12 + * (006) ldh [20] + * (007) jset #0x1fff jt 12 jf 8 + * (008) ldxb 4*([14]&0xf) + * (009) ldh [x + 16] + * (010) jeq #0x1388 jt 11 jf 12 + * (011) ret #1 + * (012) ret #0 + * + * To compile: + * clang -O2 -target bpf -c t1.c + */ + +#include <stdint.h> +#include <net/ethernet.h> +#include <netinet/ip.h> +#include <netinet/udp.h> + +uint64_t +entry(void *pkt) +{ + struct ether_header *ether_header = (void *)pkt; + + if (ether_header->ether_type != __builtin_bswap16(0x0800)) + return 0; + + struct iphdr *iphdr = (void *)(ether_header + 1); + if (iphdr->protocol != 17 || (iphdr->frag_off & 0x1ffff) != 0 || + iphdr->daddr != __builtin_bswap32(0x1020304)) + return 0; + + int hlen = iphdr->ihl * 4; + struct udphdr *udphdr = (void *)iphdr + hlen; + + if (udphdr->dest != __builtin_bswap16(5000)) + return 0; + + return 1; +} diff --git a/test/bpf/t2.c b/test/bpf/t2.c new file mode 100644 index 00000000..69d7a4fe --- /dev/null +++ b/test/bpf/t2.c @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +/* + * eBPF program sample. + * Accepts pointer to struct rte_mbuf as an input parameter. + * cleanup mbuf's vlan_tci and all related RX flags + * (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED). + * Doesn't touch contents of packet data. + * To compile: + * clang -O2 -I${RTE_SDK}/${RTE_TARGET}/include \ + * -target bpf -Wno-int-to-void-pointer-cast -c t2.c + */ + +#include <stdint.h> +#include <stddef.h> +#include <rte_config.h> +#include "mbuf.h" + +uint64_t +entry(void *pkt) +{ + struct rte_mbuf *mb; + + mb = pkt; + mb->vlan_tci = 0; + mb->ol_flags &= ~(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED); + + return 1; +} diff --git a/test/bpf/t3.c b/test/bpf/t3.c new file mode 100644 index 00000000..531b9cb8 --- /dev/null +++ b/test/bpf/t3.c @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +/* + * eBPF program sample. + * Accepts pointer to struct rte_mbuf as an input parameter. + * Dump the mbuf into stdout if it is an ARP packet (aka tcpdump 'arp'). 
+ * To compile: + * clang -O2 -I${RTE_SDK}/${RTE_TARGET}/include \ + * -target bpf -Wno-int-to-void-pointer-cast -c t3.c + */ + +#include <stdint.h> +#include <stddef.h> +#include <stdio.h> +#include <net/ethernet.h> +#include <rte_config.h> +#include "mbuf.h" + +extern void rte_pktmbuf_dump(FILE *, const struct rte_mbuf *, unsigned int); + +uint64_t +entry(const void *pkt) +{ + const struct rte_mbuf *mb; + const struct ether_header *eth; + + mb = pkt; + eth = rte_pktmbuf_mtod(mb, const struct ether_header *); + + if (eth->ether_type == __builtin_bswap16(ETHERTYPE_ARP)) + rte_pktmbuf_dump(stdout, mb, 64); + + return 1; +} diff --git a/test/test-pipeline/main.h b/test/test-pipeline/main.h index f844e941..59dcfddb 100644 --- a/test/test-pipeline/main.h +++ b/test/test-pipeline/main.h @@ -107,6 +107,10 @@ uint64_t test_hash(void *key, uint32_t key_size, uint64_t seed); +uint32_t test_hash_cuckoo(const void *key, + uint32_t key_size, + uint32_t seed); + void app_main_loop_worker(void); void app_main_loop_worker_pipeline_stub(void); void app_main_loop_worker_pipeline_hash(void); diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c index 11e2402d..c2014723 100644 --- a/test/test-pipeline/pipeline_hash.c +++ b/test/test-pipeline/pipeline_hash.c @@ -15,6 +15,7 @@ #include <rte_port_ring.h> #include <rte_table_hash.h> #include <rte_hash.h> +#include <rte_table_hash_cuckoo.h> #include <rte_pipeline.h> #include "main.h" @@ -151,6 +152,17 @@ app_main_loop_worker_pipeline_hash(void) { .seed = 0, }; + struct rte_table_hash_cuckoo_params table_hash_cuckoo_params = { + .name = "TABLE", + .key_size = key_size, + .key_offset = APP_METADATA_OFFSET(32), + .key_mask = NULL, + .n_keys = 1 << 24, + .n_buckets = 1 << 22, + .f_hash = test_hash_cuckoo, + .seed = 0, + }; + /* Table configuration */ switch (app.pipeline_type) { case e_APP_PIPELINE_HASH_KEY8_EXT: @@ -298,7 +310,7 @@ app_main_loop_worker_pipeline_hash(void) { { struct rte_pipeline_table_params table_params = { .ops = &rte_table_hash_cuckoo_ops, - .arg_create = &table_hash_params, + .arg_create = &table_hash_cuckoo_params, .f_action_hit = NULL, .f_action_miss = NULL, .arg_ah = NULL, @@ -379,6 +391,18 @@ uint64_t test_hash( return signature; } +uint32_t test_hash_cuckoo( + const void *key, + __attribute__((unused)) uint32_t key_size, + __attribute__((unused)) uint32_t seed) +{ + const uint32_t *k32 = key; + uint32_t ip_dst = rte_be_to_cpu_32(k32[0]); + uint32_t signature = (ip_dst >> 2) | ((ip_dst & 0x3) << 30); + + return signature; +} + void app_main_loop_rx_metadata(void) { uint32_t i, j; diff --git a/test/test/Makefile b/test/test/Makefile index a88cc38b..eccc8efc 100644 --- a/test/test/Makefile +++ b/test/test/Makefile @@ -161,7 +161,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += test_distributor_perf.c SRCS-$(CONFIG_RTE_LIBRTE_REORDER) += test_reorder.c -SRCS-y += test_devargs.c SRCS-y += virtual_pmd.c SRCS-y += packet_burst_generator.c SRCS-$(CONFIG_RTE_LIBRTE_ACL) += test_acl.c @@ -181,10 +180,16 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring_perf.c SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_blockcipher.c SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev.c +ifeq ($(CONFIG_RTE_COMPRESSDEV_TEST),y) +SRCS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += test_compressdev.c +endif + ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y) SRCS-y += test_eventdev.c SRCS-y += test_event_ring.c SRCS-y += test_event_eth_rx_adapter.c +SRCS-y += test_event_timer_adapter.c +SRCS-y += test_event_crypto_adapter.c endif ifeq 
($(CONFIG_RTE_LIBRTE_RAWDEV),y) @@ -193,6 +198,8 @@ endif SRCS-$(CONFIG_RTE_LIBRTE_KVARGS) += test_kvargs.c +SRCS-$(CONFIG_RTE_LIBRTE_BPF) += test_bpf.c + CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += -O3 @@ -201,6 +208,11 @@ CFLAGS += $(WERROR_FLAGS) CFLAGS += -D_GNU_SOURCE LDLIBS += -lm +ifeq ($(CONFIG_RTE_COMPRESSDEV_TEST),y) +ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y) +LDLIBS += -lz +endif +endif # Disable VTA for memcpy test ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) @@ -211,6 +223,8 @@ CFLAGS_test_memcpy_perf.o += -fno-var-tracking-assignments # designated initializers. ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1) CFLAGS_test_eventdev_sw.o += -Wno-missing-field-initializers +CFLAGS_test_event_timer_adapter.o += -Wno-missing-field-initializers +CFLAGS_test_event_crypto_adapter.o += -Wno-missing-field-initializers endif endif endif diff --git a/test/test/commands.c b/test/test/commands.c index cf0b726b..94fbc310 100644 --- a/test/test/commands.c +++ b/test/test/commands.c @@ -132,11 +132,13 @@ static void cmd_dump_parsed(void *parsed_result, else if (!strcmp(res->dump, "dump_mempool")) rte_mempool_list_dump(stdout); else if (!strcmp(res->dump, "dump_devargs")) - rte_eal_devargs_dump(stdout); + rte_devargs_dump(stdout); else if (!strcmp(res->dump, "dump_log_types")) rte_log_dump(stdout); else if (!strcmp(res->dump, "dump_malloc_stats")) rte_malloc_dump_stats(stdout, NULL); + else if (!strcmp(res->dump, "dump_malloc_heaps")) + rte_malloc_dump_heaps(stdout); } cmdline_parse_token_string_t cmd_dump_dump = @@ -147,6 +149,7 @@ cmdline_parse_token_string_t cmd_dump_dump = "dump_ring#" "dump_mempool#" "dump_malloc_stats#" + "dump_malloc_heaps#" "dump_devargs#" "dump_log_types"); diff --git a/test/test/meson.build b/test/test/meson.build index eb3d87a4..a907fd25 100644 --- a/test/test/meson.build +++ b/test/test/meson.build @@ -8,6 +8,7 @@ test_sources = files('commands.c', 'test_alarm.c', 'test_atomic.c', 'test_barrier.c', + 'test_bpf.c', 'test_byteorder.c', 'test_cmdline.c', 'test_cmdline_cirbuf.c', @@ -24,7 +25,6 @@ test_sources = files('commands.c', 'test_cryptodev_blockcipher.c', 'test_cycles.c', 'test_debug.c', - 'test_devargs.c', 'test_distributor.c', 'test_distributor_perf.c', 'test_eal_flags.c', @@ -98,6 +98,7 @@ test_sources = files('commands.c', ) test_deps = ['acl', + 'bpf', 'cfgfile', 'cmdline', 'cryptodev', @@ -135,7 +136,7 @@ test_names = [ 'cryptodev_sw_kasumi_autotest', 'cryptodev_sw_zuc_autotest', 'cryptodev_sw_armv8_autotest', - 'cryptodev_sw_mrvl_autotest', + 'cryptodev_sw_mvsam_autotest', 'cryptodev_dpaa2_sec_autotest', 'cryptodev_dpaa_sec_autotest', 'cycles_autotest', @@ -235,6 +236,14 @@ if dpdk_conf.has('RTE_LIBRTE_KNI') endif test_dep_objs = [] +compress_test_dep = dependency('zlib', required: false) +if compress_test_dep.found() + test_dep_objs += compress_test_dep + test_sources += 'test_compressdev.c' + test_deps += 'compressdev' + test_names += 'compressdev_autotest' +endif + foreach d:test_deps def_lib = get_option('default_library') test_dep_objs += get_variable(def_lib + '_rte_' + d) diff --git a/test/test/resource.c b/test/test/resource.c index 0e2b62cd..34465f16 100644 --- a/test/test/resource.c +++ b/test/test/resource.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016 RehiveTech. All rights reserved. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of RehiveTech nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 RehiveTech. All rights reserved. */ #include <stdio.h> diff --git a/test/test/resource.h b/test/test/resource.h index 1e961221..223fa22a 100644 --- a/test/test/resource.h +++ b/test/test/resource.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016 RehiveTech. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of RehiveTech nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 RehiveTech. All rights reserved. 
 */

#ifndef _RESOURCE_H_
diff --git a/test/test/test_bpf.c b/test/test/test_bpf.c
new file mode 100644
index 00000000..cbd6be63
--- /dev/null
+++ b/test/test/test_bpf.c
@@ -0,0 +1,1759 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_memory.h>
+#include <rte_debug.h>
+#include <rte_hexdump.h>
+#include <rte_random.h>
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_bpf.h>
+
+#include "test.h"
+
+/*
+ * Basic functional tests for librte_bpf.
+ * The main procedure: load an eBPF program, execute it and
+ * compare results with expected values.
+ */
+
+struct dummy_offset {
+	uint64_t u64;
+	uint32_t u32;
+	uint16_t u16;
+	uint8_t u8;
+};
+
+struct dummy_vect8 {
+	struct dummy_offset in[8];
+	struct dummy_offset out[8];
+};
+
+#define TEST_FILL_1 0xDEADBEEF
+
+#define TEST_MUL_1 21
+#define TEST_MUL_2 -100
+
+#define TEST_SHIFT_1 15
+#define TEST_SHIFT_2 33
+
+#define TEST_JCC_1 0
+#define TEST_JCC_2 -123
+#define TEST_JCC_3 5678
+#define TEST_JCC_4 TEST_FILL_1
+
+struct bpf_test {
+	const char *name;
+	size_t arg_sz;
+	struct rte_bpf_prm prm;
+	void (*prepare)(void *);
+	int (*check_result)(uint64_t, const void *);
+	uint32_t allow_fail;
+};
+
+/*
+ * Compare return value and result data with expected ones.
+ * Report a failure if they don't match.
+ */
+static int
+cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
+	const void *exp_res, const void *ret_res, size_t res_sz)
+{
+	int32_t ret;
+
+	ret = 0;
+	if (exp_rc != ret_rc) {
+		printf("%s@%d: invalid return value, expected: 0x%" PRIx64
+			",result: 0x%" PRIx64 "\n",
+			func, __LINE__, exp_rc, ret_rc);
+		ret |= -1;
+	}
+
+	if (memcmp(exp_res, ret_res, res_sz) != 0) {
+		printf("%s: invalid value\n", func);
+		rte_memdump(stdout, "expected", exp_res, res_sz);
+		rte_memdump(stdout, "result", ret_res, res_sz);
+		ret |= -1;
+	}
+
+	return ret;
+}
+
+/* store immediate test-cases */
+static const struct ebpf_insn test_store1_prog[] = {
+	{
+		.code = (BPF_ST | BPF_MEM | BPF_B),
+		.dst_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_offset, u8),
+		.imm = TEST_FILL_1,
+	},
+	{
+		.code = (BPF_ST | BPF_MEM | BPF_H),
+		.dst_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_offset, u16),
+		.imm = TEST_FILL_1,
+	},
+	{
+		.code = (BPF_ST | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_offset, u32),
+		.imm = TEST_FILL_1,
+	},
+	{
+		.code = (BPF_ST | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.off = offsetof(struct dummy_offset, u64),
+		.imm = TEST_FILL_1,
+	},
+	/* return 1 */
+	{
+		.code = (BPF_ALU | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 1,
+	},
+	{
+		.code = (BPF_JMP | EBPF_EXIT),
+	},
+};
+
+static void
+test_store1_prepare(void *arg)
+{
+	struct dummy_offset *df;
+
+	df = arg;
+	memset(df, 0, sizeof(*df));
+}
+
+static int
+test_store1_check(uint64_t rc, const void *arg)
+{
+	const struct dummy_offset *dft;
+	struct dummy_offset dfe;
+
+	dft = arg;
+
+	memset(&dfe, 0, sizeof(dfe));
+	dfe.u64 = (int32_t)TEST_FILL_1;
+	dfe.u32 = dfe.u64;
+	dfe.u16 = dfe.u64;
+	dfe.u8 = dfe.u64;
+
+	return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
+}
+
+/* store register test-cases */
+static const struct ebpf_insn test_store2_prog[] = {
+
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = TEST_FILL_1,
+	},
+	{
+		.code = (BPF_STX | BPF_MEM | BPF_B),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_2,
+		.off = offsetof(struct
dummy_offset, u8), + }, + { + .code = (BPF_STX | BPF_MEM | BPF_H), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_offset, u16), + }, + { + .code = (BPF_STX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_offset, u32), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_offset, u64), + }, + /* return 1 */ + { + .code = (BPF_ALU | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 1, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +/* load test-cases */ +static const struct ebpf_insn test_load1_prog[] = { + + { + .code = (BPF_LDX | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_offset, u8), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_H), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_offset, u16), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_offset, u32), + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_offset, u64), + }, + /* return sum */ + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_4, + }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_3, + }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_2, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +static void +test_load1_prepare(void *arg) +{ + struct dummy_offset *df; + + df = arg; + + memset(df, 0, sizeof(*df)); + df->u64 = (int32_t)TEST_FILL_1; + df->u32 = df->u64; + df->u16 = df->u64; + df->u8 = df->u64; +} + +static int +test_load1_check(uint64_t rc, const void *arg) +{ + uint64_t v; + const struct dummy_offset *dft; + + dft = arg; + v = dft->u64; + v += dft->u32; + v += dft->u16; + v += dft->u8; + + return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft)); +} + +/* alu mul test-cases */ +static const struct ebpf_insn test_mul1_prog[] = { + + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u32), + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[1].u64), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[2].u32), + }, + { + .code = (BPF_ALU | BPF_MUL | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = TEST_MUL_1, + }, + { + .code = (EBPF_ALU64 | BPF_MUL | BPF_K), + .dst_reg = EBPF_REG_3, + .imm = TEST_MUL_2, + }, + { + .code = (BPF_ALU | BPF_MUL | BPF_X), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_2, + }, + { + .code = (EBPF_ALU64 | BPF_MUL | BPF_X), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_3, + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_vect8, out[0].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_vect8, out[1].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_4, + .off = offsetof(struct dummy_vect8, out[2].u64), + }, + /* return 1 */ + { + .code = (BPF_ALU | EBPF_MOV | BPF_K), + .dst_reg = 
EBPF_REG_0, + .imm = 1, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +static void +test_mul1_prepare(void *arg) +{ + struct dummy_vect8 *dv; + uint64_t v; + + dv = arg; + + v = rte_rand(); + + memset(dv, 0, sizeof(*dv)); + dv->in[0].u32 = v; + dv->in[1].u64 = v << 12 | v >> 6; + dv->in[2].u32 = -v; +} + +static int +test_mul1_check(uint64_t rc, const void *arg) +{ + uint64_t r2, r3, r4; + const struct dummy_vect8 *dvt; + struct dummy_vect8 dve; + + dvt = arg; + memset(&dve, 0, sizeof(dve)); + + r2 = dvt->in[0].u32; + r3 = dvt->in[1].u64; + r4 = dvt->in[2].u32; + + r2 = (uint32_t)r2 * TEST_MUL_1; + r3 *= TEST_MUL_2; + r4 = (uint32_t)(r4 * r2); + r4 *= r3; + + dve.out[0].u64 = r2; + dve.out[1].u64 = r3; + dve.out[2].u64 = r4; + + return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out)); +} + +/* alu shift test-cases */ +static const struct ebpf_insn test_shift1_prog[] = { + + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u32), + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[1].u64), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[2].u32), + }, + { + .code = (BPF_ALU | BPF_LSH | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = TEST_SHIFT_1, + }, + { + .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K), + .dst_reg = EBPF_REG_3, + .imm = TEST_SHIFT_2, + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_vect8, out[0].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_vect8, out[1].u64), + }, + { + .code = (BPF_ALU | BPF_RSH | BPF_X), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_4, + }, + { + .code = (EBPF_ALU64 | BPF_LSH | BPF_X), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_4, + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_vect8, out[2].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_vect8, out[3].u64), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u32), + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[1].u64), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[2].u32), + }, + { + .code = (BPF_ALU | BPF_AND | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = sizeof(uint64_t) * CHAR_BIT - 1, + }, + { + .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_2, + }, + { + .code = (BPF_ALU | BPF_AND | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = sizeof(uint32_t) * CHAR_BIT - 1, + }, + { + .code = (BPF_ALU | BPF_LSH | BPF_X), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_2, + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_4, + .off = offsetof(struct dummy_vect8, out[4].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_vect8, out[5].u64), + }, + /* return 1 */ + { + 
.code = (BPF_ALU | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 1, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +static void +test_shift1_prepare(void *arg) +{ + struct dummy_vect8 *dv; + uint64_t v; + + dv = arg; + + v = rte_rand(); + + memset(dv, 0, sizeof(*dv)); + dv->in[0].u32 = v; + dv->in[1].u64 = v << 12 | v >> 6; + dv->in[2].u32 = (-v ^ 5); +} + +static int +test_shift1_check(uint64_t rc, const void *arg) +{ + uint64_t r2, r3, r4; + const struct dummy_vect8 *dvt; + struct dummy_vect8 dve; + + dvt = arg; + memset(&dve, 0, sizeof(dve)); + + r2 = dvt->in[0].u32; + r3 = dvt->in[1].u64; + r4 = dvt->in[2].u32; + + r2 = (uint32_t)r2 << TEST_SHIFT_1; + r3 = (int64_t)r3 >> TEST_SHIFT_2; + + dve.out[0].u64 = r2; + dve.out[1].u64 = r3; + + r2 = (uint32_t)r2 >> r4; + r3 <<= r4; + + dve.out[2].u64 = r2; + dve.out[3].u64 = r3; + + r2 = dvt->in[0].u32; + r3 = dvt->in[1].u64; + r4 = dvt->in[2].u32; + + r2 &= sizeof(uint64_t) * CHAR_BIT - 1; + r3 = (int64_t)r3 >> r2; + r2 &= sizeof(uint32_t) * CHAR_BIT - 1; + r4 = (uint32_t)r4 << r2; + + dve.out[4].u64 = r4; + dve.out[5].u64 = r3; + + return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out)); +} + +/* jmp test-cases */ +static const struct ebpf_insn test_jump1_prog[] = { + + [0] = { + .code = (BPF_ALU | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 0, + }, + [1] = { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u32), + }, + [2] = { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u64), + }, + [3] = { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[1].u32), + }, + [4] = { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_5, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[1].u64), + }, + [5] = { + .code = (BPF_JMP | BPF_JEQ | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = TEST_JCC_1, + .off = 8, + }, + [6] = { + .code = (BPF_JMP | EBPF_JSLE | BPF_K), + .dst_reg = EBPF_REG_3, + .imm = TEST_JCC_2, + .off = 9, + }, + [7] = { + .code = (BPF_JMP | BPF_JGT | BPF_K), + .dst_reg = EBPF_REG_4, + .imm = TEST_JCC_3, + .off = 10, + }, + [8] = { + .code = (BPF_JMP | BPF_JSET | BPF_K), + .dst_reg = EBPF_REG_5, + .imm = TEST_JCC_4, + .off = 11, + }, + [9] = { + .code = (BPF_JMP | EBPF_JNE | BPF_X), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_3, + .off = 12, + }, + [10] = { + .code = (BPF_JMP | EBPF_JSGT | BPF_X), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_4, + .off = 13, + }, + [11] = { + .code = (BPF_JMP | EBPF_JLE | BPF_X), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_5, + .off = 14, + }, + [12] = { + .code = (BPF_JMP | BPF_JSET | BPF_X), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_5, + .off = 15, + }, + [13] = { + .code = (BPF_JMP | EBPF_EXIT), + }, + [14] = { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 0x1, + }, + [15] = { + .code = (BPF_JMP | BPF_JA), + .off = -10, + }, + [16] = { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 0x2, + }, + [17] = { + .code = (BPF_JMP | BPF_JA), + .off = -11, + }, + [18] = { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 0x4, + }, + [19] = { + .code = (BPF_JMP | BPF_JA), + .off = -12, + }, + [20] = { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 0x8, + }, + [21] = { + .code = (BPF_JMP | BPF_JA), 
+ .off = -13, + }, + [22] = { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 0x10, + }, + [23] = { + .code = (BPF_JMP | BPF_JA), + .off = -14, + }, + [24] = { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 0x20, + }, + [25] = { + .code = (BPF_JMP | BPF_JA), + .off = -15, + }, + [26] = { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 0x40, + }, + [27] = { + .code = (BPF_JMP | BPF_JA), + .off = -16, + }, + [28] = { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 0x80, + }, + [29] = { + .code = (BPF_JMP | BPF_JA), + .off = -17, + }, +}; + +static void +test_jump1_prepare(void *arg) +{ + struct dummy_vect8 *dv; + uint64_t v1, v2; + + dv = arg; + + v1 = rte_rand(); + v2 = rte_rand(); + + memset(dv, 0, sizeof(*dv)); + dv->in[0].u64 = v1; + dv->in[1].u64 = v2; + dv->in[0].u32 = (v1 << 12) + (v2 >> 6); + dv->in[1].u32 = (v2 << 12) - (v1 >> 6); +} + +static int +test_jump1_check(uint64_t rc, const void *arg) +{ + uint64_t r2, r3, r4, r5, rv; + const struct dummy_vect8 *dvt; + + dvt = arg; + + rv = 0; + r2 = dvt->in[0].u32; + r3 = dvt->in[0].u64; + r4 = dvt->in[1].u32; + r5 = dvt->in[1].u64; + + if (r2 == TEST_JCC_1) + rv |= 0x1; + if ((int64_t)r3 <= TEST_JCC_2) + rv |= 0x2; + if (r4 > TEST_JCC_3) + rv |= 0x4; + if (r5 & TEST_JCC_4) + rv |= 0x8; + if (r2 != r3) + rv |= 0x10; + if ((int64_t)r2 > (int64_t)r4) + rv |= 0x20; + if (r2 <= r5) + rv |= 0x40; + if (r3 & r5) + rv |= 0x80; + + return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv)); +} + +/* alu (add, sub, and, or, xor, neg) test-cases */ +static const struct ebpf_insn test_alu1_prog[] = { + + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u32), + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u64), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[1].u32), + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_5, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[1].u64), + }, + { + .code = (BPF_ALU | BPF_AND | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = TEST_FILL_1, + }, + { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_3, + .imm = TEST_FILL_1, + }, + { + .code = (BPF_ALU | BPF_XOR | BPF_K), + .dst_reg = EBPF_REG_4, + .imm = TEST_FILL_1, + }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_K), + .dst_reg = EBPF_REG_5, + .imm = TEST_FILL_1, + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_vect8, out[0].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_vect8, out[1].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_4, + .off = offsetof(struct dummy_vect8, out[2].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_5, + .off = offsetof(struct dummy_vect8, out[3].u64), + }, + { + .code = (BPF_ALU | BPF_OR | BPF_X), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_3, + }, + { + .code = (EBPF_ALU64 | BPF_XOR | BPF_X), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_4, + }, + { + .code = (BPF_ALU | BPF_SUB | BPF_X), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_5, + }, 
+ { + .code = (EBPF_ALU64 | BPF_AND | BPF_X), + .dst_reg = EBPF_REG_5, + .src_reg = EBPF_REG_2, + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_vect8, out[4].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_vect8, out[5].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_4, + .off = offsetof(struct dummy_vect8, out[6].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_5, + .off = offsetof(struct dummy_vect8, out[7].u64), + }, + /* return (-r2 + (-r3)) */ + { + .code = (BPF_ALU | BPF_NEG), + .dst_reg = EBPF_REG_2, + }, + { + .code = (EBPF_ALU64 | BPF_NEG), + .dst_reg = EBPF_REG_3, + }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_3, + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_2, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +static int +test_alu1_check(uint64_t rc, const void *arg) +{ + uint64_t r2, r3, r4, r5, rv; + const struct dummy_vect8 *dvt; + struct dummy_vect8 dve; + + dvt = arg; + memset(&dve, 0, sizeof(dve)); + + r2 = dvt->in[0].u32; + r3 = dvt->in[0].u64; + r4 = dvt->in[1].u32; + r5 = dvt->in[1].u64; + + r2 = (uint32_t)r2 & TEST_FILL_1; + r3 |= (int32_t) TEST_FILL_1; + r4 = (uint32_t)r4 ^ TEST_FILL_1; + r5 += (int32_t)TEST_FILL_1; + + dve.out[0].u64 = r2; + dve.out[1].u64 = r3; + dve.out[2].u64 = r4; + dve.out[3].u64 = r5; + + r2 = (uint32_t)r2 | (uint32_t)r3; + r3 ^= r4; + r4 = (uint32_t)r4 - (uint32_t)r5; + r5 &= r2; + + dve.out[4].u64 = r2; + dve.out[5].u64 = r3; + dve.out[6].u64 = r4; + dve.out[7].u64 = r5; + + r2 = -(int32_t)r2; + rv = (uint32_t)r2; + r3 = -r3; + rv += r3; + + return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out)); +} + +/* endianness conversions (BE->LE/LE->BE) test-cases */ +static const struct ebpf_insn test_bele1_prog[] = { + + { + .code = (BPF_LDX | BPF_MEM | BPF_H), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u16), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u32), + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u64), + }, + { + .code = (BPF_ALU | EBPF_END | EBPF_TO_BE), + .dst_reg = EBPF_REG_2, + .imm = sizeof(uint16_t) * CHAR_BIT, + }, + { + .code = (BPF_ALU | EBPF_END | EBPF_TO_BE), + .dst_reg = EBPF_REG_3, + .imm = sizeof(uint32_t) * CHAR_BIT, + }, + { + .code = (BPF_ALU | EBPF_END | EBPF_TO_BE), + .dst_reg = EBPF_REG_4, + .imm = sizeof(uint64_t) * CHAR_BIT, + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_vect8, out[0].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_vect8, out[1].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_4, + .off = offsetof(struct dummy_vect8, out[2].u64), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_H), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u16), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_3, 
+ .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u32), + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u64), + }, + { + .code = (BPF_ALU | EBPF_END | EBPF_TO_LE), + .dst_reg = EBPF_REG_2, + .imm = sizeof(uint16_t) * CHAR_BIT, + }, + { + .code = (BPF_ALU | EBPF_END | EBPF_TO_LE), + .dst_reg = EBPF_REG_3, + .imm = sizeof(uint32_t) * CHAR_BIT, + }, + { + .code = (BPF_ALU | EBPF_END | EBPF_TO_LE), + .dst_reg = EBPF_REG_4, + .imm = sizeof(uint64_t) * CHAR_BIT, + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_vect8, out[3].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_vect8, out[4].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_4, + .off = offsetof(struct dummy_vect8, out[5].u64), + }, + /* return 1 */ + { + .code = (BPF_ALU | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 1, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +static void +test_bele1_prepare(void *arg) +{ + struct dummy_vect8 *dv; + + dv = arg; + + memset(dv, 0, sizeof(*dv)); + dv->in[0].u64 = rte_rand(); + dv->in[0].u32 = dv->in[0].u64; + dv->in[0].u16 = dv->in[0].u64; +} + +static int +test_bele1_check(uint64_t rc, const void *arg) +{ + uint64_t r2, r3, r4; + const struct dummy_vect8 *dvt; + struct dummy_vect8 dve; + + dvt = arg; + memset(&dve, 0, sizeof(dve)); + + r2 = dvt->in[0].u16; + r3 = dvt->in[0].u32; + r4 = dvt->in[0].u64; + + r2 = rte_cpu_to_be_16(r2); + r3 = rte_cpu_to_be_32(r3); + r4 = rte_cpu_to_be_64(r4); + + dve.out[0].u64 = r2; + dve.out[1].u64 = r3; + dve.out[2].u64 = r4; + + r2 = dvt->in[0].u16; + r3 = dvt->in[0].u32; + r4 = dvt->in[0].u64; + + r2 = rte_cpu_to_le_16(r2); + r3 = rte_cpu_to_le_32(r3); + r4 = rte_cpu_to_le_64(r4); + + dve.out[3].u64 = r2; + dve.out[4].u64 = r3; + dve.out[5].u64 = r4; + + return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out)); +} + +/* atomic add test-cases */ +static const struct ebpf_insn test_xadd1_prog[] = { + + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = 1, + }, + { + .code = (BPF_STX | EBPF_XADD | BPF_W), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_offset, u32), + }, + { + .code = (BPF_STX | EBPF_XADD | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_offset, u64), + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_3, + .imm = -1, + }, + { + .code = (BPF_STX | EBPF_XADD | BPF_W), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_offset, u32), + }, + { + .code = (BPF_STX | EBPF_XADD | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_offset, u64), + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_4, + .imm = TEST_FILL_1, + }, + { + .code = (BPF_STX | EBPF_XADD | BPF_W), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_4, + .off = offsetof(struct dummy_offset, u32), + }, + { + .code = (BPF_STX | EBPF_XADD | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_4, + .off = offsetof(struct dummy_offset, u64), + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_5, + .imm = TEST_MUL_1, + }, + { + .code = (BPF_STX | EBPF_XADD | BPF_W), + .dst_reg = EBPF_REG_1, + .src_reg 
= EBPF_REG_5, + .off = offsetof(struct dummy_offset, u32), + }, + { + .code = (BPF_STX | EBPF_XADD | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_5, + .off = offsetof(struct dummy_offset, u64), + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_6, + .imm = TEST_MUL_2, + }, + { + .code = (BPF_STX | EBPF_XADD | BPF_W), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_6, + .off = offsetof(struct dummy_offset, u32), + }, + { + .code = (BPF_STX | EBPF_XADD | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_6, + .off = offsetof(struct dummy_offset, u64), + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_7, + .imm = TEST_JCC_2, + }, + { + .code = (BPF_STX | EBPF_XADD | BPF_W), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_7, + .off = offsetof(struct dummy_offset, u32), + }, + { + .code = (BPF_STX | EBPF_XADD | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_7, + .off = offsetof(struct dummy_offset, u64), + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_8, + .imm = TEST_JCC_3, + }, + { + .code = (BPF_STX | EBPF_XADD | BPF_W), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_8, + .off = offsetof(struct dummy_offset, u32), + }, + { + .code = (BPF_STX | EBPF_XADD | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_8, + .off = offsetof(struct dummy_offset, u64), + }, + /* return 1 */ + { + .code = (BPF_ALU | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 1, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +static int +test_xadd1_check(uint64_t rc, const void *arg) +{ + uint64_t rv; + const struct dummy_offset *dft; + struct dummy_offset dfe; + + dft = arg; + memset(&dfe, 0, sizeof(dfe)); + + rv = 1; + rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv); + rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv); + + rv = -1; + rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv); + rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv); + + rv = (int32_t)TEST_FILL_1; + rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv); + rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv); + + rv = TEST_MUL_1; + rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv); + rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv); + + rv = TEST_MUL_2; + rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv); + rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv); + + rv = TEST_JCC_2; + rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv); + rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv); + + rv = TEST_JCC_3; + rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv); + rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv); + + return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe)); +} + +/* alu div test-cases */ +static const struct ebpf_insn test_div1_prog[] = { + + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[0].u32), + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[1].u64), + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[2].u32), + }, + { + .code = (BPF_ALU | BPF_DIV | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = TEST_MUL_1, + }, + { + .code = (EBPF_ALU64 | BPF_MOD | BPF_K), + .dst_reg = EBPF_REG_3, + .imm = TEST_MUL_2, + }, + { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = 1, + }, + { + .code = (EBPF_ALU64 | BPF_OR | BPF_K), + .dst_reg = EBPF_REG_3, + .imm = 1, + }, + { + .code 
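/*
 * In the divide test, both computed divisors are first OR-ed with 1
 * (see the BPF_OR instructions above), so the BPF_MOD/BPF_DIV steps
 * always see a non-zero operand; the one deliberately unguarded
 * division, by in[3].u32 further below, runs on a value the prepare
 * step leaves at zero, exercising the graceful division-by-zero path.
 */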
= (BPF_ALU | BPF_MOD | BPF_X), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_2, + }, + { + .code = (EBPF_ALU64 | BPF_DIV | BPF_X), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_3, + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + .off = offsetof(struct dummy_vect8, out[0].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_3, + .off = offsetof(struct dummy_vect8, out[1].u64), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_4, + .off = offsetof(struct dummy_vect8, out[2].u64), + }, + /* check that we can handle division by zero gracefully. */ + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_vect8, in[3].u32), + }, + { + .code = (BPF_ALU | BPF_DIV | BPF_X), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_2, + }, + /* return 1 */ + { + .code = (BPF_ALU | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 1, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +static int +test_div1_check(uint64_t rc, const void *arg) +{ + uint64_t r2, r3, r4; + const struct dummy_vect8 *dvt; + struct dummy_vect8 dve; + + dvt = arg; + memset(&dve, 0, sizeof(dve)); + + r2 = dvt->in[0].u32; + r3 = dvt->in[1].u64; + r4 = dvt->in[2].u32; + + r2 = (uint32_t)r2 / TEST_MUL_1; + r3 %= TEST_MUL_2; + r2 |= 1; + r3 |= 1; + r4 = (uint32_t)(r4 % r2); + r4 /= r3; + + dve.out[0].u64 = r2; + dve.out[1].u64 = r3; + dve.out[2].u64 = r4; + + /* + * in the test prog we attempted to divide by zero. + * so return value should return 0. + */ + return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out)); +} + +/* call test-cases */ +static const struct ebpf_insn test_call1_prog[] = { + + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_offset, u32), + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_1, + .off = offsetof(struct dummy_offset, u64), + }, + { + .code = (BPF_STX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_10, + .src_reg = EBPF_REG_2, + .off = -4, + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_10, + .src_reg = EBPF_REG_3, + .off = -16, + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_10, + }, + { + .code = (EBPF_ALU64 | BPF_SUB | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = 4, + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_10, + }, + { + .code = (EBPF_ALU64 | BPF_SUB | BPF_K), + .dst_reg = EBPF_REG_3, + .imm = 16, + }, + { + .code = (BPF_JMP | EBPF_CALL), + .imm = 0, + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_10, + .off = -4, + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_10, + .off = -16 + }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_2, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +static void +dummy_func1(const void *p, uint32_t *v32, uint64_t *v64) +{ + const struct dummy_offset *dv; + + dv = p; + + v32[0] += dv->u16; + v64[0] += dv->u8; +} + +static int +test_call1_check(uint64_t rc, const void *arg) +{ + uint32_t v32; + uint64_t v64; + const struct dummy_offset *dv; + + dv = arg; + + v32 = dv->u32; + v64 = dv->u64; + dummy_func1(arg, &v32, &v64); + v64 += v32; + + if (v64 != rc) { + 
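/*
 * The expected value mirrors what the BPF program computes through the
 * external call: dummy_func1() adds u16 into the 32-bit slot and u8
 * into the 64-bit slot, so rc should equal
 * dv->u64 + dv->u8 + (uint32_t)(dv->u32 + dv->u16).
 */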
printf("%s@%d: invalid return value "
+			"expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
+			__func__, __LINE__, v64, rc);
+		return -1;
+	}
+	return 0;
+}
+
+static const struct rte_bpf_xsym test_call1_xsym[] = {
+	{
+		.name = RTE_STR(dummy_func1),
+		.type = RTE_BPF_XTYPE_FUNC,
+		.func = (void *)dummy_func1,
+	},
+};
+
+static const struct bpf_test tests[] = {
+	{
+		.name = "test_store1",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_store1_prog,
+			.nb_ins = RTE_DIM(test_store1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+		},
+		.prepare = test_store1_prepare,
+		.check_result = test_store1_check,
+	},
+	{
+		.name = "test_store2",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_store2_prog,
+			.nb_ins = RTE_DIM(test_store2_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+		},
+		.prepare = test_store1_prepare,
+		.check_result = test_store1_check,
+	},
+	{
+		.name = "test_load1",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_load1_prog,
+			.nb_ins = RTE_DIM(test_load1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+		},
+		.prepare = test_load1_prepare,
+		.check_result = test_load1_check,
+	},
+	{
+		.name = "test_mul1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_mul1_prog,
+			.nb_ins = RTE_DIM(test_mul1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_mul1_prepare,
+		.check_result = test_mul1_check,
+	},
+	{
+		.name = "test_shift1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_shift1_prog,
+			.nb_ins = RTE_DIM(test_shift1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_shift1_prepare,
+		.check_result = test_shift1_check,
+	},
+	{
+		.name = "test_jump1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_jump1_prog,
+			.nb_ins = RTE_DIM(test_jump1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_jump1_prepare,
+		.check_result = test_jump1_check,
+	},
+	{
+		.name = "test_alu1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_alu1_prog,
+			.nb_ins = RTE_DIM(test_alu1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_jump1_prepare,
+		.check_result = test_alu1_check,
+	},
+	{
+		.name = "test_bele1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_bele1_prog,
+			.nb_ins = RTE_DIM(test_bele1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_bele1_prepare,
+		.check_result = test_bele1_check,
+	},
+	{
+		.name = "test_xadd1",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_xadd1_prog,
+			.nb_ins = RTE_DIM(test_xadd1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+		},
+		.prepare = test_store1_prepare,
+		.check_result = test_xadd1_check,
+	},
+	{
+		.name = "test_div1",
+		.arg_sz = sizeof(struct dummy_vect8),
+		.prm = {
+			.ins = test_div1_prog,
+			.nb_ins = RTE_DIM(test_div1_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_vect8),
+			},
+		},
+		.prepare = test_mul1_prepare,
+		.check_result = test_div1_check,
+	},
+	{
+		.name = "test_call1",
+		.arg_sz = 
sizeof(struct dummy_offset), + .prm = { + .ins = test_call1_prog, + .nb_ins = RTE_DIM(test_call1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_offset), + }, + .xsym = test_call1_xsym, + .nb_xsym = RTE_DIM(test_call1_xsym), + }, + .prepare = test_load1_prepare, + .check_result = test_call1_check, + /* for now don't support function calls on 32 bit platform */ + .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), + }, +}; + +static int +run_test(const struct bpf_test *tst) +{ + int32_t ret, rv; + int64_t rc; + struct rte_bpf *bpf; + struct rte_bpf_jit jit; + uint8_t tbuf[tst->arg_sz]; + + printf("%s(%s) start\n", __func__, tst->name); + + bpf = rte_bpf_load(&tst->prm); + if (bpf == NULL) { + printf("%s@%d: failed to load bpf code, error=%d(%s);\n", + __func__, __LINE__, rte_errno, strerror(rte_errno)); + return -1; + } + + tst->prepare(tbuf); + + rc = rte_bpf_exec(bpf, tbuf); + ret = tst->check_result(rc, tbuf); + if (ret != 0) { + printf("%s@%d: check_result(%s) failed, error: %d(%s);\n", + __func__, __LINE__, tst->name, ret, strerror(ret)); + } + + rte_bpf_get_jit(bpf, &jit); + if (jit.func == NULL) + return 0; + + tst->prepare(tbuf); + rc = jit.func(tbuf); + rv = tst->check_result(rc, tbuf); + ret |= rv; + if (rv != 0) { + printf("%s@%d: check_result(%s) failed, error: %d(%s);\n", + __func__, __LINE__, tst->name, rv, strerror(ret)); + } + + rte_bpf_destroy(bpf); + return ret; + +} + +static int +test_bpf(void) +{ + int32_t rc, rv; + uint32_t i; + + rc = 0; + for (i = 0; i != RTE_DIM(tests); i++) { + rv = run_test(tests + i); + if (tests[i].allow_fail == 0) + rc |= rv; + } + + return rc; +} + +REGISTER_TEST_COMMAND(bpf_autotest, test_bpf); diff --git a/test/test/test_cmdline_cirbuf.c b/test/test/test_cmdline_cirbuf.c index e9193f66..8ac326cb 100644 --- a/test/test/test_cmdline_cirbuf.c +++ b/test/test/test_cmdline_cirbuf.c @@ -483,7 +483,7 @@ test_cirbuf_string_get_del_partial(void) memset(tmp, 0, sizeof(tmp)); memset(tmp2, 0, sizeof(tmp)); - snprintf(tmp2, sizeof(tmp2), "%s", CIRBUF_STR_HEAD); + strlcpy(tmp2, CIRBUF_STR_HEAD, sizeof(tmp2)); /* * initialize circular buffer diff --git a/test/test/test_cmdline_ipaddr.c b/test/test/test_cmdline_ipaddr.c index 2eb5a774..8ee7f628 100644 --- a/test/test/test_cmdline_ipaddr.c +++ b/test/test/test_cmdline_ipaddr.c @@ -87,8 +87,6 @@ const struct ipaddr_str ipaddr_valid_strs[] = { CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK}, {"192.168.1.0/24", {AF_INET, {IP4(192,168,1,0)}, 24}, CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK}, - {"012.34.56.78/24", {AF_INET, {IP4(12,34,56,78)}, 24}, - CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK}, {"34.56.78.90/1", {AF_INET, {IP4(34,56,78,90)}, 1}, CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK}, {"::", {AF_INET6, {IP6(0,0,0,0,0,0,0,0)}, 0}, diff --git a/test/test/test_common.c b/test/test/test_common.c index d0342430..7a67e458 100644 --- a/test/test/test_common.c +++ b/test/test/test_common.c @@ -3,6 +3,7 @@ */ #include <stdio.h> +#include <inttypes.h> #include <string.h> #include <math.h> #include <rte_common.h> @@ -70,6 +71,9 @@ test_align(void) #define FAIL_ALIGN(x, i, p)\ {printf(x "() test failed: %u %u\n", i, p);\ return -1;} +#define FAIL_ALIGN64(x, j, q)\ + {printf(x "() test failed: %"PRIu64" %"PRIu64"\n", j, q);\ + return -1; } #define ERROR_FLOOR(res, i, pow) \ (res % pow) || /* check if not aligned */ \ ((res / pow) != (i / pow)) /* check if correct alignment */ @@ -80,6 +84,7 @@ test_align(void) val / pow != (i / pow) + 1) /* if not aligned, hence +1 */ uint32_t i, p, val; + 
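/*
 * Reference values for the helpers exercised below (concrete examples
 * consistent with the loop invariants used in this test):
 * rte_align32pow2(100) == 128, rte_align32prevpow2(100) == 64, with the
 * 64-bit variants behaving the same over the full uint64_t range, and
 * RTE_ALIGN_MUL_CEIL(7, 3) == 9, RTE_ALIGN_MUL_FLOOR(7, 3) == 6.
 */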
uint64_t j, q; for (i = 1, p = 1; i <= MAX_NUM; i ++) { if (rte_align32pow2(i) != p) @@ -88,6 +93,27 @@ test_align(void) p <<= 1; } + for (i = 1, p = 1; i <= MAX_NUM; i++) { + if (rte_align32prevpow2(i) != p) + FAIL_ALIGN("rte_align32prevpow2", i, p); + if (rte_is_power_of_2(i + 1)) + p = i + 1; + } + + for (j = 1, q = 1; j <= MAX_NUM ; j++) { + if (rte_align64pow2(j) != q) + FAIL_ALIGN64("rte_align64pow2", j, q); + if (j == q) + q <<= 1; + } + + for (j = 1, q = 1; j <= MAX_NUM ; j++) { + if (rte_align64prevpow2(j) != q) + FAIL_ALIGN64("rte_align64prevpow2", j, q); + if (rte_is_power_of_2(j + 1)) + q = j + 1; + } + for (p = 2; p <= MAX_NUM; p <<= 1) { if (!rte_is_power_of_2(p)) @@ -128,6 +154,18 @@ test_align(void) FAIL("rte_is_aligned"); } } + + for (p = 1; p <= MAX_NUM / 2; p++) { + for (i = 1; i <= MAX_NUM / 2; i++) { + val = RTE_ALIGN_MUL_CEIL(i, p); + if (val % p != 0 || val < i) + FAIL_ALIGN("RTE_ALIGN_MUL_CEIL", i, p); + val = RTE_ALIGN_MUL_FLOOR(i, p); + if (val % p != 0 || val > i) + FAIL_ALIGN("RTE_ALIGN_MUL_FLOOR", i, p); + } + } + return 0; } diff --git a/test/test/test_compressdev.c b/test/test/test_compressdev.c new file mode 100644 index 00000000..640942ba --- /dev/null +++ b/test/test/test_compressdev.c @@ -0,0 +1,1133 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#include <string.h> +#include <zlib.h> +#include <math.h> + +#include <rte_cycles.h> +#include <rte_malloc.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_compressdev.h> + +#include "test_compressdev_test_buffer.h" +#include "test.h" + +#define DEFAULT_WINDOW_SIZE 15 +#define DEFAULT_MEM_LEVEL 8 +#define MAX_DEQD_RETRIES 10 +#define DEQUEUE_WAIT_TIME 10000 + +/* + * 30% extra size for compressed data compared to original data, + * in case data size cannot be reduced and it is actually bigger + * due to the compress block headers + */ +#define COMPRESS_BUF_SIZE_RATIO 1.3 +#define NUM_MBUFS 16 +#define NUM_OPS 16 +#define NUM_MAX_XFORMS 16 +#define NUM_MAX_INFLIGHT_OPS 128 +#define CACHE_SIZE 0 + +const char * +huffman_type_strings[] = { + [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default", + [RTE_COMP_HUFFMAN_FIXED] = "Fixed", + [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic" +}; + +enum zlib_direction { + ZLIB_NONE, + ZLIB_COMPRESS, + ZLIB_DECOMPRESS, + ZLIB_ALL +}; + +struct priv_op_data { + uint16_t orig_idx; +}; + +struct comp_testsuite_params { + struct rte_mempool *mbuf_pool; + struct rte_mempool *op_pool; + struct rte_comp_xform *def_comp_xform; + struct rte_comp_xform *def_decomp_xform; +}; + +static struct comp_testsuite_params testsuite_params = { 0 }; + +static void +testsuite_teardown(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + + rte_mempool_free(ts_params->mbuf_pool); + rte_mempool_free(ts_params->op_pool); + rte_free(ts_params->def_comp_xform); + rte_free(ts_params->def_decomp_xform); +} + +static int +testsuite_setup(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + unsigned int i; + + if (rte_compressdev_count() == 0) { + RTE_LOG(ERR, USER1, "Need at least one compress device\n"); + return TEST_FAILED; + } + + uint32_t max_buf_size = 0; + for (i = 0; i < RTE_DIM(compress_test_bufs); i++) + max_buf_size = RTE_MAX(max_buf_size, + strlen(compress_test_bufs[i]) + 1); + + max_buf_size *= COMPRESS_BUF_SIZE_RATIO; + /* + * Buffers to be used in compression and decompression. 
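 * As a concrete example of this sizing: a 1000-byte test string gets a
 * 1300-byte data room (plus RTE_PKTMBUF_HEADROOM), which also covers
 * incompressible input that DEFLATE expands rather than shrinks.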
+ * Since decompressed data might be larger than + * compressed data (due to block header), + * buffers should be big enough for both cases. + */ + ts_params->mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", + NUM_MBUFS, + CACHE_SIZE, 0, + max_buf_size + RTE_PKTMBUF_HEADROOM, + rte_socket_id()); + if (ts_params->mbuf_pool == NULL) { + RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n"); + return TEST_FAILED; + } + + ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS, + 0, sizeof(struct priv_op_data), + rte_socket_id()); + if (ts_params->op_pool == NULL) { + RTE_LOG(ERR, USER1, "Operation pool could not be created\n"); + goto exit; + } + + ts_params->def_comp_xform = + rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); + if (ts_params->def_comp_xform == NULL) { + RTE_LOG(ERR, USER1, + "Default compress xform could not be created\n"); + goto exit; + } + ts_params->def_decomp_xform = + rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); + if (ts_params->def_decomp_xform == NULL) { + RTE_LOG(ERR, USER1, + "Default decompress xform could not be created\n"); + goto exit; + } + + /* Initializes default values for compress/decompress xforms */ + ts_params->def_comp_xform->type = RTE_COMP_COMPRESS; + ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE, + ts_params->def_comp_xform->compress.deflate.huffman = + RTE_COMP_HUFFMAN_DEFAULT; + ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT; + ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE; + ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE; + + ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS; + ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE, + ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE; + ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE; + + return TEST_SUCCESS; + +exit: + testsuite_teardown(); + + return TEST_FAILED; +} + +static int +generic_ut_setup(void) +{ + /* Configure compressdev (one device, one queue pair) */ + struct rte_compressdev_config config = { + .socket_id = rte_socket_id(), + .nb_queue_pairs = 1, + .max_nb_priv_xforms = NUM_MAX_XFORMS, + .max_nb_streams = 0 + }; + + if (rte_compressdev_configure(0, &config) < 0) { + RTE_LOG(ERR, USER1, "Device configuration failed\n"); + return -1; + } + + if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS, + rte_socket_id()) < 0) { + RTE_LOG(ERR, USER1, "Queue pair setup failed\n"); + return -1; + } + + if (rte_compressdev_start(0) < 0) { + RTE_LOG(ERR, USER1, "Device could not be started\n"); + return -1; + } + + return 0; +} + +static void +generic_ut_teardown(void) +{ + rte_compressdev_stop(0); + if (rte_compressdev_close(0) < 0) + RTE_LOG(ERR, USER1, "Device could not be closed\n"); +} + +static int +test_compressdev_invalid_configuration(void) +{ + struct rte_compressdev_config invalid_config; + struct rte_compressdev_config valid_config = { + .socket_id = rte_socket_id(), + .nb_queue_pairs = 1, + .max_nb_priv_xforms = NUM_MAX_XFORMS, + .max_nb_streams = 0 + }; + struct rte_compressdev_info dev_info; + + /* Invalid configuration with 0 queue pairs */ + memcpy(&invalid_config, &valid_config, + sizeof(struct rte_compressdev_config)); + invalid_config.nb_queue_pairs = 0; + + TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config), + "Device configuration was successful " + "with no queue pairs (invalid)\n"); + + /* + * Invalid configuration with too many queue pairs + * (if there is an actual 
maximum number of queue pairs) + */ + rte_compressdev_info_get(0, &dev_info); + if (dev_info.max_nb_queue_pairs != 0) { + memcpy(&invalid_config, &valid_config, + sizeof(struct rte_compressdev_config)); + invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1; + + TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config), + "Device configuration was successful " + "with too many queue pairs (invalid)\n"); + } + + /* Invalid queue pair setup, with no number of queue pairs set */ + TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0, + NUM_MAX_INFLIGHT_OPS, rte_socket_id()), + "Queue pair setup was successful " + "with no queue pairs set (invalid)\n"); + + return TEST_SUCCESS; +} + +static int +compare_buffers(const char *buffer1, uint32_t buffer1_len, + const char *buffer2, uint32_t buffer2_len) +{ + if (buffer1_len != buffer2_len) { + RTE_LOG(ERR, USER1, "Buffer lengths are different\n"); + return -1; + } + + if (memcmp(buffer1, buffer2, buffer1_len) != 0) { + RTE_LOG(ERR, USER1, "Buffers are different\n"); + return -1; + } + + return 0; +} + +/* + * Maps compressdev and Zlib flush flags + */ +static int +map_zlib_flush_flag(enum rte_comp_flush_flag flag) +{ + switch (flag) { + case RTE_COMP_FLUSH_NONE: + return Z_NO_FLUSH; + case RTE_COMP_FLUSH_SYNC: + return Z_SYNC_FLUSH; + case RTE_COMP_FLUSH_FULL: + return Z_FULL_FLUSH; + case RTE_COMP_FLUSH_FINAL: + return Z_FINISH; + /* + * There should be only the values above, + * so this should never happen + */ + default: + return -1; + } +} + +static int +compress_zlib(struct rte_comp_op *op, + const struct rte_comp_xform *xform, int mem_level) +{ + z_stream stream; + int zlib_flush; + int strategy, window_bits, comp_level; + int ret = -1; + + /* initialize zlib stream */ + stream.zalloc = Z_NULL; + stream.zfree = Z_NULL; + stream.opaque = Z_NULL; + + if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED) + strategy = Z_FIXED; + else + strategy = Z_DEFAULT_STRATEGY; + + /* + * Window bits is the base two logarithm of the window size (in bytes). + * When doing raw DEFLATE, this number will be negative. 
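 * For instance, with the DEFAULT_WINDOW_SIZE of 15 used in this file,
 * window_bits becomes -15, which zlib interprets as raw DEFLATE
 * (no zlib header or trailer) with a 32 KB window.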
+	 */
+	window_bits = -(xform->compress.window_size);
+
+	comp_level = xform->compress.level;
+
+	if (comp_level != RTE_COMP_LEVEL_NONE)
+		ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
+				window_bits, mem_level, strategy);
+	else
+		ret = deflateInit(&stream, Z_NO_COMPRESSION);
+
+	if (ret != Z_OK) {
+		printf("Zlib deflate could not be initialized\n");
+		goto exit;
+	}
+
+	/* Assuming stateless operation */
+	stream.avail_in = op->src.length;
+	stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
+	stream.avail_out = op->m_dst->data_len;
+	stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
+
+	/* Stateless operation, the whole buffer will be compressed in one go */
+	zlib_flush = map_zlib_flush_flag(op->flush_flag);
+	ret = deflate(&stream, zlib_flush);
+
+	if (stream.avail_in != 0) {
+		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
+		goto exit;
+	}
+
+	if (ret != Z_STREAM_END)
+		goto exit;
+
+	op->consumed = op->src.length - stream.avail_in;
+	op->produced = op->m_dst->data_len - stream.avail_out;
+	op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+	deflateReset(&stream);
+
+	ret = 0;
+exit:
+	deflateEnd(&stream);
+
+	return ret;
+}
+
+static int
+decompress_zlib(struct rte_comp_op *op,
+		const struct rte_comp_xform *xform)
+{
+	z_stream stream;
+	int window_bits;
+	int zlib_flush;
+	int ret = TEST_FAILED;
+
+	/* initialize zlib stream */
+	stream.zalloc = Z_NULL;
+	stream.zfree = Z_NULL;
+	stream.opaque = Z_NULL;
+
+	/*
+	 * Window bits is the base two logarithm of the window size (in bytes).
+	 * When doing raw DEFLATE, this number will be negative.
+	 */
+	window_bits = -(xform->decompress.window_size);
+
+	ret = inflateInit2(&stream, window_bits);
+
+	if (ret != Z_OK) {
+		printf("Zlib inflate could not be initialized\n");
+		goto exit;
+	}
+
+	/* Assuming stateless operation */
+	stream.avail_in = op->src.length;
+	stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
+	stream.avail_out = op->m_dst->data_len;
+	stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
+
+	/* Stateless operation, the whole buffer will be decompressed in one go */
+	zlib_flush = map_zlib_flush_flag(op->flush_flag);
+	ret = inflate(&stream, zlib_flush);
+
+	if (stream.avail_in != 0) {
+		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
+		goto exit;
+	}
+
+	if (ret != Z_STREAM_END)
+		goto exit;
+
+	op->consumed = op->src.length - stream.avail_in;
+	op->produced = op->m_dst->data_len - stream.avail_out;
+	op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+	inflateReset(&stream);
+
+	ret = 0;
+exit:
+	inflateEnd(&stream);
+
+	return ret;
+}
+
+/*
+ * Compresses and decompresses a buffer with the compressdev API
+ * and the Zlib API
+ */
+static int
+test_deflate_comp_decomp(const char * const test_bufs[],
+		unsigned int num_bufs,
+		uint16_t buf_idx[],
+		struct rte_comp_xform *compress_xforms[],
+		struct rte_comp_xform *decompress_xforms[],
+		unsigned int num_xforms,
+		enum rte_comp_op_type state,
+		enum zlib_direction zlib_dir)
+{
+	struct comp_testsuite_params *ts_params = &testsuite_params;
+	int ret_status = -1;
+	int ret;
+	struct rte_mbuf *uncomp_bufs[num_bufs];
+	struct rte_mbuf *comp_bufs[num_bufs];
+	struct rte_comp_op *ops[num_bufs];
+	struct rte_comp_op *ops_processed[num_bufs];
+	void *priv_xforms[num_bufs];
+	uint16_t num_enqd, num_deqd, num_total_deqd;
+	uint16_t num_priv_xforms = 0;
+	unsigned int deqd_retries = 0;
+	struct priv_op_data *priv_data;
+	char *data_ptr;
+	unsigned int i;
+	const struct rte_compressdev_capabilities *capa =
+		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+
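/*
 * Layout note: op_pool was created with sizeof(struct priv_op_data)
 * bytes of per-operation private space, so each op's private area sits
 * directly behind the rte_comp_op structure and is reached as
 * (struct priv_op_data *)(op + 1) throughout this function.
 */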
/* Initialize all arrays to NULL */ + memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs); + memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs); + memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs); + memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs); + memset(priv_xforms, 0, sizeof(void *) * num_bufs); + + /* Prepare the source mbufs with the data */ + ret = rte_pktmbuf_alloc_bulk(ts_params->mbuf_pool, uncomp_bufs, num_bufs); + if (ret < 0) { + RTE_LOG(ERR, USER1, + "Source mbufs could not be allocated " + "from the mempool\n"); + goto exit; + } + + for (i = 0; i < num_bufs; i++) { + data_ptr = rte_pktmbuf_append(uncomp_bufs[i], + strlen(test_bufs[i]) + 1); + snprintf(data_ptr, strlen(test_bufs[i]) + 1, "%s", + test_bufs[i]); + } + + /* Prepare the destination mbufs */ + ret = rte_pktmbuf_alloc_bulk(ts_params->mbuf_pool, comp_bufs, num_bufs); + if (ret < 0) { + RTE_LOG(ERR, USER1, + "Destination mbufs could not be allocated " + "from the mempool\n"); + goto exit; + } + + for (i = 0; i < num_bufs; i++) + rte_pktmbuf_append(comp_bufs[i], + strlen(test_bufs[i]) * COMPRESS_BUF_SIZE_RATIO); + + /* Build the compression operations */ + ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs); + if (ret < 0) { + RTE_LOG(ERR, USER1, + "Compress operations could not be allocated " + "from the mempool\n"); + goto exit; + } + + for (i = 0; i < num_bufs; i++) { + ops[i]->m_src = uncomp_bufs[i]; + ops[i]->m_dst = comp_bufs[i]; + ops[i]->src.offset = 0; + ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]); + ops[i]->dst.offset = 0; + if (state == RTE_COMP_OP_STATELESS) { + ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL; + } else { + RTE_LOG(ERR, USER1, + "Stateful operations are not supported " + "in these tests yet\n"); + goto exit; + } + ops[i]->input_chksum = 0; + /* + * Store original operation index in private data, + * since ordering does not have to be maintained, + * when dequeueing from compressdev, so a comparison + * at the end of the test can be done. 
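	 * For example, if the driver completes buffer 2 first, then
	 * ops_processed[0] carries orig_idx == 2 and the final comparison
	 * still picks test_bufs[2] as its reference.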
+		 */
+		priv_data = (struct priv_op_data *) (ops[i] + 1);
+		priv_data->orig_idx = i;
+	}
+
+	/* Compress data (either with Zlib API or compressdev API) */
+	if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
+		for (i = 0; i < num_bufs; i++) {
+			const struct rte_comp_xform *compress_xform =
+				compress_xforms[i % num_xforms];
+			ret = compress_zlib(ops[i], compress_xform,
+					DEFAULT_MEM_LEVEL);
+			if (ret < 0)
+				goto exit;
+
+			ops_processed[i] = ops[i];
+		}
+	} else {
+		/* Create compress private xform data */
+		for (i = 0; i < num_xforms; i++) {
+			ret = rte_compressdev_private_xform_create(0,
+				(const struct rte_comp_xform *)compress_xforms[i],
+				&priv_xforms[i]);
+			if (ret < 0) {
+				RTE_LOG(ERR, USER1,
+					"Compression private xform "
+					"could not be created\n");
+				goto exit;
+			}
+			num_priv_xforms++;
+		}
+
+		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
+			/* Attach shareable private xform data to ops */
+			for (i = 0; i < num_bufs; i++)
+				ops[i]->private_xform = priv_xforms[i % num_xforms];
+		} else {
+			/* Create the rest of the private xforms for the other ops */
+			for (i = num_xforms; i < num_bufs; i++) {
+				ret = rte_compressdev_private_xform_create(0,
+					compress_xforms[i % num_xforms],
+					&priv_xforms[i]);
+				if (ret < 0) {
+					RTE_LOG(ERR, USER1,
+						"Compression private xform "
+						"could not be created\n");
+					goto exit;
+				}
+				num_priv_xforms++;
+			}
+
+			/* Attach non-shareable private xform data to ops */
+			for (i = 0; i < num_bufs; i++)
+				ops[i]->private_xform = priv_xforms[i];
+		}
+
+		/* Enqueue and dequeue all operations */
+		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
+		if (num_enqd < num_bufs) {
+			RTE_LOG(ERR, USER1,
+				"The operations could not be enqueued\n");
+			goto exit;
+		}
+
+		num_total_deqd = 0;
+		do {
+			/*
+			 * If retrying a dequeue call, wait for 10 ms to allow
+			 * the driver enough time to process the operations
+			 */
+			if (deqd_retries != 0) {
+				/*
+				 * Avoid infinite loop if not all the
+				 * operations get out of the device
+				 */
+				if (deqd_retries == MAX_DEQD_RETRIES) {
+					RTE_LOG(ERR, USER1,
+						"Not all operations could be "
+						"dequeued\n");
+					goto exit;
+				}
+				usleep(DEQUEUE_WAIT_TIME);
+			}
+			num_deqd = rte_compressdev_dequeue_burst(0, 0,
+					&ops_processed[num_total_deqd], num_bufs);
+			num_total_deqd += num_deqd;
+			deqd_retries++;
+		} while (num_total_deqd < num_enqd);
+
+		deqd_retries = 0;
+
+		/* Free compress private xforms */
+		for (i = 0; i < num_priv_xforms; i++) {
+			rte_compressdev_private_xform_free(0, priv_xforms[i]);
+			priv_xforms[i] = NULL;
+		}
+		num_priv_xforms = 0;
+	}
+
+	for (i = 0; i < num_bufs; i++) {
+		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
+		uint16_t xform_idx = priv_data->orig_idx % num_xforms;
+		const struct rte_comp_compress_xform *compress_xform =
+			&compress_xforms[xform_idx]->compress;
+		enum rte_comp_huffman huffman_type =
+			compress_xform->deflate.huffman;
+		RTE_LOG(DEBUG, USER1, "Buffer %u compressed from %u to %u bytes "
+			"(level = %d, huffman = %s)\n",
+			buf_idx[priv_data->orig_idx],
+			ops_processed[i]->consumed, ops_processed[i]->produced,
+			compress_xform->level,
+			huffman_type_strings[huffman_type]);
+		RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f %%\n",
+			(float)ops_processed[i]->produced /
+			ops_processed[i]->consumed * 100);
+		ops[i] = NULL;
+	}
+
+	/*
+	 * Check operation status and free source mbufs (destination mbuf and
+	 * compress operation information is needed for the decompression stage)
+	 */
+	for (i = 0; i < num_bufs; i++) {
+		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS)
{
+			RTE_LOG(ERR, USER1,
+				"Some operations were not successful\n");
+			goto exit;
+		}
+		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
+		rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
+		uncomp_bufs[priv_data->orig_idx] = NULL;
+	}
+
+	/* Allocate buffers for decompressed data */
+	ret = rte_pktmbuf_alloc_bulk(ts_params->mbuf_pool, uncomp_bufs, num_bufs);
+	if (ret < 0) {
+		RTE_LOG(ERR, USER1,
+			"Destination mbufs could not be allocated "
+			"from the mempool\n");
+		goto exit;
+	}
+
+	for (i = 0; i < num_bufs; i++) {
+		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
+		rte_pktmbuf_append(uncomp_bufs[i],
+			strlen(test_bufs[priv_data->orig_idx]) + 1);
+	}
+
+	/* Build the decompression operations */
+	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
+	if (ret < 0) {
+		RTE_LOG(ERR, USER1,
+			"Decompress operations could not be allocated "
+			"from the mempool\n");
+		goto exit;
+	}
+
+	/* Source buffer is the compressed data from the previous operations */
+	for (i = 0; i < num_bufs; i++) {
+		ops[i]->m_src = ops_processed[i]->m_dst;
+		ops[i]->m_dst = uncomp_bufs[i];
+		ops[i]->src.offset = 0;
+		/*
+		 * Set the length of the compressed data to the
+		 * number of bytes that were produced in the previous stage
+		 */
+		ops[i]->src.length = ops_processed[i]->produced;
+		ops[i]->dst.offset = 0;
+		if (state == RTE_COMP_OP_STATELESS) {
+			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
+		} else {
+			RTE_LOG(ERR, USER1,
+				"Stateful operations are not supported "
+				"in these tests yet\n");
+			goto exit;
+		}
+		ops[i]->input_chksum = 0;
+		/*
+		 * Copy private data from previous operations,
+		 * to keep the pointer to the original buffer
+		 */
+		memcpy(ops[i] + 1, ops_processed[i] + 1,
+			sizeof(struct priv_op_data));
+	}
+
+	/*
+	 * Free the previous compress operations,
+	 * as they are not needed anymore
+	 */
+	for (i = 0; i < num_bufs; i++) {
+		rte_comp_op_free(ops_processed[i]);
+		ops_processed[i] = NULL;
+	}
+
+	/* Decompress data (either with Zlib API or compressdev API) */
+	if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
+		for (i = 0; i < num_bufs; i++) {
+			priv_data = (struct priv_op_data *)(ops[i] + 1);
+			uint16_t xform_idx = priv_data->orig_idx % num_xforms;
+			const struct rte_comp_xform *decompress_xform =
+				decompress_xforms[xform_idx];
+
+			ret = decompress_zlib(ops[i], decompress_xform);
+			if (ret < 0)
+				goto exit;
+
+			ops_processed[i] = ops[i];
+		}
+	} else {
+		/* Create decompress private xform data */
+		for (i = 0; i < num_xforms; i++) {
+			ret = rte_compressdev_private_xform_create(0,
+				(const struct rte_comp_xform *)decompress_xforms[i],
+				&priv_xforms[i]);
+			if (ret < 0) {
+				RTE_LOG(ERR, USER1,
+					"Decompression private xform "
+					"could not be created\n");
+				goto exit;
+			}
+			num_priv_xforms++;
+		}
+
+		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
+			/* Attach shareable private xform data to ops */
+			for (i = 0; i < num_bufs; i++) {
+				priv_data = (struct priv_op_data *)(ops[i] + 1);
+				uint16_t xform_idx = priv_data->orig_idx %
+					num_xforms;
+				ops[i]->private_xform = priv_xforms[xform_idx];
+			}
+		} else {
+			/* Create the rest of the private xforms for the other ops */
+			for (i = num_xforms; i < num_bufs; i++) {
+				ret = rte_compressdev_private_xform_create(0,
+					decompress_xforms[i % num_xforms],
+					&priv_xforms[i]);
+				if (ret < 0) {
+					RTE_LOG(ERR, USER1,
+						"Decompression private xform "
+						"could not be created\n");
+					goto exit;
+				}
+				num_priv_xforms++;
+			}
+
+			/* Attach non-shareable private xform data to ops */
+			for (i = 0; i < num_bufs; i++) {
+
priv_data = (struct priv_op_data *)(ops[i] + 1); + uint16_t xform_idx = priv_data->orig_idx; + ops[i]->private_xform = priv_xforms[xform_idx]; + } + } + + /* Enqueue and dequeue all operations */ + num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs); + if (num_enqd < num_bufs) { + RTE_LOG(ERR, USER1, + "The operations could not be enqueued\n"); + goto exit; + } + + num_total_deqd = 0; + do { + /* + * If retrying a dequeue call, wait for 10 ms to allow + * enough time to the driver to process the operations + */ + if (deqd_retries != 0) { + /* + * Avoid infinite loop if not all the + * operations get out of the device + */ + if (deqd_retries == MAX_DEQD_RETRIES) { + RTE_LOG(ERR, USER1, + "Not all operations could be " + "dequeued\n"); + goto exit; + } + usleep(DEQUEUE_WAIT_TIME); + } + num_deqd = rte_compressdev_dequeue_burst(0, 0, + &ops_processed[num_total_deqd], num_bufs); + num_total_deqd += num_deqd; + deqd_retries++; + } while (num_total_deqd < num_enqd); + + deqd_retries = 0; + } + + for (i = 0; i < num_bufs; i++) { + priv_data = (struct priv_op_data *)(ops_processed[i] + 1); + RTE_LOG(DEBUG, USER1, "Buffer %u decompressed from %u to %u bytes\n", + buf_idx[priv_data->orig_idx], + ops_processed[i]->consumed, ops_processed[i]->produced); + ops[i] = NULL; + } + + /* + * Check operation status and free source mbuf (destination mbuf and + * compress operation information is still needed) + */ + for (i = 0; i < num_bufs; i++) { + if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) { + RTE_LOG(ERR, USER1, + "Some operations were not successful\n"); + goto exit; + } + priv_data = (struct priv_op_data *)(ops_processed[i] + 1); + rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]); + comp_bufs[priv_data->orig_idx] = NULL; + } + + /* + * Compare the original stream with the decompressed stream + * (in size and the data) + */ + for (i = 0; i < num_bufs; i++) { + priv_data = (struct priv_op_data *)(ops_processed[i] + 1); + const char *buf1 = test_bufs[priv_data->orig_idx]; + const char *buf2 = rte_pktmbuf_mtod(ops_processed[i]->m_dst, + const char *); + + if (compare_buffers(buf1, strlen(buf1) + 1, + buf2, ops_processed[i]->produced) < 0) + goto exit; + } + + ret_status = 0; + +exit: + /* Free resources */ + for (i = 0; i < num_bufs; i++) { + rte_pktmbuf_free(uncomp_bufs[i]); + rte_pktmbuf_free(comp_bufs[i]); + rte_comp_op_free(ops[i]); + rte_comp_op_free(ops_processed[i]); + } + for (i = 0; i < num_priv_xforms; i++) { + if (priv_xforms[i] != NULL) + rte_compressdev_private_xform_free(0, priv_xforms[i]); + } + + return ret_status; +} + +static int +test_compressdev_deflate_stateless_fixed(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + const char *test_buffer; + uint16_t i; + int ret; + struct rte_comp_xform *compress_xform = + rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); + + if (compress_xform == NULL) { + RTE_LOG(ERR, USER1, + "Compress xform could not be created\n"); + ret = TEST_FAILED; + goto exit; + } + + memcpy(compress_xform, ts_params->def_comp_xform, + sizeof(struct rte_comp_xform)); + compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED; + + for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { + test_buffer = compress_test_bufs[i]; + + /* Compress with compressdev, decompress with Zlib */ + if (test_deflate_comp_decomp(&test_buffer, 1, + &i, + &compress_xform, + &ts_params->def_decomp_xform, + 1, + RTE_COMP_OP_STATELESS, + ZLIB_DECOMPRESS) < 0) { + ret = TEST_FAILED; + goto exit; + } + + /* Compress with Zlib, decompress with 
compressdev */ + if (test_deflate_comp_decomp(&test_buffer, 1, + &i, + &compress_xform, + &ts_params->def_decomp_xform, + 1, + RTE_COMP_OP_STATELESS, + ZLIB_COMPRESS) < 0) { + ret = TEST_FAILED; + goto exit; + } + } + + ret = TEST_SUCCESS; + +exit: + rte_free(compress_xform); + return ret; +} + +static int +test_compressdev_deflate_stateless_dynamic(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + const char *test_buffer; + uint16_t i; + int ret; + struct rte_comp_xform *compress_xform = + rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); + + if (compress_xform == NULL) { + RTE_LOG(ERR, USER1, + "Compress xform could not be created\n"); + ret = TEST_FAILED; + goto exit; + } + + memcpy(compress_xform, ts_params->def_comp_xform, + sizeof(struct rte_comp_xform)); + compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC; + + for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { + test_buffer = compress_test_bufs[i]; + + /* Compress with compressdev, decompress with Zlib */ + if (test_deflate_comp_decomp(&test_buffer, 1, + &i, + &compress_xform, + &ts_params->def_decomp_xform, + 1, + RTE_COMP_OP_STATELESS, + ZLIB_DECOMPRESS) < 0) { + ret = TEST_FAILED; + goto exit; + } + + /* Compress with Zlib, decompress with compressdev */ + if (test_deflate_comp_decomp(&test_buffer, 1, + &i, + &compress_xform, + &ts_params->def_decomp_xform, + 1, + RTE_COMP_OP_STATELESS, + ZLIB_COMPRESS) < 0) { + ret = TEST_FAILED; + goto exit; + } + } + + ret = TEST_SUCCESS; + +exit: + rte_free(compress_xform); + return ret; +} + +static int +test_compressdev_deflate_stateless_multi_op(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t num_bufs = RTE_DIM(compress_test_bufs); + uint16_t buf_idx[num_bufs]; + uint16_t i; + + for (i = 0; i < num_bufs; i++) + buf_idx[i] = i; + + /* Compress with compressdev, decompress with Zlib */ + if (test_deflate_comp_decomp(compress_test_bufs, num_bufs, + buf_idx, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1, + RTE_COMP_OP_STATELESS, + ZLIB_DECOMPRESS) < 0) + return TEST_FAILED; + + /* Compress with Zlib, decompress with compressdev */ + if (test_deflate_comp_decomp(compress_test_bufs, num_bufs, + buf_idx, + &ts_params->def_comp_xform, + &ts_params->def_decomp_xform, + 1, + RTE_COMP_OP_STATELESS, + ZLIB_COMPRESS) < 0) + return TEST_FAILED; + + return TEST_SUCCESS; +} + +static int +test_compressdev_deflate_stateless_multi_level(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + const char *test_buffer; + unsigned int level; + uint16_t i; + int ret; + struct rte_comp_xform *compress_xform = + rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); + + if (compress_xform == NULL) { + RTE_LOG(ERR, USER1, + "Compress xform could not be created\n"); + ret = TEST_FAILED; + goto exit; + } + + memcpy(compress_xform, ts_params->def_comp_xform, + sizeof(struct rte_comp_xform)); + + for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { + test_buffer = compress_test_bufs[i]; + for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX; + level++) { + compress_xform->compress.level = level; + /* Compress with compressdev, decompress with Zlib */ + if (test_deflate_comp_decomp(&test_buffer, 1, + &i, + &compress_xform, + &ts_params->def_decomp_xform, + 1, + RTE_COMP_OP_STATELESS, + ZLIB_DECOMPRESS) < 0) { + ret = TEST_FAILED; + goto exit; + } + } + } + + ret = TEST_SUCCESS; + +exit: + rte_free(compress_xform); + return ret; +} + +#define NUM_XFORMS 3 +static int 
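/*
 * The multi-level case above walks every level from RTE_COMP_LEVEL_MIN
 * to RTE_COMP_LEVEL_MAX, while the fixed/dynamic cases map onto zlib as
 * compress_zlib() shows: RTE_COMP_HUFFMAN_FIXED selects Z_FIXED (the
 * static DEFLATE tables) and the other modes use Z_DEFAULT_STRATEGY,
 * letting deflate build per-block dynamic tables.
 */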
+test_compressdev_deflate_stateless_multi_xform(void) +{ + struct comp_testsuite_params *ts_params = &testsuite_params; + uint16_t num_bufs = NUM_XFORMS; + struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL}; + struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL}; + const char *test_buffers[NUM_XFORMS]; + uint16_t i; + unsigned int level = RTE_COMP_LEVEL_MIN; + uint16_t buf_idx[num_bufs]; + + int ret; + + /* Create multiple xforms with various levels */ + for (i = 0; i < NUM_XFORMS; i++) { + compress_xforms[i] = rte_malloc(NULL, + sizeof(struct rte_comp_xform), 0); + if (compress_xforms[i] == NULL) { + RTE_LOG(ERR, USER1, + "Compress xform could not be created\n"); + ret = TEST_FAILED; + goto exit; + } + + memcpy(compress_xforms[i], ts_params->def_comp_xform, + sizeof(struct rte_comp_xform)); + compress_xforms[i]->compress.level = level; + level++; + + decompress_xforms[i] = rte_malloc(NULL, + sizeof(struct rte_comp_xform), 0); + if (decompress_xforms[i] == NULL) { + RTE_LOG(ERR, USER1, + "Decompress xform could not be created\n"); + ret = TEST_FAILED; + goto exit; + } + + memcpy(decompress_xforms[i], ts_params->def_decomp_xform, + sizeof(struct rte_comp_xform)); + } + + for (i = 0; i < NUM_XFORMS; i++) { + buf_idx[i] = 0; + /* Use the same buffer in all sessions */ + test_buffers[i] = compress_test_bufs[0]; + } + /* Compress with compressdev, decompress with Zlib */ + if (test_deflate_comp_decomp(test_buffers, num_bufs, + buf_idx, + compress_xforms, + decompress_xforms, + NUM_XFORMS, + RTE_COMP_OP_STATELESS, + ZLIB_DECOMPRESS) < 0) { + ret = TEST_FAILED; + goto exit; + } + + ret = TEST_SUCCESS; +exit: + for (i = 0; i < NUM_XFORMS; i++) { + rte_free(compress_xforms[i]); + rte_free(decompress_xforms[i]); + } + + return ret; +} + +static struct unit_test_suite compressdev_testsuite = { + .suite_name = "compressdev unit test suite", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(NULL, NULL, + test_compressdev_invalid_configuration), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_fixed), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_dynamic), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_multi_op), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_multi_level), + TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, + test_compressdev_deflate_stateless_multi_xform), + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static int +test_compressdev(void) +{ + return unit_test_suite_runner(&compressdev_testsuite); +} + +REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev); diff --git a/test/test/test_compressdev_test_buffer.h b/test/test/test_compressdev_test_buffer.h new file mode 100644 index 00000000..c0492f89 --- /dev/null +++ b/test/test/test_compressdev_test_buffer.h @@ -0,0 +1,295 @@ +#ifndef TEST_COMPRESSDEV_TEST_BUFFERS_H_ +#define TEST_COMPRESSDEV_TEST_BUFFERS_H_ + +/* + * These test buffers are snippets obtained + * from the Canterbury and Calgary Corpus + * collection. 
+ */ + +/* Snippet of Alice's Adventures in Wonderland */ +static const char test_buf_alice[] = + " Alice was beginning to get very tired of sitting by her sister\n" + "on the bank, and of having nothing to do: once or twice she had\n" + "peeped into the book her sister was reading, but it had no\n" + "pictures or conversations in it, `and what is the use of a book,'\n" + "thought Alice `without pictures or conversation?'\n\n" + " So she was considering in her own mind (as well as she could,\n" + "for the hot day made her feel very sleepy and stupid), whether\n" + "the pleasure of making a daisy-chain would be worth the trouble\n" + "of getting up and picking the daisies, when suddenly a White\n" + "Rabbit with pink eyes ran close by her.\n\n" + " There was nothing so VERY remarkable in that; nor did Alice\n" + "think it so VERY much out of the way to hear the Rabbit say to\n" + "itself, `Oh dear! Oh dear! I shall be late!' (when she thought\n" + "it over afterwards, it occurred to her that she ought to have\n" + "wondered at this, but at the time it all seemed quite natural);\n" + "but when the Rabbit actually TOOK A WATCH OUT OF ITS WAISTCOAT-\n" + "POCKET, and looked at it, and then hurried on, Alice started to\n" + "her feet, for it flashed across her mind that she had never\n" + "before seen a rabbit with either a waistcoat-pocket, or a watch to\n" + "take out of it, and burning with curiosity, she ran across the\n" + "field after it, and fortunately was just in time to see it pop\n" + "down a large rabbit-hole under the hedge.\n\n" + " In another moment down went Alice after it, never once\n" + "considering how in the world she was to get out again.\n\n" + " The rabbit-hole went straight on like a tunnel for some way,\n" + "and then dipped suddenly down, so suddenly that Alice had not a\n" + "moment to think about stopping herself before she found herself\n" + "falling down a very deep well.\n\n" + " Either the well was very deep, or she fell very slowly, for she\n" + "had plenty of time as she went down to look about her and to\n" + "wonder what was going to happen next. First, she tried to look\n" + "down and make out what she was coming to, but it was too dark to\n" + "see anything; then she looked at the sides of the well, and\n" + "noticed that they were filled with cupboards and book-shelves;\n" + "here and there she saw maps and pictures hung upon pegs. She\n" + "took down a jar from one of the shelves as she passed; it was\n" + "labelled `ORANGE MARMALADE', but to her great disappointment it\n" + "was empty: she did not like to drop the jar for fear of killing\n" + "somebody, so managed to put it into one of the cupboards as she\n" + "fell past it.\n\n" + " `Well!' thought Alice to herself, `after such a fall as this, I\n" + "shall think nothing of tumbling down stairs! How brave they'll\n" + "all think me at home! Why, I wouldn't say anything about it,\n" + "even if I fell off the top of the house!' (Which was very likely\n" + "true.)\n\n" + " Down, down, down. Would the fall NEVER come to an end! `I\n" + "wonder how many miles I've fallen by this time?' she said aloud.\n" + "`I must be getting somewhere near the centre of the earth. 
Let\n" + "me see: that would be four thousand miles down, I think--' (for,\n" + "you see, Alice had learnt several things of this sort in her\n" + "lessons in the schoolroom, and though this was not a VERY good\n" + "opportunity for showing off her knowledge, as there was no one to\n" + "listen to her, still it was good practice to say it over) `--yes,\n" + "that's about the right distance--but then I wonder what Latitude\n" + "or Longitude I've got to?' (Alice had no idea what Latitude was,\n" + "or Longitude either, but thought they were nice grand words to\n" + "say.)\n\n" + " Presently she began again. `I wonder if I shall fall right\n" + "THROUGH the earth! How funny it'll seem to come out among the\n" + "people that walk with their heads downward! The Antipathies, I\n" + "think--' (she was rather glad there WAS no one listening, this\n" + "time, as it didn't sound at all the right word) `--but I shall\n" + "have to ask them what the name of the country is, you know.\n" + "Please, Ma'am, is this New Zealand or Australia?' (and she tried\n" + "to curtsey as she spoke--fancy CURTSEYING as you're falling\n" + "through the air! Do you think you could manage it?) `And what\n" + "an ignorant little girl she'll think me for asking! No, it'll\n" + "never do to ask: perhaps I shall see it written up somewhere.'\n" + " Down, down, down. There was nothing else to do, so Alice soon\n" + "began talking again. `Dinah'll miss me very much to-night, I\n" + "should think!' (Dinah was the cat.) `I hope they'll remember\n" + "her saucer of milk at tea-time. Dinah my dear! I wish you were\n" + "down here with me! There are no mice in the air, I'm afraid, but\n" + "you might catch a bat, and that's very like a mouse, you know.\n" + "But do cats eat bats, I wonder?' And here Alice began to get\n" + "rather sleepy, and went on saying to herself, in a dreamy sort of\n" + "way, `Do cats eat bats? Do cats eat bats?' and sometimes, `Do\n" + "bats eat cats?' for, you see, as she couldn't answer either\n" + "question, it didn't much matter which way she put it. She felt\n" + "that she was dozing off, and had just begun to dream that she\n" + "was walking hand in hand with Dinah, and saying to her very\n" + "earnestly, `Now, Dinah, tell me the truth: did you ever eat a\n" + "bat?' when suddenly, thump! thump! down she came upon a heap of\n" + "sticks and dry leaves, and the fall was over.\n\n"; + +/* Snippet of Shakespeare play */ +static const char test_buf_shakespeare[] = + "CHARLES wrestler to Frederick.\n" + "\n" + "\n" + "OLIVER |\n" + " |\n" + "JAQUES (JAQUES DE BOYS:) | sons of Sir Rowland de Boys.\n" + " |\n" + "ORLANDO |\n" + "\n" + "\n" + "ADAM |\n" + " | servants to Oliver.\n" + "DENNIS |\n" + "\n" + "\n" + "TOUCHSTONE a clown.\n" + "\n" + "SIR OLIVER MARTEXT a vicar.\n" + "\n" + "\n" + "CORIN |\n" + " | shepherds.\n" + "SILVIUS |\n" + "\n" + "\n" + "WILLIAM a country fellow in love with Audrey.\n" + "\n" + " A person representing HYMEN. 
(HYMEN:)\n" + "\n" + "ROSALIND daughter to the banished duke.\n" + "\n" + "CELIA daughter to Frederick.\n" + "\n" + "PHEBE a shepherdess.\n" + "\n" + "AUDREY a country wench.\n" + "\n" + " Lords, pages, and attendants, &c.\n" + " (Forester:)\n" + " (A Lord:)\n" + " (First Lord:)\n" + " (Second Lord:)\n" + " (First Page:)\n" + " (Second Page:)\n" + "\n" + "\n" + "SCENE Oliver's house; Duke Frederick's court; and the\n" + " Forest of Arden.\n" + "\n" + "\n" + "\n" + "\n" + " AS YOU LIKE IT\n" + "\n" + "\n" + "ACT I\n" + "\n" + "\n" + "\n" + "SCENE I Orchard of Oliver's house.\n" + "\n" + "\n" + " [Enter ORLANDO and ADAM]\n" + "\n" + "ORLANDO As I remember, Adam, it was upon this fashion\n" + " bequeathed me by will but poor a thousand crowns,\n" + " and, as thou sayest, charged my brother, on his\n" + " blessing, to breed me well: and there begins my\n" + " sadness. My brother Jaques he keeps at school, and\n" + " report speaks goldenly of his profit: for my part,\n" + " he keeps me rustically at home, or, to speak more\n" + " properly, stays me here at home unkept; for call you\n" + " that keeping for a gentleman of my birth, that\n" + " differs not from the stalling of an ox? His horses\n" + " are bred better; for, besides that they are fair\n" + " with their feeding, they are taught their manage,\n" + " and to that end riders dearly hired: but I, his\n" + " brother, gain nothing under him but growth; for the\n" + " which his animals on his dunghills are as much\n" + " bound to him as I. Besides this nothing that he so\n" + " plentifully gives me, the something that nature gave\n" + " me his countenance seems to take from me: he lets\n" + " me feed with his hinds, bars me the place of a\n" + " brother, and, as much as in him lies, mines my\n" + " gentility with my education. This is it, Adam, that\n" + " grieves me; and the spirit of my father, which I\n" + " think is within me, begins to mutiny against this\n" + " servitude: I will no longer endure it, though yet I\n" + " know no wise remedy how to avoid it.\n" + "\n" + "ADAM Yonder comes my master, your brother.\n" + "\n" + "ORLANDO Go apart, Adam, and thou shalt hear how he will\n"; + +/* Snippet of source code in Pascal */ +static const char test_buf_pascal[] = + " Ptr = 1..DMem;\n" + " Loc = 1..IMem;\n" + " Loc0 = 0..IMem;\n" + " EdgeT = (hout,lin,hin,lout); {Warning this order is important in}\n" + " {predicates such as gtS,geS}\n" + " CardT = (finite,infinite);\n" + " ExpT = Minexp..Maxexp;\n" + " ManT = Mininf..Maxinf; \n" + " Pflag = (PNull,PSoln,PTrace,PPrint);\n" + " Sreal = record\n" + " edge:EdgeT;\n" + " cardinality:CardT;\n" + " exp:ExpT; {exponent}\n" + " mantissa:ManT;\n" + " end;\n" + " Int = record\n" + " hi:Sreal;\n" + " lo:Sreal;\n" + " end;\n" + " Instr = record\n" + " Code:OpType;\n" + " Pars: array[0..Par] of 0..DMem;\n" + " end;\n" + " DataMem= record\n" + " D :array [Ptr] of Int;\n" + " S :array [Loc] of State;\n" + " LastHalve:Loc;\n" + " RHalve :array [Loc] of real;\n" + " end;\n" + " DataFlags=record\n" + " PF :array [Ptr] of Pflag;\n" + " end;\n" + "var\n" + " Debug : (none,activity,post,trace,dump);\n" + " Cut : (once,all);\n" + " GlobalEnd,Verifiable:boolean;\n" + " HalveThreshold:real;\n" + " I : array [Loc] of Instr; {Memory holding instructions}\n" + " End : Loc; {last instruction in I}\n" + " ParN : array [OpType] of -1..Par; {number of parameters for each \n" + " opcode. 
-1 means no result}\n" + " ParIntersect : array [OpType] of boolean ;\n" + " DInit : DataMem; {initial memory which is cleared and \n" + " used in first call}\n" + " DF : DataFlags; {hold flags for variables, e.g. print/trace}\n" + " MaxDMem:0..DMem;\n" + " Shift : array[0..Digits] of 1..maxint;{array of constant multipliers}\n" + " {used for alignment etc.}\n" + " Dummy :Positive;\n" + " {constant intervals and Sreals}\n" + " PlusInfS,MinusInfS,PlusSmallS,MinusSmallS,ZeroS,\n" + " PlusFiniteS,MinusFiniteS:Sreal;\n" + " Zero,All,AllFinite:Int;\n" + "\n" + "procedure deblank;\n" + "var Ch:char;\n" + "begin\n" + " while (not eof) and (input^ in [' ',' ']) do read(Ch);\n" + "end;\n" + "\n" + "procedure InitialOptions;\n" + "\n" + "#include '/user/profs/cleary/bin/options.i';\n" + "\n" + " procedure Option;\n" + " begin\n" + " case Opt of\n" + " 'a','A':Debug:=activity;\n" + " 'd','D':Debug:=dump;\n" + " 'h','H':HalveThreshold:=StringNum/100;\n" + " 'n','N':Debug:=none;\n" + " 'p','P':Debug:=post;\n" + " 't','T':Debug:=trace;\n" + " 'v','V':Verifiable:=true;\n" + " end;\n" + " end;\n" + "\n" + "begin\n" + " Debug:=trace;\n" + " Verifiable:=false;\n" + " HalveThreshold:=67/100;\n" + " Options;\n" + " writeln(Debug);\n" + " writeln('Verifiable:',Verifiable);\n" + " writeln('Halve threshold',HalveThreshold);\n" + "end;{InitialOptions}\n" + "\n" + "procedure NormalizeUp(E,M:integer;var S:Sreal;var Closed:boolean);\n" + "begin\n" + "with S do\n" + "begin\n" + " if M=0 then S:=ZeroS else\n" + " if M>0 then\n"; + +static const char * const compress_test_bufs[] = { + test_buf_alice, + test_buf_shakespeare, + test_buf_pascal +}; + +#endif /* TEST_COMPRESSDEV_TEST_BUFFERS_H_ */ diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c index 1417482c..389f7967 100644 --- a/test/test/test_cryptodev.c +++ b/test/test/test_cryptodev.c @@ -21,6 +21,8 @@ #include <rte_cryptodev_scheduler_operations.h> #endif +#include <rte_lcore.h> + #include "test.h" #include "test_cryptodev.h" @@ -36,6 +38,8 @@ #include "test_cryptodev_aead_test_vectors.h" #include "test_cryptodev_hmac_test_vectors.h" +#define VDEV_ARGS_SIZE 100 + static int gbl_driver_id; struct crypto_testsuite_params { @@ -316,40 +320,81 @@ testsuite_setup(void) } } - /* Create a MRVL device if required */ + /* Create a MVSAM device if required */ if (gbl_driver_id == rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_MRVL_PMD))) { -#ifndef RTE_LIBRTE_PMD_MRVL_CRYPTO - RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO must be" - " enabled in config file to run this testsuite.\n"); - return TEST_FAILED; -#endif + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD))) { nb_devs = rte_cryptodev_device_count_by_driver( rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_NAME_MRVL_PMD))); + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD))); if (nb_devs < 1) { ret = rte_vdev_init( - RTE_STR(CRYPTODEV_NAME_MRVL_PMD), + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD), NULL); TEST_ASSERT(ret == 0, "Failed to create " "instance of pmd : %s", - RTE_STR(CRYPTODEV_NAME_MRVL_PMD)); + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)); + } + } + + /* Create an CCP device if required */ + if (gbl_driver_id == rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_CCP_PMD))) { + nb_devs = rte_cryptodev_device_count_by_driver( + rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_CCP_PMD))); + if (nb_devs < 1) { + ret = rte_vdev_init( + RTE_STR(CRYPTODEV_NAME_CCP_PMD), + NULL); + + TEST_ASSERT(ret == 0, "Failed to create " + "instance of pmd : %s", + RTE_STR(CRYPTODEV_NAME_CCP_PMD)); } } #ifdef 
RTE_LIBRTE_PMD_CRYPTO_SCHEDULER + char vdev_args[VDEV_ARGS_SIZE] = {""}; + char temp_str[VDEV_ARGS_SIZE] = {"mode=multi-core," + "ordering=enable,name=cryptodev_test_scheduler,corelist="}; + uint16_t slave_core_count = 0; + uint16_t socket_id = 0; + if (gbl_driver_id == rte_cryptodev_driver_id_get( RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) { + /* Identify the Slave Cores + * Use 2 slave cores for the device args + */ + RTE_LCORE_FOREACH_SLAVE(i) { + if (slave_core_count > 1) + break; + snprintf(vdev_args, sizeof(vdev_args), + "%s%d", temp_str, i); + strcpy(temp_str, vdev_args); + strcat(temp_str, ";"); + slave_core_count++; + socket_id = lcore_config[i].socket_id; + } + if (slave_core_count != 2) { + RTE_LOG(ERR, USER1, + "Cryptodev scheduler test requires at least " + "two slave cores to run. " + "Please use the correct coremask.\n"); + return TEST_FAILED; + } + strcpy(temp_str, vdev_args); + snprintf(vdev_args, sizeof(vdev_args), "%s,socket_id=%d", + temp_str, socket_id); + RTE_LOG(DEBUG, USER1, "vdev_args: %s\n", vdev_args); nb_devs = rte_cryptodev_device_count_by_driver( rte_cryptodev_driver_id_get( RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))); if (nb_devs < 1) { ret = rte_vdev_init( RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), - NULL); - + vdev_args); TEST_ASSERT(ret == 0, "Failed to create instance %u of" " pmd : %s", @@ -383,7 +428,8 @@ testsuite_setup(void) ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs; ts_params->conf.socket_id = SOCKET_ID_ANY; - unsigned int session_size = rte_cryptodev_get_private_session_size(dev_id); + unsigned int session_size = + rte_cryptodev_sym_get_private_session_size(dev_id); /* * Create mempool with maximum number of sessions * 2, @@ -1727,6 +1773,44 @@ test_AES_cipheronly_openssl_all(void) } static int +test_AES_chain_ccp_all(void) +{ + struct crypto_testsuite_params *ts_params = &testsuite_params; + int status; + + status = test_blockcipher_all_tests(ts_params->mbuf_pool, + ts_params->op_mpool, + ts_params->session_mpool, + ts_params->valid_devs[0], + rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_CCP_PMD)), + BLKCIPHER_AES_CHAIN_TYPE); + + TEST_ASSERT_EQUAL(status, 0, "Test failed"); + + return TEST_SUCCESS; +} + +static int +test_AES_cipheronly_ccp_all(void) +{ + struct crypto_testsuite_params *ts_params = &testsuite_params; + int status; + + status = test_blockcipher_all_tests(ts_params->mbuf_pool, + ts_params->op_mpool, + ts_params->session_mpool, + ts_params->valid_devs[0], + rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_CCP_PMD)), + BLKCIPHER_AES_CIPHERONLY_TYPE); + + TEST_ASSERT_EQUAL(status, 0, "Test failed"); + + return TEST_SUCCESS; +} + +static int test_AES_chain_qat_all(void) { struct crypto_testsuite_params *ts_params = &testsuite_params; @@ -1765,6 +1849,25 @@ test_AES_cipheronly_qat_all(void) } static int +test_AES_cipheronly_virtio_all(void) +{ + struct crypto_testsuite_params *ts_params = &testsuite_params; + int status; + + status = test_blockcipher_all_tests(ts_params->mbuf_pool, + ts_params->op_mpool, + ts_params->session_mpool, + ts_params->valid_devs[0], + rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD)), + BLKCIPHER_AES_CIPHERONLY_TYPE); + + TEST_ASSERT_EQUAL(status, 0, "Test failed"); + + return TEST_SUCCESS; +} + +static int test_AES_chain_dpaa_sec_all(void) { struct crypto_testsuite_params *ts_params = &testsuite_params; @@ -1898,6 +2001,25 @@ test_authonly_openssl_all(void) } static int +test_authonly_ccp_all(void) +{ + struct crypto_testsuite_params *ts_params = &testsuite_params; + int 
status; + + status = test_blockcipher_all_tests(ts_params->mbuf_pool, + ts_params->op_mpool, + ts_params->session_mpool, + ts_params->valid_devs[0], + rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_CCP_PMD)), + BLKCIPHER_AUTHONLY_TYPE); + + TEST_ASSERT_EQUAL(status, 0, "Test failed"); + + return TEST_SUCCESS; +} + +static int test_AES_chain_armv8_all(void) { struct crypto_testsuite_params *ts_params = &testsuite_params; @@ -1927,7 +2049,7 @@ test_AES_chain_mrvl_all(void) ts_params->session_mpool, ts_params->valid_devs[0], rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_NAME_MRVL_PMD)), + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)), BLKCIPHER_AES_CHAIN_TYPE); TEST_ASSERT_EQUAL(status, 0, "Test failed"); @@ -1946,7 +2068,7 @@ test_AES_cipheronly_mrvl_all(void) ts_params->session_mpool, ts_params->valid_devs[0], rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_NAME_MRVL_PMD)), + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)), BLKCIPHER_AES_CIPHERONLY_TYPE); TEST_ASSERT_EQUAL(status, 0, "Test failed"); @@ -1965,7 +2087,7 @@ test_authonly_mrvl_all(void) ts_params->session_mpool, ts_params->valid_devs[0], rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_NAME_MRVL_PMD)), + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)), BLKCIPHER_AUTHONLY_TYPE); TEST_ASSERT_EQUAL(status, 0, "Test failed"); @@ -1984,7 +2106,7 @@ test_3DES_chain_mrvl_all(void) ts_params->session_mpool, ts_params->valid_devs[0], rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_NAME_MRVL_PMD)), + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)), BLKCIPHER_3DES_CHAIN_TYPE); TEST_ASSERT_EQUAL(status, 0, "Test failed"); @@ -2003,7 +2125,7 @@ test_3DES_cipheronly_mrvl_all(void) ts_params->session_mpool, ts_params->valid_devs[0], rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_NAME_MRVL_PMD)), + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)), BLKCIPHER_3DES_CIPHERONLY_TYPE); TEST_ASSERT_EQUAL(status, 0, "Test failed"); @@ -4973,6 +5095,44 @@ test_3DES_cipheronly_dpaa2_sec_all(void) } static int +test_3DES_chain_ccp_all(void) +{ + struct crypto_testsuite_params *ts_params = &testsuite_params; + int status; + + status = test_blockcipher_all_tests(ts_params->mbuf_pool, + ts_params->op_mpool, + ts_params->session_mpool, + ts_params->valid_devs[0], + rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_CCP_PMD)), + BLKCIPHER_3DES_CHAIN_TYPE); + + TEST_ASSERT_EQUAL(status, 0, "Test failed"); + + return TEST_SUCCESS; +} + +static int +test_3DES_cipheronly_ccp_all(void) +{ + struct crypto_testsuite_params *ts_params = &testsuite_params; + int status; + + status = test_blockcipher_all_tests(ts_params->mbuf_pool, + ts_params->op_mpool, + ts_params->session_mpool, + ts_params->valid_devs[0], + rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_CCP_PMD)), + BLKCIPHER_3DES_CIPHERONLY_TYPE); + + TEST_ASSERT_EQUAL(status, 0, "Test failed"); + + return TEST_SUCCESS; +} + +static int test_3DES_cipheronly_qat_all(void) { struct crypto_testsuite_params *ts_params = &testsuite_params; @@ -6456,7 +6616,7 @@ test_multi_session_random_usage(void) sessions[i] = rte_cryptodev_sym_session_create( ts_params->session_mpool); - rte_memcpy(&ut_paramz[i].ut_params, &testsuite_params, + rte_memcpy(&ut_paramz[i].ut_params, &unittest_params, sizeof(struct crypto_unittest_params)); test_AES_CBC_HMAC_SHA512_decrypt_create_session_params( @@ -8375,7 +8535,8 @@ test_scheduler_attach_slave_op(void) rte_mempool_free(ts_params->session_mpool); ts_params->session_mpool = NULL; } - unsigned int session_size = rte_cryptodev_get_private_session_size(i); + unsigned int session_size = + 
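+ /* Note: the session size query used here is the symmetric-session
+ * variant; this release renames rte_cryptodev_get_private_session_size()
+ * to rte_cryptodev_sym_get_private_session_size() (see the hunk above),
+ * and the session mempool element size below must follow the rename. */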
rte_cryptodev_sym_get_private_session_size(i); /* * Create mempool with maximum number of sessions * 2, @@ -8428,33 +8589,47 @@ test_scheduler_detach_slave_op(void) } static int -test_scheduler_mode_op(void) +test_scheduler_mode_op(enum rte_cryptodev_scheduler_mode scheduler_mode) { struct crypto_testsuite_params *ts_params = &testsuite_params; uint8_t sched_id = ts_params->valid_devs[0]; - struct rte_cryptodev_scheduler_ops op = {0}; - struct rte_cryptodev_scheduler dummy_scheduler = { - .description = "dummy scheduler to test mode", - .name = "dummy scheduler", - .mode = CDEV_SCHED_MODE_USERDEFINED, - .ops = &op - }; - int ret; + /* set mode */ + return rte_cryptodev_scheduler_mode_set(sched_id, + scheduler_mode); +} + +static int +test_scheduler_mode_roundrobin_op(void) +{ + TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_ROUNDROBIN) == + 0, "Failed to set roundrobin mode"); + return 0; + +} + +static int +test_scheduler_mode_multicore_op(void) +{ + TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_MULTICORE) == + 0, "Failed to set multicore mode"); + + return 0; +} + +static int +test_scheduler_mode_failover_op(void) +{ + TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_FAILOVER) == + 0, "Failed to set failover mode"); + + return 0; +} - /* set user defined mode */ - ret = rte_cryptodev_scheduler_load_user_scheduler(sched_id, - &dummy_scheduler); - TEST_ASSERT(ret == 0, - "Failed to set cdev %u to user defined mode", sched_id); - - /* set round robin mode */ - ret = rte_cryptodev_scheduler_mode_set(sched_id, - CDEV_SCHED_MODE_ROUNDROBIN); - TEST_ASSERT(ret == 0, - "Failed to set cdev %u to round-robin mode", sched_id); - TEST_ASSERT(rte_cryptodev_scheduler_mode_get(sched_id) == - CDEV_SCHED_MODE_ROUNDROBIN, "Scheduling Mode " - "not match"); +static int +test_scheduler_mode_pkt_size_distr_op(void) +{ + TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_PKT_SIZE_DISTR) == + 0, "Failed to set pktsize mode"); return 0; } @@ -8464,8 +8639,20 @@ static struct unit_test_suite cryptodev_scheduler_testsuite = { .setup = testsuite_setup, .teardown = testsuite_teardown, .unit_test_cases = { + /* Multi Core */ + TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op), + TEST_CASE_ST(NULL, NULL, test_scheduler_mode_multicore_op), + TEST_CASE_ST(ut_setup, ut_teardown, + test_AES_chain_scheduler_all), + TEST_CASE_ST(ut_setup, ut_teardown, + test_AES_cipheronly_scheduler_all), + TEST_CASE_ST(ut_setup, ut_teardown, + test_authonly_scheduler_all), + TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op), + + /* Round Robin */ TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op), - TEST_CASE_ST(NULL, NULL, test_scheduler_mode_op), + TEST_CASE_ST(NULL, NULL, test_scheduler_mode_roundrobin_op), TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_scheduler_all), TEST_CASE_ST(ut_setup, ut_teardown, @@ -8473,6 +8660,29 @@ static struct unit_test_suite cryptodev_scheduler_testsuite = { TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_scheduler_all), TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op), + + /* Fail over */ + TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op), + TEST_CASE_ST(NULL, NULL, test_scheduler_mode_failover_op), + TEST_CASE_ST(ut_setup, ut_teardown, + test_AES_chain_scheduler_all), + TEST_CASE_ST(ut_setup, ut_teardown, + test_AES_cipheronly_scheduler_all), + TEST_CASE_ST(ut_setup, ut_teardown, + test_authonly_scheduler_all), + TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op), + + /* PKT SIZE */ + TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op), + 
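+ /* CDEV_SCHED_MODE_PKT_SIZE_DISTR splits ops between the two attached
+ * slaves by packet size; per the scheduler PMD documentation, ops at or
+ * below the threshold (128 bytes by default) go to the primary slave and
+ * larger ops to the secondary. */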
TEST_CASE_ST(NULL, NULL, test_scheduler_mode_pkt_size_distr_op), + TEST_CASE_ST(ut_setup, ut_teardown, + test_AES_chain_scheduler_all), + TEST_CASE_ST(ut_setup, ut_teardown, + test_AES_cipheronly_scheduler_all), + TEST_CASE_ST(ut_setup, ut_teardown, + test_authonly_scheduler_all), + TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op), + TEST_CASES_END() /**< NULL terminate unit test array */ } }; @@ -8767,6 +8977,18 @@ static struct unit_test_suite cryptodev_qat_testsuite = { } }; +static struct unit_test_suite cryptodev_virtio_testsuite = { + .suite_name = "Crypto VIRTIO Unit Test Suite", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(ut_setup, ut_teardown, + test_AES_cipheronly_virtio_all), + + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + static struct unit_test_suite cryptodev_aesni_mb_testsuite = { .suite_name = "Crypto Device AESNI MB Unit Test Suite", .setup = testsuite_setup, @@ -9646,6 +9868,38 @@ static struct unit_test_suite cryptodev_mrvl_testsuite = { } }; +static struct unit_test_suite cryptodev_ccp_testsuite = { + .suite_name = "Crypto Device CCP Unit Test Suite", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(ut_setup, ut_teardown, test_multi_session), + TEST_CASE_ST(ut_setup, ut_teardown, + test_multi_session_random_usage), + TEST_CASE_ST(ut_setup, ut_teardown, + test_AES_chain_ccp_all), + TEST_CASE_ST(ut_setup, ut_teardown, + test_AES_cipheronly_ccp_all), + TEST_CASE_ST(ut_setup, ut_teardown, + test_3DES_chain_ccp_all), + TEST_CASE_ST(ut_setup, ut_teardown, + test_3DES_cipheronly_ccp_all), + TEST_CASE_ST(ut_setup, ut_teardown, + test_authonly_ccp_all), + + /** Negative tests */ + TEST_CASE_ST(ut_setup, ut_teardown, + authentication_verify_HMAC_SHA1_fail_data_corrupt), + TEST_CASE_ST(ut_setup, ut_teardown, + authentication_verify_HMAC_SHA1_fail_tag_corrupt), + TEST_CASE_ST(ut_setup, ut_teardown, + auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt), + TEST_CASE_ST(ut_setup, ut_teardown, + auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt), + + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; static int test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/) @@ -9664,6 +9918,22 @@ test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/) } static int +test_cryptodev_virtio(void /*argv __rte_unused, int argc __rte_unused*/) +{ + gbl_driver_id = rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD)); + + if (gbl_driver_id == -1) { + RTE_LOG(ERR, USER1, "VIRTIO PMD must be loaded. Check if " + "CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO is enabled " + "in config file to run this testsuite.\n"); + return TEST_FAILED; + } + + return unit_test_suite_runner(&cryptodev_virtio_testsuite); +} + +static int test_cryptodev_aesni_mb(void /*argv __rte_unused, int argc __rte_unused*/) { gbl_driver_id = rte_cryptodev_driver_id_get( @@ -9795,11 +10065,11 @@ static int test_cryptodev_mrvl(void) { gbl_driver_id = rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_NAME_MRVL_PMD)); + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)); if (gbl_driver_id == -1) { - RTE_LOG(ERR, USER1, "MRVL PMD must be loaded. Check if " - "CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO is enabled " + RTE_LOG(ERR, USER1, "MVSAM PMD must be loaded. 
Check if " + "CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO is enabled " "in config file to run this testsuite.\n"); return TEST_SKIPPED; } @@ -9867,6 +10137,22 @@ test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/) return unit_test_suite_runner(&cryptodev_dpaa_sec_testsuite); } +static int +test_cryptodev_ccp(void) +{ + gbl_driver_id = rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_CCP_PMD)); + + if (gbl_driver_id == -1) { + RTE_LOG(ERR, USER1, "CCP PMD must be loaded. Check if " + "CONFIG_RTE_LIBRTE_PMD_CCP is enabled " + "in config file to run this testsuite.\n"); + return TEST_FAILED; + } + + return unit_test_suite_runner(&cryptodev_ccp_testsuite); +} + REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat); REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb); REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl); @@ -9876,6 +10162,8 @@ REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_autotest, test_cryptodev_sw_snow3g); REGISTER_TEST_COMMAND(cryptodev_sw_kasumi_autotest, test_cryptodev_sw_kasumi); REGISTER_TEST_COMMAND(cryptodev_sw_zuc_autotest, test_cryptodev_sw_zuc); REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8); -REGISTER_TEST_COMMAND(cryptodev_sw_mrvl_autotest, test_cryptodev_mrvl); +REGISTER_TEST_COMMAND(cryptodev_sw_mvsam_autotest, test_cryptodev_mrvl); REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec); REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec); +REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp); +REGISTER_TEST_COMMAND(cryptodev_virtio_autotest, test_cryptodev_virtio); diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h index 8cdc0874..1bd44dcd 100644 --- a/test/test/test_cryptodev.h +++ b/test/test/test_cryptodev.h @@ -58,9 +58,12 @@ #define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi #define CRYPTODEV_NAME_ZUC_PMD crypto_zuc #define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8 +#define CRYPTODEV_NAME_DPAA_SEC_PMD crypto_dpaa_sec #define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec #define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler -#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl +#define CRYPTODEV_NAME_MVSAM_PMD crypto_mvsam +#define CRYPTODEV_NAME_CCP_PMD crypto_ccp +#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio /** * Write (spread) data from buffer to mbuf data diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h index 3577ef4b..1c4dc664 100644 --- a/test/test/test_cryptodev_aes_test_vectors.h +++ b/test/test/test_cryptodev_aes_test_vectors.h @@ -1171,7 +1171,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest " @@ -1184,7 +1185,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-192-CTR XCBC Encryption Digest", @@ -1223,7 +1225,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | 
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest " @@ -1236,7 +1239,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest", @@ -1249,7 +1253,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest " @@ -1257,7 +1262,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { .test_data = &aes_test_data_13, .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN, .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest " @@ -1285,7 +1290,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest " @@ -1302,7 +1308,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { .test_data = &aes_test_data_13, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC, .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest", @@ -1315,7 +1321,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest " @@ -1323,7 +1330,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { .test_data = &aes_test_data_12, .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN, .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest " @@ -1337,7 +1344,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest " @@ -1345,7 +1353,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { .test_data = &aes_test_data_12, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC, .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest", @@ -1357,7 +1365,8 @@ static const 
struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest " @@ -1366,7 +1375,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN, .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS, .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB | - BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest " @@ -1390,7 +1400,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest " @@ -1455,7 +1466,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest " @@ -1467,7 +1479,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest", @@ -1479,7 +1492,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest " @@ -1492,7 +1506,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest " @@ -1501,7 +1516,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN, .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS, .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 | - BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = @@ -1511,7 +1527,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = { .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC, .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS, .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 | - BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, }; @@ -1526,7 +1543,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | 
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO }, { .test_descr = "AES-128-CBC Decryption", @@ -1538,7 +1557,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO }, { .test_descr = "AES-192-CBC Encryption", @@ -1549,7 +1570,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_MB | BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO }, { .test_descr = "AES-192-CBC Encryption Scater gather", @@ -1570,7 +1593,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_MB | BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO }, { .test_descr = "AES-192-CBC Decryption Scatter Gather", @@ -1590,7 +1615,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO }, { .test_descr = "AES-256-CBC Decryption", @@ -1602,7 +1629,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO }, { .test_descr = "AES-256-CBC OOP Encryption", @@ -1612,7 +1641,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO }, { .test_descr = "AES-256-CBC OOP Decryption", @@ -1622,7 +1653,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO }, { .test_descr = "AES-128-CTR Encryption", @@ -1634,7 +1667,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CTR Decryption", @@ -1646,7 
+1680,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-192-CTR Encryption", @@ -1657,7 +1692,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_MB | BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-192-CTR Decryption", @@ -1668,7 +1704,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_MB | BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-256-CTR Encryption", @@ -1680,7 +1717,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-256-CTR Decryption", @@ -1692,7 +1730,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "AES-128-CTR Encryption (12-byte IV)", diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c index ed066180..256a7daa 100644 --- a/test/test/test_cryptodev_blockcipher.c +++ b/test/test/test_cryptodev_blockcipher.c @@ -54,6 +54,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t, int openssl_pmd = rte_cryptodev_driver_id_get( RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)); + int ccp_pmd = rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_CCP_PMD)); int scheduler_pmd = rte_cryptodev_driver_id_get( RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD)); int armv8_pmd = rte_cryptodev_driver_id_get( @@ -67,7 +69,9 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t, int dpaa_sec_pmd = rte_cryptodev_driver_id_get( RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD)); int mrvl_pmd = rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_NAME_MRVL_PMD)); + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)); + int virtio_pmd = rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD)); int nb_segs = 1; @@ -94,7 +98,9 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t, driver_id == qat_pmd || driver_id == openssl_pmd || driver_id == armv8_pmd || - driver_id == mrvl_pmd) { /* Fall through */ + driver_id == mrvl_pmd || + driver_id == ccp_pmd || + driver_id == virtio_pmd) { /* Fall through */ digest_len = tdata->digest.len; } else if (driver_id == aesni_mb_pmd || driver_id == scheduler_pmd) { @@ -555,6 +561,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool, int openssl_pmd = rte_cryptodev_driver_id_get( RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)); + int ccp_pmd = rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_CCP_PMD)); int dpaa2_sec_pmd = rte_cryptodev_driver_id_get( 
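/* Each rte_cryptodev_driver_id_get() lookup returns the driver id, or -1
 * when that PMD is not built in; a missing driver then simply never
 * matches driver_id in the target-mask selection below. */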
RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD)); int dpaa_sec_pmd = rte_cryptodev_driver_id_get( @@ -568,7 +576,9 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool, int qat_pmd = rte_cryptodev_driver_id_get( RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD)); int mrvl_pmd = rte_cryptodev_driver_id_get( - RTE_STR(CRYPTODEV_NAME_MRVL_PMD)); + RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)); + int virtio_pmd = rte_cryptodev_driver_id_get( + RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD)); switch (test_type) { case BLKCIPHER_AES_CHAIN_TYPE: @@ -627,10 +637,14 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool, target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER; else if (driver_id == dpaa2_sec_pmd) target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC; + else if (driver_id == ccp_pmd) + target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_CCP; else if (driver_id == dpaa_sec_pmd) target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC; else if (driver_id == mrvl_pmd) - target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MRVL; + target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MVSAM; + else if (driver_id == virtio_pmd) + target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO; else TEST_ASSERT(0, "Unrecognized cryptodev type"); diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h index edbdaabe..6f7c8929 100644 --- a/test/test/test_cryptodev_blockcipher.h +++ b/test/test/test_cryptodev_blockcipher.h @@ -26,7 +26,9 @@ #define BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER 0x0010 /* Scheduler */ #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC 0x0020 /* DPAA2_SEC flag */ #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC 0x0040 /* DPAA_SEC flag */ -#define BLOCKCIPHER_TEST_TARGET_PMD_MRVL 0x0080 /* Marvell flag */ +#define BLOCKCIPHER_TEST_TARGET_PMD_MVSAM 0x0080 /* Marvell flag */ +#define BLOCKCIPHER_TEST_TARGET_PMD_CCP 0x0100 /* CCP flag (distinct bit; 0x0040 would collide with DPAA_SEC) */ +#define BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO 0x0200 /* VIRTIO flag */ #define BLOCKCIPHER_TEST_OP_CIPHER (BLOCKCIPHER_TEST_OP_ENCRYPT | \ BLOCKCIPHER_TEST_OP_DECRYPT) diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h index 0be809e3..43be83d6 100644 --- a/test/test/test_cryptodev_des_test_vectors.h +++ b/test/test/test_cryptodev_des_test_vectors.h @@ -826,7 +826,7 @@ static const struct blockcipher_test_case des_cipheronly_test_cases[] = { .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_MB | BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "DES-CBC Decryption", @@ -835,7 +835,7 @@ static const struct blockcipher_test_case des_cipheronly_test_cases[] = { .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_MB | BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, }; @@ -1044,7 +1044,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = { .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify", @@ -1053,19 +1054,22 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = { .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - 
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-128-CBC SHA1 Encryption Digest", .test_data = &triple_des128cbc_sha1_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-128-CBC SHA1 Decryption Digest Verify", .test_data = &triple_des128cbc_sha1_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-192-CBC HMAC-SHA1 Encryption Digest", @@ -1075,7 +1079,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify", @@ -1085,21 +1090,24 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-192-CBC SHA1 Encryption Digest", .test_data = &triple_des192cbc_sha1_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN, .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-192-CBC SHA1 Decryption Digest Verify", .test_data = &triple_des192cbc_sha1_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC, .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-128-CTR HMAC-SHA1 Encryption Digest", @@ -1180,7 +1188,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = { .test_data = &triple_des128cbc_hmac_sha1_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN, .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = @@ -1189,7 +1198,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = { .test_data = &triple_des128cbc_hmac_sha1_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC, .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, }; @@ -1201,7 +1211,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = { .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-128-CBC Decryption", @@ -1210,7 +1221,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = { .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | - 
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC + BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-192-CBC Encryption", @@ -1220,7 +1232,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-192-CBC Decryption", @@ -1230,7 +1243,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_QAT | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_MRVL + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM | + BLOCKCIPHER_TEST_TARGET_PMD_CCP }, { .test_descr = "3DES-128-CTR Encryption", diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h index 93dacb7b..cf86dbb1 100644 --- a/test/test/test_cryptodev_hash_test_vectors.h +++ b/test/test/test_cryptodev_hash_test_vectors.h @@ -319,18 +319,68 @@ hmac_sha512_test_vector = { } }; +static const struct blockcipher_test_data +cmac_test_vector = { + .auth_algo = RTE_CRYPTO_AUTH_AES_CMAC, + .ciphertext = { + .data = plaintext_hash, + .len = 512 + }, + .auth_key = { + .data = { + 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, + 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C + }, + .len = 16 + }, + .digest = { + .data = { + 0x4C, 0x77, 0x87, 0xA0, 0x78, 0x8E, 0xEA, 0x96, + 0xC1, 0xEB, 0x1E, 0x4E, 0x95, 0x8F, 0xED, 0x27 + }, + .len = 16, + .truncated_len = 16 + } +}; + +static const struct blockcipher_test_data +cmac_test_vector_12 = { + .auth_algo = RTE_CRYPTO_AUTH_AES_CMAC, + .ciphertext = { + .data = plaintext_hash, + .len = 512 + }, + .auth_key = { + .data = { + 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, + 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C + }, + .len = 16 + }, + .digest = { + .data = { + 0x4C, 0x77, 0x87, 0xA0, 0x78, 0x8E, 0xEA, 0x96, + 0xC1, 0xEB, 0x1E, 0x4E, 0x95, 0x8F, 0xED, 0x27 + }, + .len = 12, + .truncated_len = 12 + } +}; + static const struct blockcipher_test_case hash_test_cases[] = { { .test_descr = "MD5 Digest", .test_data = &md5_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "MD5 Digest Verify", .test_data = &md5_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-MD5 Digest", @@ -341,7 +391,8 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_QAT + BLOCKCIPHER_TEST_TARGET_PMD_QAT | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-MD5 Digest Verify", @@ -352,19 +403,24 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_QAT + BLOCKCIPHER_TEST_TARGET_PMD_QAT | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "SHA1 Digest", .test_data = &sha1_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN, - 
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "SHA1 Digest Verify", .test_data = &sha1_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-SHA1 Digest", @@ -375,7 +431,9 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_QAT + BLOCKCIPHER_TEST_TARGET_PMD_QAT | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-SHA1 Digest Scatter Gather", @@ -394,7 +452,9 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_QAT + BLOCKCIPHER_TEST_TARGET_PMD_QAT | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-SHA1 Digest Verify Scatter Gather", @@ -408,13 +468,17 @@ static const struct blockcipher_test_case hash_test_cases[] = { .test_descr = "SHA224 Digest", .test_data = &sha224_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "SHA224 Digest Verify", .test_data = &sha224_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-SHA224 Digest", @@ -425,6 +489,7 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | BLOCKCIPHER_TEST_TARGET_PMD_QAT }, { @@ -436,19 +501,24 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | BLOCKCIPHER_TEST_TARGET_PMD_QAT }, { .test_descr = "SHA256 Digest", .test_data = &sha256_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "SHA256 Digest Verify", .test_data = &sha256_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-SHA256 Digest", @@ -459,7 +529,9 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_QAT + BLOCKCIPHER_TEST_TARGET_PMD_QAT | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = 
"HMAC-SHA256 Digest Verify", @@ -470,19 +542,25 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_QAT + BLOCKCIPHER_TEST_TARGET_PMD_QAT | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "SHA384 Digest", .test_data = &sha384_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "SHA384 Digest Verify", .test_data = &sha384_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-SHA384 Digest", @@ -493,7 +571,9 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_QAT + BLOCKCIPHER_TEST_TARGET_PMD_QAT | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-SHA384 Digest Verify", @@ -504,19 +584,25 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_QAT + BLOCKCIPHER_TEST_TARGET_PMD_QAT | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "SHA512 Digest", .test_data = &sha512_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "SHA512 Digest Verify", .test_data = &sha512_test_vector, .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY, - .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-SHA512 Digest", @@ -527,7 +613,9 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_QAT + BLOCKCIPHER_TEST_TARGET_PMD_QAT | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, { .test_descr = "HMAC-SHA512 Digest Verify", @@ -538,8 +626,34 @@ static const struct blockcipher_test_case hash_test_cases[] = { BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER | BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC | BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC | - BLOCKCIPHER_TEST_TARGET_PMD_QAT + BLOCKCIPHER_TEST_TARGET_PMD_QAT | + BLOCKCIPHER_TEST_TARGET_PMD_CCP | + BLOCKCIPHER_TEST_TARGET_PMD_MVSAM }, + { + .test_descr = "CMAC Digest 12B", + .test_data = &cmac_test_vector_12, + .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN, + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB + }, + { + .test_descr = "CMAC Digest Verify 12B", + .test_data = &cmac_test_vector_12, + .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY, + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB + }, + { + .test_descr = "CMAC Digest 16B", + .test_data = &cmac_test_vector, + .op_mask = 
BLOCKCIPHER_TEST_OP_AUTH_GEN, + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB + }, + { + .test_descr = "CMAC Digest Verify 16B", + .test_data = &cmac_test_vector, + .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY, + .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB + } }; #endif /* TEST_CRYPTODEV_HASH_TEST_VECTORS_H_ */ diff --git a/test/test/test_devargs.c b/test/test/test_devargs.c deleted file mode 100644 index b8f3146f..00000000 --- a/test/test/test_devargs.c +++ /dev/null @@ -1,103 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2014 6WIND S.A. - */ - -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <sys/queue.h> - -#include <rte_debug.h> -#include <rte_devargs.h> - -#include "test.h" - -/* clear devargs list that was modified by the test */ -static void free_devargs_list(void) -{ - struct rte_devargs *devargs; - - while (!TAILQ_EMPTY(&devargs_list)) { - devargs = TAILQ_FIRST(&devargs_list); - TAILQ_REMOVE(&devargs_list, devargs, next); - free(devargs->args); - free(devargs); - } -} - -static int -test_devargs(void) -{ - struct rte_devargs_list save_devargs_list; - struct rte_devargs *devargs; - - /* save the real devargs_list, it is restored at the end of the test */ - save_devargs_list = devargs_list; - TAILQ_INIT(&devargs_list); - - /* test valid cases */ - if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "08:00.1") < 0) - goto fail; - if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "0000:5:00.0") < 0) - goto fail; - if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI, "04:00.0,arg=val") < 0) - goto fail; - if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI, "0000:01:00.1") < 0) - goto fail; - if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) != 2) - goto fail; - if (rte_eal_devargs_type_count(RTE_DEVTYPE_BLACKLISTED_PCI) != 2) - goto fail; - if (rte_eal_devargs_type_count(RTE_DEVTYPE_VIRTUAL) != 0) - goto fail; - if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "net_ring0") < 0) - goto fail; - if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "net_ring1,key=val,k2=val2") < 0) - goto fail; - if (rte_eal_devargs_type_count(RTE_DEVTYPE_VIRTUAL) != 2) - goto fail; - free_devargs_list(); - - /* check virtual device with argument parsing */ - if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "net_ring1,k1=val,k2=val2") < 0) - goto fail; - devargs = TAILQ_FIRST(&devargs_list); - if (strncmp(devargs->name, "net_ring1", - sizeof(devargs->name)) != 0) - goto fail; - if (!devargs->args || strcmp(devargs->args, "k1=val,k2=val2") != 0) - goto fail; - free_devargs_list(); - - /* check PCI device with empty argument parsing */ - if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "04:00.1") < 0) - goto fail; - devargs = TAILQ_FIRST(&devargs_list); - if (strcmp(devargs->name, "04:00.1") != 0) - goto fail; - if (!devargs->args || strcmp(devargs->args, "") != 0) - goto fail; - free_devargs_list(); - - /* test error case: bad PCI address */ - if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "08:1") == 0) - goto fail; - if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "00.1") == 0) - goto fail; - if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "foo") == 0) - goto fail; - if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, ",") == 0) - goto fail; - if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "000f:0:0") == 0) - goto fail; - - devargs_list = save_devargs_list; - return 0; - - fail: - free_devargs_list(); - devargs_list = save_devargs_list; - return -1; -} - -REGISTER_TEST_COMMAND(devargs_autotest, test_devargs); diff --git 
a/test/test/test_distributor_perf.c b/test/test/test_distributor_perf.c index 557715e1..edf1998a 100644 --- a/test/test/test_distributor_perf.c +++ b/test/test/test_distributor_perf.c @@ -31,7 +31,7 @@ struct worker_stats worker_stats[RTE_MAX_LCORE]; * worker thread used for testing the time to do a round-trip of a cache * line between two cores and back again */ -static void +static int flip_bit(volatile uint64_t *arg) { uint64_t old_val = 0; @@ -41,6 +41,7 @@ flip_bit(volatile uint64_t *arg) old_val = *arg; *arg = 0; } + return 0; } /* diff --git a/test/test/test_eal_flags.c b/test/test/test_eal_flags.c index 37c42efe..f840ca50 100644 --- a/test/test/test_eal_flags.c +++ b/test/test/test_eal_flags.c @@ -33,7 +33,7 @@ #define memtest "memtest" #define memtest1 "memtest1" #define memtest2 "memtest2" -#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10) +#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 20) #define launch_proc(ARGV) process_dup(ARGV, \ sizeof(ARGV)/(sizeof(ARGV[0])), __func__) @@ -1138,10 +1138,11 @@ test_memory_flags(void) #ifdef RTE_EXEC_ENV_BSDAPP int i, num_sockets = 1; #else - int i, num_sockets = get_number_of_sockets(); + int i, num_sockets = RTE_MIN(get_number_of_sockets(), + RTE_MAX_NUMA_NODES); #endif - if (num_sockets <= 0 || num_sockets > RTE_MAX_NUMA_NODES) { + if (num_sockets <= 0) { printf("Error - cannot get number of sockets!\n"); return -1; } @@ -1151,11 +1152,12 @@ test_memory_flags(void) /* add one extra socket */ for (i = 0; i < num_sockets + 1; i++) { snprintf(buf, sizeof(buf), "%s%s", invalid_socket_mem, DEFAULT_MEM_SIZE); - snprintf(invalid_socket_mem, sizeof(invalid_socket_mem), "%s", buf); + strlcpy(invalid_socket_mem, buf, sizeof(invalid_socket_mem)); if (num_sockets + 1 - i > 1) { snprintf(buf, sizeof(buf), "%s,", invalid_socket_mem); - snprintf(invalid_socket_mem, sizeof(invalid_socket_mem), "%s", buf); + strlcpy(invalid_socket_mem, buf, + sizeof(invalid_socket_mem)); } } @@ -1167,11 +1169,12 @@ test_memory_flags(void) /* add one extra socket */ for (i = 0; i < num_sockets; i++) { snprintf(buf, sizeof(buf), "%s%s", valid_socket_mem, DEFAULT_MEM_SIZE); - snprintf(valid_socket_mem, sizeof(valid_socket_mem), "%s", buf); + strlcpy(valid_socket_mem, buf, sizeof(valid_socket_mem)); if (num_sockets - i > 1) { snprintf(buf, sizeof(buf), "%s,", valid_socket_mem); - snprintf(valid_socket_mem, sizeof(valid_socket_mem), "%s", buf); + strlcpy(valid_socket_mem, buf, + sizeof(valid_socket_mem)); } } diff --git a/test/test/test_event_crypto_adapter.c b/test/test/test_event_crypto_adapter.c new file mode 100644 index 00000000..066b0ade --- /dev/null +++ b/test/test/test_event_crypto_adapter.c @@ -0,0 +1,928 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation. + * All rights reserved. 
+ */ + +#include <string.h> +#include <rte_common.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_cryptodev.h> +#include <rte_eventdev.h> +#include <rte_bus_vdev.h> +#include <rte_service.h> +#include <rte_event_crypto_adapter.h> +#include "test.h" + +#define PKT_TRACE 0 +#define NUM 1 +#define DEFAULT_NUM_XFORMS (2) +#define NUM_MBUFS (8191) +#define MBUF_CACHE_SIZE (256) +#define MAXIMUM_IV_LENGTH (16) +#define DEFAULT_NUM_OPS_INFLIGHT (128) +#define MAX_NB_SESSIONS 4 +#define TEST_APP_PORT_ID 0 +#define TEST_APP_EV_QUEUE_ID 0 +#define TEST_APP_EV_PRIORITY 0 +#define TEST_APP_EV_FLOWID 0xAABB +#define TEST_CRYPTO_EV_QUEUE_ID 1 +#define TEST_ADAPTER_ID 0 +#define TEST_CDEV_ID 0 +#define TEST_CDEV_QP_ID 0 +#define PACKET_LENGTH 64 +#define NB_TEST_PORTS 1 +#define NB_TEST_QUEUES 2 +#define NUM_CORES 1 +#define CRYPTODEV_NAME_NULL_PMD crypto_null + +#define MBUF_SIZE (sizeof(struct rte_mbuf) + \ + RTE_PKTMBUF_HEADROOM + PACKET_LENGTH) +#define IV_OFFSET (sizeof(struct rte_crypto_op) + \ + sizeof(struct rte_crypto_sym_op) + \ + DEFAULT_NUM_XFORMS * \ + sizeof(struct rte_crypto_sym_xform)) + +/* Handle log statements in same manner as test macros */ +#define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__) + +static const uint8_t text_64B[] = { + 0x05, 0x15, 0x77, 0x32, 0xc9, 0x66, 0x91, 0x50, + 0x93, 0x9f, 0xbb, 0x4e, 0x2e, 0x5a, 0x02, 0xd0, + 0x2d, 0x9d, 0x31, 0x5d, 0xc8, 0x9e, 0x86, 0x36, + 0x54, 0x5c, 0x50, 0xe8, 0x75, 0x54, 0x74, 0x5e, + 0xd5, 0xa2, 0x84, 0x21, 0x2d, 0xc5, 0xf8, 0x1c, + 0x55, 0x1a, 0xba, 0x91, 0xce, 0xb5, 0xa3, 0x1e, + 0x31, 0xbf, 0xe9, 0xa1, 0x97, 0x5c, 0x2b, 0xd6, + 0x57, 0xa5, 0x9f, 0xab, 0xbd, 0xb0, 0x9b, 0x9c +}; + +struct event_crypto_adapter_test_params { + struct rte_mempool *mbuf_pool; + struct rte_mempool *op_mpool; + struct rte_mempool *session_mpool; + struct rte_cryptodev_config *config; + uint8_t crypto_event_port_id; +}; + +struct rte_event response_info = { + .queue_id = TEST_APP_EV_QUEUE_ID, + .sched_type = RTE_SCHED_TYPE_ATOMIC, + .flow_id = TEST_APP_EV_FLOWID, + .priority = TEST_APP_EV_PRIORITY +}; + +struct rte_event_crypto_request request_info = { + .cdev_id = TEST_CDEV_ID, + .queue_pair_id = TEST_CDEV_QP_ID +}; + +static struct event_crypto_adapter_test_params params; +static uint8_t crypto_adapter_setup_done; +static uint32_t slcore_id; +static int evdev; + +static struct rte_mbuf * +alloc_fill_mbuf(struct rte_mempool *mpool, const uint8_t *data, + size_t len, uint8_t blocksize) +{ + struct rte_mbuf *m = rte_pktmbuf_alloc(mpool); + size_t t_len = len - (blocksize ? 
(len % blocksize) : 0);
+
+	if (m) {
+		char *dst = rte_pktmbuf_append(m, t_len);
+
+		if (!dst) {
+			rte_pktmbuf_free(m);
+			return NULL;
+		}
+
+		rte_memcpy(dst, (const void *)data, t_len);
+	}
+	return m;
+}
+
+static int
+send_recv_ev(struct rte_event *ev)
+{
+	struct rte_crypto_op *op;
+	struct rte_event recv_ev;
+	int ret;
+
+	ret = rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID, ev, NUM);
+	TEST_ASSERT_EQUAL(ret, NUM,
+			"Failed to send event to crypto adapter\n");
+
+	while (rte_event_dequeue_burst(evdev,
+			TEST_APP_PORT_ID, &recv_ev, NUM, 0) == 0)
+		rte_pause();
+
+	op = recv_ev.event_ptr;
+#if PKT_TRACE
+	struct rte_mbuf *m = op->sym->m_src;
+	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
+#endif
+	rte_pktmbuf_free(op->sym->m_src);
+	rte_crypto_op_free(op);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_crypto_adapter_stats(void)
+{
+	struct rte_event_crypto_adapter_stats stats;
+
+	rte_event_crypto_adapter_stats_get(TEST_ADAPTER_ID, &stats);
+	printf(" +------------------------------------------------------+\n");
+	printf(" + Crypto adapter stats for instance %u:\n", TEST_ADAPTER_ID);
+	printf(" + Event port poll count %" PRIx64 "\n",
+		stats.event_poll_count);
+	printf(" + Event dequeue count %" PRIx64 "\n",
+		stats.event_deq_count);
+	printf(" + Cryptodev enqueue count %" PRIx64 "\n",
+		stats.crypto_enq_count);
+	printf(" + Cryptodev enqueue failed count %" PRIx64 "\n",
+		stats.crypto_enq_fail);
+	printf(" + Cryptodev dequeue count %" PRIx64 "\n",
+		stats.crypto_deq_count);
+	printf(" + Event enqueue count %" PRIx64 "\n",
+		stats.event_enq_count);
+	printf(" + Event enqueue retry count %" PRIx64 "\n",
+		stats.event_enq_retry_count);
+	printf(" + Event enqueue fail count %" PRIx64 "\n",
+		stats.event_enq_fail_count);
+	printf(" +------------------------------------------------------+\n");
+
+	rte_event_crypto_adapter_stats_reset(TEST_ADAPTER_ID);
+	return TEST_SUCCESS;
+}
+
+static int
+test_op_forward_mode(uint8_t session_less)
+{
+	struct rte_crypto_sym_xform cipher_xform;
+	struct rte_cryptodev_sym_session *sess;
+	union rte_event_crypto_metadata m_data;
+	struct rte_crypto_sym_op *sym_op;
+	struct rte_crypto_op *op;
+	struct rte_mbuf *m;
+	struct rte_event ev;
+	uint32_t cap;
+	int ret;
+
+	memset(&m_data, 0, sizeof(m_data));
+
+	m = alloc_fill_mbuf(params.mbuf_pool, text_64B, PACKET_LENGTH, 0);
+	TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf!\n");
+#if PKT_TRACE
+	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
+#endif
+	/* Setup Cipher Parameters */
+	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	cipher_xform.next = NULL;
+
+	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+	op = rte_crypto_op_alloc(params.op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(op,
+		"Failed to allocate symmetric crypto operation struct\n");
+
+	sym_op = op->sym;
+
+	if (!session_less) {
+		sess = rte_cryptodev_sym_session_create(params.session_mpool);
+		TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");
+
+		/* Create Crypto session */
+		rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess,
+				&cipher_xform, params.session_mpool);
+
+		ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
+				evdev, &cap);
+		TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
+			/* Fill in private data information */
+			rte_memcpy(&m_data.response_info, &response_info,
+					sizeof(response_info));
+			rte_memcpy(&m_data.request_info,
&request_info, + sizeof(request_info)); + rte_cryptodev_sym_session_set_private_data(sess, + &m_data, sizeof(m_data)); + } + + rte_crypto_op_attach_sym_session(op, sess); + } else { + struct rte_crypto_sym_xform *first_xform; + + rte_crypto_op_sym_xforms_alloc(op, NUM); + op->sess_type = RTE_CRYPTO_OP_SESSIONLESS; + first_xform = &cipher_xform; + sym_op->xform = first_xform; + uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH + + (sizeof(struct rte_crypto_sym_xform) * 2); + op->private_data_offset = len; + /* Fill in private data information */ + rte_memcpy(&m_data.response_info, &response_info, + sizeof(response_info)); + rte_memcpy(&m_data.request_info, &request_info, + sizeof(request_info)); + rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data)); + } + + sym_op->m_src = m; + sym_op->cipher.data.offset = 0; + sym_op->cipher.data.length = PACKET_LENGTH; + + /* Fill in event info and update event_ptr with rte_crypto_op */ + memset(&ev, 0, sizeof(ev)); + ev.queue_id = TEST_CRYPTO_EV_QUEUE_ID; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.flow_id = 0xAABB; + ev.event_ptr = op; + + ret = send_recv_ev(&ev); + TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to " + "crypto adapter\n"); + + test_crypto_adapter_stats(); + + return TEST_SUCCESS; +} + +static int +map_adapter_service_core(void) +{ + uint32_t adapter_service_id; + int ret; + + if (rte_event_crypto_adapter_service_id_get(TEST_ADAPTER_ID, + &adapter_service_id) == 0) { + uint32_t core_list[NUM_CORES]; + + ret = rte_service_lcore_list(core_list, NUM_CORES); + TEST_ASSERT(ret >= 0, "Failed to get service core list!"); + + if (core_list[0] != slcore_id) { + TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id), + "Failed to add service core"); + TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id), + "Failed to start service core"); + } + + TEST_ASSERT_SUCCESS(rte_service_map_lcore_set( + adapter_service_id, slcore_id, 1), + "Failed to map adapter service"); + } + + return TEST_SUCCESS; +} + +static int +test_sessionless_with_op_forward_mode(void) +{ + uint32_t cap; + int ret; + + ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap); + TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); + + if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) + map_adapter_service_core(); + + TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID), + "Failed to start event crypto adapter"); + + ret = test_op_forward_mode(1); + TEST_ASSERT_SUCCESS(ret, "Sessionless - FORWARD mode test failed\n"); + return TEST_SUCCESS; +} + +static int +test_session_with_op_forward_mode(void) +{ + uint32_t cap; + int ret; + + ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap); + TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); + + if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) + map_adapter_service_core(); + + TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID + ), "Failed to start event crypto adapter"); + + ret = test_op_forward_mode(0); + TEST_ASSERT_SUCCESS(ret, "Session based - FORWARD mode test failed\n"); + return TEST_SUCCESS; +} + +static int +send_op_recv_ev(struct rte_crypto_op *op) +{ + struct rte_crypto_op *recv_op; + struct rte_event ev; + int ret; + + ret = rte_cryptodev_enqueue_burst(TEST_CDEV_ID, TEST_CDEV_QP_ID, + &op, NUM); + TEST_ASSERT_EQUAL(ret, NUM, "Failed to enqueue to cryptodev\n"); + memset(&ev, 0, sizeof(ev)); + + while (rte_event_dequeue_burst(evdev, + TEST_APP_PORT_ID, &ev, NUM, 0) == 0) + rte_pause(); + + recv_op = 
ev.event_ptr; +#if PKT_TRACE + struct rte_mbuf *m = recv_op->sym->m_src; + rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m)); +#endif + rte_pktmbuf_free(recv_op->sym->m_src); + rte_crypto_op_free(recv_op); + + return TEST_SUCCESS; +} + +static int +test_op_new_mode(uint8_t session_less) +{ + struct rte_crypto_sym_xform cipher_xform; + struct rte_cryptodev_sym_session *sess; + union rte_event_crypto_metadata m_data; + struct rte_crypto_sym_op *sym_op; + struct rte_crypto_op *op; + struct rte_mbuf *m; + uint32_t cap; + int ret; + + memset(&m_data, 0, sizeof(m_data)); + + m = alloc_fill_mbuf(params.mbuf_pool, text_64B, PACKET_LENGTH, 0); + TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf!\n"); +#if PKT_TRACE + rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m)); +#endif + /* Setup Cipher Parameters */ + cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; + cipher_xform.next = NULL; + + cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL; + cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; + + op = rte_crypto_op_alloc(params.op_mpool, + RTE_CRYPTO_OP_TYPE_SYMMETRIC); + TEST_ASSERT_NOT_NULL(op, "Failed to allocate crypto_op!\n"); + + sym_op = op->sym; + + if (!session_less) { + sess = rte_cryptodev_sym_session_create(params.session_mpool); + TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n"); + + ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, + evdev, &cap); + TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); + + if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) { + /* Fill in private data information */ + rte_memcpy(&m_data.response_info, &response_info, + sizeof(m_data)); + rte_cryptodev_sym_session_set_private_data(sess, + &m_data, sizeof(m_data)); + } + rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess, + &cipher_xform, params.session_mpool); + rte_crypto_op_attach_sym_session(op, sess); + } else { + struct rte_crypto_sym_xform *first_xform; + + rte_crypto_op_sym_xforms_alloc(op, NUM); + op->sess_type = RTE_CRYPTO_OP_SESSIONLESS; + first_xform = &cipher_xform; + sym_op->xform = first_xform; + uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH + + (sizeof(struct rte_crypto_sym_xform) * 2); + op->private_data_offset = len; + /* Fill in private data information */ + rte_memcpy(&m_data.response_info, &response_info, + sizeof(m_data)); + rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data)); + } + + sym_op->m_src = m; + sym_op->cipher.data.offset = 0; + sym_op->cipher.data.length = PACKET_LENGTH; + + ret = send_op_recv_ev(op); + TEST_ASSERT_SUCCESS(ret, "Failed to enqueue op to cryptodev\n"); + + test_crypto_adapter_stats(); + + return TEST_SUCCESS; +} + +static int +test_sessionless_with_op_new_mode(void) +{ + uint32_t cap; + int ret; + + ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap); + TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); + + if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) || + !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) + map_adapter_service_core(); + + /* start the event crypto adapter */ + TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID), + "Failed to start event crypto adapter"); + + ret = test_op_new_mode(1); + TEST_ASSERT_SUCCESS(ret, "Sessionless - NEW mode test failed\n"); + return TEST_SUCCESS; +} + +static int +test_session_with_op_new_mode(void) +{ + uint32_t cap; + int ret; + + ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap); + TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); + + if (!(cap & 
RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+	    !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
+		map_adapter_service_core();
+
+	TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
+				"Failed to start event crypto adapter");
+
+	ret = test_op_new_mode(0);
+	TEST_ASSERT_SUCCESS(ret, "Session based - NEW mode test failed\n");
+	return TEST_SUCCESS;
+}
+
+static int
+configure_cryptodev(void)
+{
+	struct rte_cryptodev_qp_conf qp_conf;
+	struct rte_cryptodev_config conf;
+	struct rte_cryptodev_info info;
+	unsigned int session_size;
+	uint8_t nb_devs;
+	int ret;
+
+	params.mbuf_pool = rte_pktmbuf_pool_create(
+			"CRYPTO_ADAPTER_MBUFPOOL",
+			NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+			rte_socket_id());
+	if (params.mbuf_pool == NULL) {
+		RTE_LOG(ERR, USER1, "Can't create CRYPTO_MBUFPOOL\n");
+		return TEST_FAILED;
+	}
+
+	params.op_mpool = rte_crypto_op_pool_create(
+			"EVENT_CRYPTO_SYM_OP_POOL",
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+			NUM_MBUFS, MBUF_CACHE_SIZE,
+			DEFAULT_NUM_XFORMS *
+			sizeof(struct rte_crypto_sym_xform) +
+			MAXIMUM_IV_LENGTH,
+			rte_socket_id());
+	if (params.op_mpool == NULL) {
+		RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
+		return TEST_FAILED;
+	}
+
+	/* Create a NULL crypto device */
+	nb_devs = rte_cryptodev_device_count_by_driver(
+			rte_cryptodev_driver_id_get(
+			RTE_STR(CRYPTODEV_NAME_NULL_PMD)));
+	if (!nb_devs) {
+		ret = rte_vdev_init(
+			RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL);
+
+		TEST_ASSERT(ret == 0, "Failed to create pmd:%s instance\n",
+			RTE_STR(CRYPTODEV_NAME_NULL_PMD));
+	}
+
+	nb_devs = rte_cryptodev_count();
+	if (!nb_devs) {
+		RTE_LOG(ERR, USER1, "No crypto devices found!\n");
+		return TEST_FAILED;
+	}
+
+	/*
+	 * Create mempool with maximum number of sessions * 2,
+	 * to include the session headers & private data
+	 */
+	session_size = rte_cryptodev_sym_get_private_session_size(TEST_CDEV_ID);
+	session_size += sizeof(union rte_event_crypto_metadata);
+
+	params.session_mpool = rte_mempool_create(
+				"CRYPTO_ADAPTER_SESSION_MP",
+				MAX_NB_SESSIONS * 2,
+				session_size,
+				0, 0, NULL, NULL, NULL,
+				NULL, SOCKET_ID_ANY,
+				0);
+
+	TEST_ASSERT_NOT_NULL(params.session_mpool,
+			"session mempool allocation failed\n");
+
+	rte_cryptodev_info_get(TEST_CDEV_ID, &info);
+	conf.nb_queue_pairs = info.max_nb_queue_pairs;
+	conf.socket_id = SOCKET_ID_ANY;
+
+	TEST_ASSERT_SUCCESS(rte_cryptodev_configure(TEST_CDEV_ID, &conf),
+			"Failed to configure cryptodev %u with %u qps\n",
+			TEST_CDEV_ID, conf.nb_queue_pairs);
+
+	qp_conf.nb_descriptors = DEFAULT_NUM_OPS_INFLIGHT;
+
+	TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
+			TEST_CDEV_ID, TEST_CDEV_QP_ID, &qp_conf,
+			rte_cryptodev_socket_id(TEST_CDEV_ID),
+			params.session_mpool),
+			"Failed to setup queue pair %u on cryptodev %u\n",
+			TEST_CDEV_QP_ID, TEST_CDEV_ID);
+
+	return TEST_SUCCESS;
+}
+
+static inline void
+evdev_set_conf_values(struct rte_event_dev_config *dev_conf,
+			struct rte_event_dev_info *info)
+{
+	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+	dev_conf->nb_event_ports = NB_TEST_PORTS;
+	dev_conf->nb_event_queues = NB_TEST_QUEUES;
+	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+	dev_conf->nb_event_port_dequeue_depth =
+			info->max_event_port_dequeue_depth;
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_events_limit =
+			info->max_num_events;
+}
+
+static int
+configure_eventdev(void)
+{
+	struct rte_event_queue_conf queue_conf;
+	struct rte_event_dev_config devconf;
+	struct rte_event_dev_info info;
+	uint32_t queue_count;
+	uint32_t port_count;
+	int ret;
+	uint8_t qid;
+
+	if (!rte_event_dev_count()) {
+		/* If there is no hardware eventdev, or no software vdev was
+		 * specified on the command line, create an instance of
+		 * event_sw.
+		 */
+		LOG_DBG("Failed to find a valid event device... "
+			"testing with event_sw device\n");
+		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
+					"Error creating eventdev");
+		evdev = rte_event_dev_get_dev_id("event_sw0");
+	}
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");
+
+	evdev_set_conf_values(&devconf, &info);
+
+	ret = rte_event_dev_configure(evdev, &devconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n");
+
+	/* Set up event queue */
+	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+					&queue_count);
+	TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n");
+	TEST_ASSERT_EQUAL(queue_count, 2, "Unexpected queue count\n");
+
+	qid = TEST_APP_EV_QUEUE_ID;
+	ret = rte_event_queue_setup(evdev, qid, NULL);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid);
+
+	queue_conf.nb_atomic_flows = info.max_event_queue_flows;
+	queue_conf.nb_atomic_order_sequences = 32;
+	queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+	queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+
+	qid = TEST_CRYPTO_EV_QUEUE_ID;
+	ret = rte_event_queue_setup(evdev, qid, &queue_conf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid);
+
+	/* Set up event port */
+	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+					&port_count);
+	TEST_ASSERT_SUCCESS(ret, "Port count get failed\n");
+	TEST_ASSERT_EQUAL(port_count, 1, "Unexpected port count\n");
+
+	ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n",
+				TEST_APP_PORT_ID);
+
+	qid = TEST_APP_EV_QUEUE_ID;
+	ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1);
+	TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n",
+			TEST_APP_PORT_ID);
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_crypto_adapter_free(void)
+{
+	rte_event_crypto_adapter_free(TEST_ADAPTER_ID);
+}
+
+static int
+test_crypto_adapter_create(void)
+{
+	struct rte_event_port_conf conf = {
+		.dequeue_depth = 8,
+		.enqueue_depth = 8,
+		.new_event_threshold = 1200,
+	};
+	int ret;
+
+	/* Create adapter with default port creation callback */
+	ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
+					TEST_CDEV_ID,
+					&conf, 0);
+	TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_crypto_adapter_qp_add_del(void)
+{
+	uint32_t cap;
+	int ret;
+
+	ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+	} else
+		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+					TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
+
+	TEST_ASSERT_SUCCESS(ret, "Failed to add queue pair\n");
+
+	ret = rte_event_crypto_adapter_queue_pair_del(TEST_ADAPTER_ID,
+					TEST_CDEV_ID, TEST_CDEV_QP_ID);
+	TEST_ASSERT_SUCCESS(ret, "Failed to delete queue pair\n");
+
+	return TEST_SUCCESS;
+}
+
+static int
+configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
+{
+	struct rte_event_port_conf conf = {
+		.dequeue_depth = 8,
+		.enqueue_depth = 8,
+		.new_event_threshold = 1200,
+	};
+
+	uint32_t cap;
+	int ret;
+
+	/* Create adapter with default port creation callback */
+	ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
+					TEST_CDEV_ID,
+					&conf, mode);
+	TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");
+
+	ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+	} else
+		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+					TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
+
+	TEST_ASSERT_SUCCESS(ret, "Failed to add queue pair\n");
+
+	ret = rte_event_crypto_adapter_event_port_get(TEST_ADAPTER_ID,
+				&params.crypto_event_port_id);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n");
+
+	return TEST_SUCCESS;
+}
+
+static void
+test_crypto_adapter_stop(void)
+{
+	uint32_t evdev_service_id, adapter_service_id;
+
+	/* retrieve service ids & stop services */
+	if (rte_event_crypto_adapter_service_id_get(TEST_ADAPTER_ID,
+				&adapter_service_id) == 0) {
+		rte_service_runstate_set(adapter_service_id, 0);
+		rte_service_lcore_stop(slcore_id);
+		rte_service_lcore_del(slcore_id);
+		rte_event_crypto_adapter_stop(TEST_ADAPTER_ID);
+	}
+
+	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
+		rte_service_runstate_set(evdev_service_id, 0);
+		rte_service_lcore_stop(slcore_id);
+		rte_service_lcore_del(slcore_id);
+		rte_event_dev_stop(evdev);
+	}
+}
+
+static int
+test_crypto_adapter_conf(enum rte_event_crypto_adapter_mode mode)
+{
+	uint32_t evdev_service_id;
+	uint8_t qid;
+	int ret;
+
+	if (!crypto_adapter_setup_done) {
+		ret = configure_event_crypto_adapter(mode);
+		if (!ret) {
+			qid = TEST_CRYPTO_EV_QUEUE_ID;
+			ret = rte_event_port_link(evdev,
+					params.crypto_event_port_id, &qid, NULL, 1);
+			TEST_ASSERT(ret >= 0, "Failed to link queue %d "
+					"port=%u\n", qid,
+					params.crypto_event_port_id);
+		}
+		crypto_adapter_setup_done = 1;
+	}
+
+	/* retrieve service ids */
+	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
+		/* add a service core and start it */
+		TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
+					"Failed to add service core");
+		TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
+					"Failed to start service core");
+
+		/* map services to it */
+		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,
+				slcore_id, 1), "Failed to map evdev service");
+
+		/* set services to running */
+		TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id,
+				1), "Failed to start evdev service");
+	}
+
+	/* start the eventdev */
+	TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),
+				"Failed to start event device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_crypto_adapter_conf_op_forward_mode(void)
+{
+	enum rte_event_crypto_adapter_mode mode;
+
+	mode = RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD;
+	test_crypto_adapter_conf(mode);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_crypto_adapter_conf_op_new_mode(void)
+{
+	enum rte_event_crypto_adapter_mode mode;
+
+	mode = RTE_EVENT_CRYPTO_ADAPTER_OP_NEW;
+	test_crypto_adapter_conf(mode);
+	return TEST_SUCCESS;
+}
+
+
+static int
+testsuite_setup(void)
+{
+	int ret;
+ + slcore_id = rte_get_next_lcore(-1, 1, 0); + TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores " + "are required to run this autotest\n"); + + /* Setup and start event device. */ + ret = configure_eventdev(); + TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n"); + + /* Setup and start crypto device. */ + ret = configure_cryptodev(); + TEST_ASSERT_SUCCESS(ret, "cryptodev initialization failed\n"); + + return TEST_SUCCESS; +} + +static void +crypto_teardown(void) +{ + /* Free mbuf mempool */ + if (params.mbuf_pool != NULL) { + RTE_LOG(DEBUG, USER1, "CRYPTO_ADAPTER_MBUFPOOL count %u\n", + rte_mempool_avail_count(params.mbuf_pool)); + rte_mempool_free(params.mbuf_pool); + params.mbuf_pool = NULL; + } + + /* Free session mempool */ + if (params.session_mpool != NULL) { + RTE_LOG(DEBUG, USER1, "CRYPTO_ADAPTER_SESSION_MP count %u\n", + rte_mempool_avail_count(params.session_mpool)); + rte_mempool_free(params.session_mpool); + params.session_mpool = NULL; + } + + /* Free ops mempool */ + if (params.op_mpool != NULL) { + RTE_LOG(DEBUG, USER1, "EVENT_CRYPTO_SYM_OP_POOL count %u\n", + rte_mempool_avail_count(params.op_mpool)); + rte_mempool_free(params.op_mpool); + params.op_mpool = NULL; + } +} + +static void +eventdev_teardown(void) +{ + rte_event_dev_stop(evdev); +} + +static void +testsuite_teardown(void) +{ + crypto_teardown(); + eventdev_teardown(); +} + +static struct unit_test_suite functional_testsuite = { + .suite_name = "Event crypto adapter test suite", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + + TEST_CASE_ST(NULL, test_crypto_adapter_free, + test_crypto_adapter_create), + + TEST_CASE_ST(test_crypto_adapter_create, + test_crypto_adapter_free, + test_crypto_adapter_qp_add_del), + + TEST_CASE_ST(test_crypto_adapter_create, + test_crypto_adapter_free, + test_crypto_adapter_stats), + + TEST_CASE_ST(test_crypto_adapter_conf_op_forward_mode, + test_crypto_adapter_stop, + test_session_with_op_forward_mode), + + TEST_CASE_ST(test_crypto_adapter_conf_op_forward_mode, + test_crypto_adapter_stop, + test_sessionless_with_op_forward_mode), + + TEST_CASE_ST(test_crypto_adapter_conf_op_new_mode, + test_crypto_adapter_stop, + test_session_with_op_new_mode), + + TEST_CASE_ST(test_crypto_adapter_conf_op_new_mode, + test_crypto_adapter_stop, + test_sessionless_with_op_new_mode), + + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static int +test_event_crypto_adapter(void) +{ + return unit_test_suite_runner(&functional_testsuite); +} + +REGISTER_TEST_COMMAND(event_crypto_adapter_autotest, + test_event_crypto_adapter); diff --git a/test/test/test_event_eth_rx_adapter.c b/test/test/test_event_eth_rx_adapter.c index 006ed314..dee632bf 100644 --- a/test/test/test_event_eth_rx_adapter.c +++ b/test/test/test_event_eth_rx_adapter.c @@ -51,7 +51,7 @@ port_init(uint8_t port, struct rte_mempool *mp) uint16_t q; struct rte_eth_dev_info dev_info; - if (port >= rte_eth_dev_count()) + if (!rte_eth_dev_is_valid_port(port)) return -1; retval = rte_eth_dev_configure(port, 0, 0, &port_conf); @@ -107,7 +107,7 @@ port_init(uint8_t port, struct rte_mempool *mp) static int init_ports(int num_ports) { - uint8_t portid; + uint16_t portid; int retval; default_params.mp = rte_pktmbuf_pool_create("packet_pool", @@ -119,7 +119,7 @@ init_ports(int num_ports) if (!default_params.mp) return -ENOMEM; - for (portid = 0; portid < num_ports; portid++) { + RTE_ETH_FOREACH_DEV(portid) { retval = port_init(portid, default_params.mp); if (retval) return 
retval; @@ -164,7 +164,7 @@ testsuite_setup(void) * so rte_eth_dev_start invokes rte_event_dev_start internally, so * call init_ports after rte_event_dev_configure */ - err = init_ports(rte_eth_dev_count()); + err = init_ports(rte_eth_dev_count_total()); TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err); err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, @@ -179,7 +179,7 @@ static void testsuite_teardown(void) { uint32_t i; - for (i = 0; i < rte_eth_dev_count(); i++) + RTE_ETH_FOREACH_DEV(i) rte_eth_dev_stop(i); rte_mempool_free(default_params.mp); @@ -273,7 +273,7 @@ adapter_queue_add_del(void) queue_config.servicing_weight = 1; err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, - rte_eth_dev_count(), + rte_eth_dev_count_total(), -1, &queue_config); TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); diff --git a/test/test/test_event_timer_adapter.c b/test/test/test_event_timer_adapter.c new file mode 100644 index 00000000..93471db1 --- /dev/null +++ b/test/test/test_event_timer_adapter.c @@ -0,0 +1,1830 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Cavium, Inc + * Copyright(c) 2017-2018 Intel Corporation. + */ + +#include <rte_atomic.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_debug.h> +#include <rte_eal.h> +#include <rte_ethdev.h> +#include <rte_eventdev.h> +#include <rte_event_timer_adapter.h> +#include <rte_mempool.h> +#include <rte_launch.h> +#include <rte_lcore.h> +#include <rte_per_lcore.h> +#include <rte_random.h> +#include <rte_bus_vdev.h> +#include <rte_service.h> +#include <stdbool.h> + +#include "test.h" + +/* 4K timers corresponds to sw evdev max inflight events */ +#define MAX_TIMERS (4 * 1024) +#define BKT_TCK_NSEC + +#define NSECPERSEC 1E9 +#define BATCH_SIZE 16 +/* Both the app lcore and adapter ports are linked to this queue */ +#define TEST_QUEUE_ID 0 +/* Port the application dequeues from */ +#define TEST_PORT_ID 0 +#define TEST_ADAPTER_ID 0 + +/* Handle log statements in same manner as test macros */ +#define LOG_DBG(...) 
RTE_LOG(DEBUG, EAL, __VA_ARGS__)
+
+static int evdev;
+static struct rte_event_timer_adapter *timdev;
+static struct rte_mempool *eventdev_test_mempool;
+static struct rte_ring *timer_producer_ring;
+static uint64_t global_bkt_tck_ns;
+static volatile uint8_t arm_done;
+
+static bool using_services;
+static uint32_t test_lcore1;
+static uint32_t test_lcore2;
+static uint32_t test_lcore3;
+static uint32_t sw_evdev_slcore;
+static uint32_t sw_adptr_slcore;
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+		struct rte_event_dev_info *info)
+{
+	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+	dev_conf->nb_event_ports = 1;
+	dev_conf->nb_event_queues = 1;
+	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+	dev_conf->nb_event_port_dequeue_depth =
+			info->max_event_port_dequeue_depth;
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_events_limit =
+			info->max_num_events;
+}
+
+static inline int
+eventdev_setup(void)
+{
+	int ret;
+	struct rte_event_dev_config dev_conf;
+	struct rte_event_dev_info info;
+	uint32_t service_id;
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	TEST_ASSERT(info.max_num_events >= (int32_t)MAX_TIMERS,
+			"ERROR max_num_events=%d < max_events=%d",
+			info.max_num_events, MAX_TIMERS);
+
+	devconf_set_default_sane_values(&dev_conf, &info);
+	ret = rte_event_dev_configure(evdev, &dev_conf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+	ret = rte_event_queue_setup(evdev, 0, NULL);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0);
+
+	/* Configure event port */
+	ret = rte_event_port_setup(evdev, 0, NULL);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0);
+	ret = rte_event_port_link(evdev, 0, NULL, NULL, 0);
+	TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0);
+
+	/* If this is a software event device, map and start its service */
+	if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
+		TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore),
+				"Failed to add service core");
+		TEST_ASSERT_SUCCESS(rte_service_lcore_start(
+				sw_evdev_slcore),
+				"Failed to start service core");
+		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
+				service_id, sw_evdev_slcore, 1),
+				"Failed to map evdev service");
+		TEST_ASSERT_SUCCESS(rte_service_runstate_set(
+				service_id, 1),
+				"Failed to start evdev service");
+	}
+
+	ret = rte_event_dev_start(evdev);
+	TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+	return TEST_SUCCESS;
+}
+
+static int
+testsuite_setup(void)
+{
+	/* Some of the multithreaded tests require 3 other lcores to run */
+	unsigned int required_lcore_count = 4;
+	uint32_t service_id;
+
+	/* To make it easier to map services later if needed, just reset
+	 * service core state.
+	 */
+	(void) rte_service_lcore_reset_all();
+
+	if (!rte_event_dev_count()) {
+		/* If there is no hardware eventdev, or no software vdev was
+		 * specified on the command line, create an instance of
+		 * event_sw.
+		 */
+		LOG_DBG("Failed to find a valid event device... 
testing with" + " event_sw device\n"); + TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL), + "Error creating eventdev"); + evdev = rte_event_dev_get_dev_id("event_sw0"); + } + + if (rte_event_dev_service_id_get(evdev, &service_id) == 0) { + /* A software event device will use a software event timer + * adapter as well. 2 more cores required to convert to + * service cores. + */ + required_lcore_count += 2; + using_services = true; + } + + if (rte_lcore_count() < required_lcore_count) { + printf("%d lcores needed to run tests", required_lcore_count); + return TEST_FAILED; + } + + /* Assign lcores for various tasks */ + test_lcore1 = rte_get_next_lcore(-1, 1, 0); + test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0); + test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0); + if (using_services) { + sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0); + sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0); + } + + return eventdev_setup(); +} + +static void +testsuite_teardown(void) +{ + rte_event_dev_stop(evdev); + rte_event_dev_close(evdev); +} + +static int +setup_adapter_service(struct rte_event_timer_adapter *adptr) +{ + uint32_t adapter_service_id; + int ret; + + /* retrieve service ids */ + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr, + &adapter_service_id), "Failed to get event timer " + "adapter service id"); + /* add a service core and start it */ + ret = rte_service_lcore_add(sw_adptr_slcore); + TEST_ASSERT(ret == 0 || ret == -EALREADY, + "Failed to add service core"); + ret = rte_service_lcore_start(sw_adptr_slcore); + TEST_ASSERT(ret == 0 || ret == -EALREADY, + "Failed to start service core"); + + /* map services to it */ + TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id, + sw_adptr_slcore, 1), + "Failed to map adapter service"); + + /* set services to running */ + TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1), + "Failed to start event timer adapter service"); + + return TEST_SUCCESS; +} + +static int +test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id, + void *conf_arg) +{ + struct rte_event_dev_config dev_conf; + struct rte_event_dev_info info; + struct rte_event_port_conf *port_conf, def_port_conf = {0}; + uint32_t started; + static int port_allocated; + static uint8_t port_id; + int ret; + + if (port_allocated) { + *event_port_id = port_id; + return 0; + } + + RTE_SET_USED(id); + + ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED, + &started); + if (ret < 0) + return ret; + + if (started) + rte_event_dev_stop(event_dev_id); + + ret = rte_event_dev_info_get(evdev, &info); + if (ret < 0) + return ret; + + devconf_set_default_sane_values(&dev_conf, &info); + + port_id = dev_conf.nb_event_ports; + dev_conf.nb_event_ports++; + + ret = rte_event_dev_configure(event_dev_id, &dev_conf); + if (ret < 0) { + if (started) + rte_event_dev_start(event_dev_id); + return ret; + } + + if (conf_arg != NULL) + port_conf = conf_arg; + else { + port_conf = &def_port_conf; + ret = rte_event_port_default_conf_get(event_dev_id, port_id, + port_conf); + if (ret < 0) + return ret; + } + + ret = rte_event_port_setup(event_dev_id, port_id, port_conf); + if (ret < 0) + return ret; + + *event_port_id = port_id; + + if (started) + rte_event_dev_start(event_dev_id); + + /* Reuse this port number next time this is called */ + port_allocated = 1; + + return 0; +} + +static int +_timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns) +{ + struct rte_event_timer_adapter_conf config = { + 
.event_dev_id = evdev,
+		.timer_adapter_id = TEST_ADAPTER_ID,
+		.timer_tick_ns = bkt_tck_ns,
+		.max_tmo_ns = max_tmo_ns,
+		.nb_timers = MAX_TIMERS * 10,
+	};
+	uint32_t caps = 0;
+	const char *pool_name = "timdev_test_pool";
+
+	global_bkt_tck_ns = bkt_tck_ns;
+
+	TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
+				"failed to get adapter capabilities");
+	if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+		timdev = rte_event_timer_adapter_create_ext(&config,
+				test_port_conf_cb,
+				NULL);
+		setup_adapter_service(timdev);
+		using_services = true;
+	} else
+		timdev = rte_event_timer_adapter_create(&config);
+
+	TEST_ASSERT_NOT_NULL(timdev,
+			"failed to create event timer adapter");
+
+	TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0,
+			"failed to start event timer adapter");
+
+	/* Create event timer mempool */
+	eventdev_test_mempool = rte_mempool_create(pool_name,
+			MAX_TIMERS * 2,
+			sizeof(struct rte_event_timer), /* element size */
+			0, /* cache size */
+			0, NULL, NULL, NULL, NULL,
+			rte_socket_id(), 0);
+	if (!eventdev_test_mempool) {
+		printf("ERROR creating mempool\n");
+		return TEST_FAILED;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+timdev_setup_usec(void)
+{
+	return using_services ?
+		/* Max timeout is 10,000us and bucket interval is 100us */
+		_timdev_setup(1E7, 1E5) :
+		/* Max timeout is 100us and bucket interval is 1us */
+		_timdev_setup(1E5, 1E3);
+}
+
+static int
+timdev_setup_usec_multicore(void)
+{
+	return using_services ?
+		/* Max timeout is 10,000us and bucket interval is 100us */
+		_timdev_setup(1E7, 1E5) :
+		/* Max timeout is 100us and bucket interval is 1us */
+		_timdev_setup(1E5, 1E3);
+}
+
+static int
+timdev_setup_msec(void)
+{
+	/* Max timeout is 3 mins, and bucket interval is 100 ms */
+	return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10);
+}
+
+static int
+timdev_setup_sec(void)
+{
+	/* Max timeout is 100sec and bucket interval is 1sec */
+	return _timdev_setup(1E11, 1E9);
+}
+
+static int
+timdev_setup_sec_multicore(void)
+{
+	/* Max timeout is 100sec and bucket interval is 1sec */
+	return _timdev_setup(1E11, 1E9);
+}
+
+static void
+timdev_teardown(void)
+{
+	rte_event_timer_adapter_stop(timdev);
+	rte_event_timer_adapter_free(timdev);
+
+	rte_mempool_free(eventdev_test_mempool);
+}
+
+static inline int
+test_timer_state(void)
+{
+	struct rte_event_timer *ev_tim;
+	struct rte_event ev;
+	const struct rte_event_timer tim = {
+		.ev.op = RTE_EVENT_OP_NEW,
+		.ev.queue_id = 0,
+		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.ev.event_type = RTE_EVENT_TYPE_TIMER,
+		.state = RTE_EVENT_TIMER_NOT_ARMED,
+	};
+
+	rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
+	*ev_tim = tim;
+	ev_tim->ev.event_ptr = ev_tim;
+	ev_tim->timeout_ticks = 120;
+
+	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0,
+			"Armed timer exceeding max_timeout.");
+	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
+			"Improper timer state set expected %d returned %d",
+			RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state);
+
+	ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
+	ev_tim->timeout_ticks = 10;
+
+	TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
+			"Failed to arm timer with proper timeout.");
+	TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
+			"Improper timer state set expected %d returned %d",
+			RTE_EVENT_TIMER_ARMED, ev_tim->state);
+
+	if (!using_services)
+		rte_delay_us(20);
+	else
+		rte_delay_us(1000 + 200);
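+	/* The timer above was armed for 10 ticks; the delay just taken covers
+	 * one full timeout plus slack in either mode (1us ticks without
+	 * services, 100us ticks with them), so exactly one expiry event
+	 * should now be waiting in the event device.
+	 */
+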
TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1, + "Armed timer failed to trigger."); + + ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; + ev_tim->timeout_ticks = 90; + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, + "Failed to arm timer with proper timeout."); + TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1), + 1, "Failed to cancel armed timer"); + TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED, + "Improper timer state set expected %d returned %d", + RTE_EVENT_TIMER_CANCELED, ev_tim->state); + + rte_mempool_put(eventdev_test_mempool, (void *)ev_tim); + + return TEST_SUCCESS; +} + +static inline int +_arm_timers(uint64_t timeout_tcks, uint64_t timers) +{ + uint64_t i; + struct rte_event_timer *ev_tim; + const struct rte_event_timer tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = 0, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = timeout_tcks, + }; + + for (i = 0; i < timers; i++) { + + TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, + (void **)&ev_tim), + "mempool alloc failed"); + *ev_tim = tim; + ev_tim->ev.event_ptr = ev_tim; + + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, + 1), 1, "Failed to arm timer %d", + rte_errno); + } + + return TEST_SUCCESS; +} + +static inline int +_wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count, + uint64_t cancel_count) +{ + uint8_t valid_event; + uint64_t events = 0; + uint64_t wait_start, max_wait; + struct rte_event ev; + + max_wait = rte_get_timer_hz() * wait_sec; + wait_start = rte_get_timer_cycles(); + while (1) { + if (rte_get_timer_cycles() - wait_start > max_wait) { + if (events + cancel_count != arm_count) + TEST_ASSERT_SUCCESS(max_wait, + "Max time limit for timers exceeded."); + break; + } + + valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0); + if (!valid_event) + continue; + + rte_mempool_put(eventdev_test_mempool, ev.event_ptr); + events++; + } + + return TEST_SUCCESS; +} + +static inline int +test_timer_arm(void) +{ + TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS), + "Failed to arm timers"); + TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0), + "Timer triggered count doesn't match arm count"); + return TEST_SUCCESS; +} + +static int +_arm_wrapper(void *arg) +{ + RTE_SET_USED(arg); + + TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS), + "Failed to arm timers"); + + return TEST_SUCCESS; +} + +static inline int +test_timer_arm_multicore(void) +{ + + uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0); + uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0); + + rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1); + rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2); + + rte_eal_mp_wait_lcore(); + TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0), + "Timer triggered count doesn't match arm count"); + + return TEST_SUCCESS; +} + +#define MAX_BURST 16 +static inline int +_arm_timers_burst(uint64_t timeout_tcks, uint64_t timers) +{ + uint64_t i; + int j; + struct rte_event_timer *ev_tim[MAX_BURST]; + const struct rte_event_timer tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = 0, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = timeout_tcks, + }; + + for (i = 0; i < timers / MAX_BURST; i++) { + TEST_ASSERT_SUCCESS(rte_mempool_get_bulk( 
+ eventdev_test_mempool, + (void **)ev_tim, MAX_BURST), + "mempool alloc failed"); + + for (j = 0; j < MAX_BURST; j++) { + *ev_tim[j] = tim; + ev_tim[j]->ev.event_ptr = ev_tim[j]; + } + + TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev, + ev_tim, tim.timeout_ticks, MAX_BURST), + MAX_BURST, "Failed to arm timer %d", rte_errno); + } + + return TEST_SUCCESS; +} + +static inline int +test_timer_arm_burst(void) +{ + TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS), + "Failed to arm timers"); + TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0), + "Timer triggered count doesn't match arm count"); + + return TEST_SUCCESS; +} + +static int +_arm_wrapper_burst(void *arg) +{ + RTE_SET_USED(arg); + + TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS), + "Failed to arm timers"); + + return TEST_SUCCESS; +} + +static inline int +test_timer_arm_burst_multicore(void) +{ + rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1); + rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2); + + rte_eal_mp_wait_lcore(); + TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0), + "Timer triggered count doesn't match arm count"); + + return TEST_SUCCESS; +} + +static inline int +test_timer_cancel(void) +{ + uint64_t i; + struct rte_event_timer *ev_tim; + const struct rte_event_timer tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = 0, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = 20, + }; + + for (i = 0; i < MAX_TIMERS; i++) { + TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, + (void **)&ev_tim), + "mempool alloc failed"); + *ev_tim = tim; + ev_tim->ev.event_ptr = ev_tim; + + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, + 1), 1, "Failed to arm timer %d", + rte_errno); + + rte_delay_us(100 + (i % 5000)); + + TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, + &ev_tim, 1), 1, + "Failed to cancel event timer %d", rte_errno); + rte_mempool_put(eventdev_test_mempool, ev_tim); + } + + + TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, + MAX_TIMERS), + "Timer triggered count doesn't match arm, cancel count"); + + return TEST_SUCCESS; +} + +static int +_cancel_producer(uint64_t timeout_tcks, uint64_t timers) +{ + uint64_t i; + struct rte_event_timer *ev_tim; + const struct rte_event_timer tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = 0, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = timeout_tcks, + }; + + for (i = 0; i < timers; i++) { + TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, + (void **)&ev_tim), + "mempool alloc failed"); + + *ev_tim = tim; + ev_tim->ev.event_ptr = ev_tim; + + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, + 1), 1, "Failed to arm timer %d", + rte_errno); + + TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED, + "Failed to arm event timer"); + + while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0) + ; + } + + return TEST_SUCCESS; +} + +static int +_cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers) +{ + + uint64_t i; + int j, ret; + struct rte_event_timer *ev_tim[MAX_BURST]; + const struct rte_event_timer tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = 0, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = 
RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = timeout_tcks, + }; + int arm_count = 0; + + for (i = 0; i < timers / MAX_BURST; i++) { + TEST_ASSERT_SUCCESS(rte_mempool_get_bulk( + eventdev_test_mempool, + (void **)ev_tim, MAX_BURST), + "mempool alloc failed"); + + for (j = 0; j < MAX_BURST; j++) { + *ev_tim[j] = tim; + ev_tim[j]->ev.event_ptr = ev_tim[j]; + } + + TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev, + ev_tim, tim.timeout_ticks, MAX_BURST), + MAX_BURST, "Failed to arm timer %d", rte_errno); + + for (j = 0; j < MAX_BURST; j++) + TEST_ASSERT_EQUAL(ev_tim[j]->state, + RTE_EVENT_TIMER_ARMED, + "Event timer not armed, state = %d", + ev_tim[j]->state); + + ret = rte_ring_enqueue_bulk(timer_producer_ring, + (void **)ev_tim, MAX_BURST, NULL); + TEST_ASSERT_EQUAL(ret, MAX_BURST, + "Failed to enqueue event timers to ring"); + arm_count += ret; + } + + TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS, + "Failed to arm expected number of event timers"); + + return TEST_SUCCESS; +} + +static int +_cancel_producer_wrapper(void *args) +{ + RTE_SET_USED(args); + + return _cancel_producer(20, MAX_TIMERS); +} + +static int +_cancel_producer_burst_wrapper(void *args) +{ + RTE_SET_USED(args); + + return _cancel_producer_burst(100, MAX_TIMERS); +} + +static int +_cancel_thread(void *args) +{ + RTE_SET_USED(args); + struct rte_event_timer *ev_tim = NULL; + uint64_t cancel_count = 0; + uint16_t ret; + + while (!arm_done || rte_ring_count(timer_producer_ring) > 0) { + if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim)) + continue; + + ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer"); + rte_mempool_put(eventdev_test_mempool, (void *)ev_tim); + cancel_count++; + } + + return TEST_SUCCESS; +} + +static int +_cancel_burst_thread(void *args) +{ + RTE_SET_USED(args); + + int ret, i, n; + struct rte_event_timer *ev_tim[MAX_BURST]; + uint64_t cancel_count = 0; + uint64_t dequeue_count = 0; + + while (!arm_done || rte_ring_count(timer_producer_ring) > 0) { + n = rte_ring_dequeue_burst(timer_producer_ring, + (void **)ev_tim, MAX_BURST, NULL); + if (!n) + continue; + + dequeue_count += n; + + for (i = 0; i < n; i++) + TEST_ASSERT_EQUAL(ev_tim[i]->state, + RTE_EVENT_TIMER_ARMED, + "Event timer not armed, state = %d", + ev_tim[i]->state); + + ret = rte_event_timer_cancel_burst(timdev, ev_tim, n); + TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of " + "event timers"); + rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim, + ret); + + cancel_count += ret; + } + + TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS, + "Failed to cancel expected number of timers: " + "expected = %d, cancel_count = %"PRIu64", " + "dequeue_count = %"PRIu64"\n", MAX_TIMERS, + cancel_count, dequeue_count); + + return TEST_SUCCESS; +} + +static inline int +test_timer_cancel_multicore(void) +{ + arm_done = 0; + timer_producer_ring = rte_ring_create("timer_cancel_queue", + MAX_TIMERS * 2, rte_socket_id(), 0); + TEST_ASSERT_NOT_NULL(timer_producer_ring, + "Unable to reserve memory for ring"); + + rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3); + rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1); + rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2); + + rte_eal_wait_lcore(test_lcore1); + rte_eal_wait_lcore(test_lcore2); + arm_done = 1; + rte_eal_wait_lcore(test_lcore3); + rte_ring_free(timer_producer_ring); + + TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2, + MAX_TIMERS * 
2), + "Timer triggered count doesn't match arm count"); + + return TEST_SUCCESS; +} + +static inline int +test_timer_cancel_burst_multicore(void) +{ + arm_done = 0; + timer_producer_ring = rte_ring_create("timer_cancel_queue", + MAX_TIMERS * 2, rte_socket_id(), 0); + TEST_ASSERT_NOT_NULL(timer_producer_ring, + "Unable to reserve memory for ring"); + + rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2); + rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL, + test_lcore1); + + rte_eal_wait_lcore(test_lcore1); + arm_done = 1; + rte_eal_wait_lcore(test_lcore2); + rte_ring_free(timer_producer_ring); + + TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, + MAX_TIMERS), + "Timer triggered count doesn't match arm count"); + + return TEST_SUCCESS; +} + +static inline int +test_timer_cancel_random(void) +{ + uint64_t i; + uint64_t events_canceled = 0; + struct rte_event_timer *ev_tim; + const struct rte_event_timer tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = 0, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = 20, + }; + + for (i = 0; i < MAX_TIMERS; i++) { + + TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool, + (void **)&ev_tim), + "mempool alloc failed"); + *ev_tim = tim; + ev_tim->ev.event_ptr = ev_tim; + + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, + 1), 1, "Failed to arm timer %d", + rte_errno); + + if (rte_rand() & 1) { + rte_delay_us(100 + (i % 5000)); + TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst( + timdev, + &ev_tim, 1), 1, + "Failed to cancel event timer %d", rte_errno); + rte_mempool_put(eventdev_test_mempool, ev_tim); + events_canceled++; + } + } + + TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS, + events_canceled), + "Timer triggered count doesn't match arm, cancel count"); + + return TEST_SUCCESS; +} + +/* Check that the adapter can be created correctly */ +static int +adapter_create(void) +{ + int adapter_id = 0; + struct rte_event_timer_adapter *adapter, *adapter2; + + struct rte_event_timer_adapter_conf conf = { + .event_dev_id = evdev + 1, // invalid event dev id + .timer_adapter_id = adapter_id, + .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, + .timer_tick_ns = NSECPERSEC / 10, + .max_tmo_ns = 180 * NSECPERSEC, + .nb_timers = MAX_TIMERS, + .flags = 0, + }; + uint32_t caps = 0; + + /* Test invalid conf */ + adapter = rte_event_timer_adapter_create(&conf); + TEST_ASSERT_NULL(adapter, "Created adapter with invalid " + "event device id"); + TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for " + "invalid event device id"); + + /* Test valid conf */ + conf.event_dev_id = evdev; + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps), + "failed to get adapter capabilities"); + if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) + adapter = rte_event_timer_adapter_create_ext(&conf, + test_port_conf_cb, + NULL); + else + adapter = rte_event_timer_adapter_create(&conf); + TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid " + "configuration"); + + /* Test existing id */ + adapter2 = rte_event_timer_adapter_create(&conf); + TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id"); + TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing " + "id"); + + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter), + "Failed to free adapter"); + + rte_mempool_free(eventdev_test_mempool); + + return TEST_SUCCESS; +} + + +/* 
Test that adapter can be freed correctly. */ +static int +adapter_free(void) +{ + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev), + "Failed to stop adapter"); + + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev), + "Failed to free valid adapter"); + + /* Test free of already freed adapter */ + TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev), + "Freed adapter that was already freed"); + + /* Test free of null adapter */ + timdev = NULL; + TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev), + "Freed null adapter"); + + rte_mempool_free(eventdev_test_mempool); + + return TEST_SUCCESS; +} + +/* Test that adapter info can be retrieved and is correct. */ +static int +adapter_get_info(void) +{ + struct rte_event_timer_adapter_info info; + + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info), + "Failed to get adapter info"); + + if (using_services) + TEST_ASSERT_EQUAL(info.event_dev_port_id, 1, + "Expected port id = 1, got port id = %d", + info.event_dev_port_id); + + return TEST_SUCCESS; +} + +/* Test adapter lookup via adapter ID. */ +static int +adapter_lookup(void) +{ + struct rte_event_timer_adapter *adapter; + + adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID); + TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter"); + + return TEST_SUCCESS; +} + +static int +adapter_start(void) +{ + TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC, + NSECPERSEC / 10), + "Failed to start adapter"); + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_start(timdev), + "Failed to repeatedly start adapter"); + + return TEST_SUCCESS; +} + +/* Test that adapter stops correctly. */ +static int +adapter_stop(void) +{ + struct rte_event_timer_adapter *l_adapter = NULL; + + /* Test adapter stop */ + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev), + "Failed to stop event adapter"); + + TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter), + "Erroneously stopped null event adapter"); + + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev), + "Failed to free adapter"); + + rte_mempool_free(eventdev_test_mempool); + + return TEST_SUCCESS; +} + +/* Test increment and reset of ev_enq_count stat */ +static int +stat_inc_reset_ev_enq(void) +{ + int ret, i, n; + int num_evtims = MAX_TIMERS; + struct rte_event_timer *evtims[num_evtims]; + struct rte_event evs[BATCH_SIZE]; + struct rte_event_timer_adapter_stats stats; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = 5, // expire in .5 sec + }; + + ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, + num_evtims); + TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d", + ret); + + for (i = 0; i < num_evtims; i++) { + *evtims[i] = init_tim; + evtims[i]->ev.event_ptr = evtims[i]; + } + + ret = rte_event_timer_adapter_stats_get(timdev, &stats); + TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); + TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at " + "startup"); + + /* Test with the max value for the adapter */ + ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims); + TEST_ASSERT_EQUAL(ret, num_evtims, + "Failed to arm all event timers: attempted = %d, " + "succeeded = %d, rte_errno = %s", + num_evtims, ret, rte_strerror(rte_errno)); + + rte_delay_ms(1000); + +#define MAX_TRIES num_evtims + int sum = 0; + int tries = 
0; + bool done = false; + while (!done) { + sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, + RTE_DIM(evs), 10); + if (sum >= num_evtims || ++tries >= MAX_TRIES) + done = true; + + rte_delay_ms(10); + } + + TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " + "got %d", num_evtims, sum); + + TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); + + rte_delay_ms(100); + + /* Make sure the eventdev is still empty */ + n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), + 10); + + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " + "events from event device"); + + /* Check stats again */ + ret = rte_event_timer_adapter_stats_get(timdev, &stats); + TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); + TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims, + "Expected enqueue stat = %d; got %d", num_evtims, + (int)stats.ev_enq_count); + + /* Reset and check again */ + ret = rte_event_timer_adapter_stats_reset(timdev); + TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats"); + + ret = rte_event_timer_adapter_stats_get(timdev, &stats); + TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats"); + TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, + "Expected enqueue stat = %d; got %d", 0, + (int)stats.ev_enq_count); + + rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims, + num_evtims); + + return TEST_SUCCESS; +} + +/* Test various cases in arming timers */ +static int +event_timer_arm(void) +{ + uint16_t n; + int ret; + struct rte_event_timer_adapter *adapter = timdev; + struct rte_event_timer *evtim = NULL; + struct rte_event evs[BATCH_SIZE]; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = 5, // expire in .5 sec + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { + /* Failed to get an event timer object */ + return TEST_FAILED; + } + + /* Set up a timer */ + *evtim = init_tim; + evtim->ev.event_ptr = evtim; + + /* Test single timer arm succeeds */ + ret = rte_event_timer_arm_burst(adapter, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", + rte_strerror(rte_errno)); + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer " + "in incorrect state"); + + /* Test arm of armed timer fails */ + ret = rte_event_timer_arm_burst(adapter, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 0, "expected return value from " + "rte_event_timer_arm_burst: 0, got: %d", ret); + TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " + "after arming already armed timer"); + + /* Let timer expire */ + rte_delay_ms(1000); + + n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); + TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " + "events from event device"); + + rte_mempool_put(eventdev_test_mempool, evtim); + + return TEST_SUCCESS; +} + +/* This test checks that repeated references to the same event timer in the + * arm request work as expected; only the first one through should succeed. 
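+ * The duplicate reference is reported through rte_errno == EALREADY,
+ * while the reference that armed successfully still expires exactly once.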
+ */
+static int +event_timer_arm_double(void) +{ + uint16_t n; + int ret; + struct rte_event_timer_adapter *adapter = timdev; + struct rte_event_timer *evtim = NULL; + struct rte_event evs[BATCH_SIZE]; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = 5, // expire in .5 sec + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { + /* Failed to get an event timer object */ + return TEST_FAILED; + } + + /* Set up a timer */ + *evtim = init_tim; + evtim->ev.event_ptr = evtim; + + struct rte_event_timer *evtim_arr[] = {evtim, evtim}; + ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr)); + TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from " + "rte_event_timer_arm_burst"); + TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " + "after double-arm"); + + /* Let timer expire */ + rte_delay_ms(600); + + n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); + TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - " + "expected: 1, actual: %d", n); + + rte_mempool_put(eventdev_test_mempool, evtim); + + return TEST_SUCCESS; +} +
+/* Test that the timer expiry event is generated at the expected time. */
+static int +event_timer_arm_expiry(void) +{ + uint16_t n; + int ret; + struct rte_event_timer_adapter *adapter = timdev; + struct rte_event_timer *evtim = NULL; + struct rte_event_timer *evtim2 = NULL; + struct rte_event evs[BATCH_SIZE]; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { + /* Failed to get an event timer object */ + return TEST_FAILED; + } + + /* Set up an event timer */ + *evtim = init_tim;
+ evtim->timeout_ticks = 30; // expire in 3 secs
+ evtim->ev.event_ptr = evtim; + + ret = rte_event_timer_arm_burst(adapter, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s", + rte_strerror(rte_errno)); + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event " + "timer in incorrect state"); + + rte_delay_ms(2999); + + n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event"); + + /* Delay 100 ms to account for the adapter tick window - should let us + * dequeue one event + */ + rte_delay_ms(100); + + n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); + TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer " + "expiry events", n); + TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER, + "Dequeued unexpected type of event"); + + /* Check that we recover the original event timer and then free it */ + evtim2 = evs[0].event_ptr; + TEST_ASSERT_EQUAL(evtim, evtim2, + "Failed to recover pointer to original event timer"); + rte_mempool_put(eventdev_test_mempool, evtim2); + + return TEST_SUCCESS; +} + +/* Check that rearming a timer works as expected.
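+ * The timer is recovered from the expiry event's event_ptr field, its state
+ * is reset to RTE_EVENT_TIMER_NOT_ARMED, and it is then armed a second time.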
*/ +static int +event_timer_arm_rearm(void) +{ + uint16_t n; + int ret; + struct rte_event_timer *evtim = NULL; + struct rte_event_timer *evtim2 = NULL; + struct rte_event evs[BATCH_SIZE]; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { + /* Failed to get an event timer object */ + return TEST_FAILED; + } + + /* Set up a timer */ + *evtim = init_tim; + evtim->timeout_ticks = 1; // expire in 0.1 sec + evtim->ev.event_ptr = evtim; + + /* Arm it */ + ret = rte_event_timer_arm_burst(timdev, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", + rte_strerror(rte_errno)); + + /* Add 100ms to account for the adapter tick window */ + rte_delay_ms(100 + 100); + + n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); + TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " + "events from event device"); + + /* Recover the timer through the event that was dequeued. */ + evtim2 = evs[0].event_ptr; + TEST_ASSERT_EQUAL(evtim, evtim2, + "Failed to recover pointer to original event timer"); + + /* Need to reset state in case implementation can't do it */ + evtim2->state = RTE_EVENT_TIMER_NOT_ARMED; + + /* Rearm it */ + ret = rte_event_timer_arm_burst(timdev, &evtim2, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", + rte_strerror(rte_errno)); + + /* Add 100ms to account for the adapter tick window */ + rte_delay_ms(100 + 100); + + n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); + TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " + "events from event device"); + + /* Free it */ + evtim2 = evs[0].event_ptr; + TEST_ASSERT_EQUAL(evtim, evtim2, + "Failed to recover pointer to original event timer"); + rte_mempool_put(eventdev_test_mempool, evtim2); + + return TEST_SUCCESS; +} + +/* Check that the adapter handles the max specified number of timers as + * expected. 
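+ * All MAX_TIMERS timers are armed in a single burst call; each one should
+ * produce exactly one expiry event, leaving the event device empty afterward.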
+ */
+static int +event_timer_arm_max(void) +{ + int ret, i, n; + int num_evtims = MAX_TIMERS; + struct rte_event_timer *evtims[num_evtims]; + struct rte_event evs[BATCH_SIZE]; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = 5, // expire in .5 sec + }; + + ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, + num_evtims); + TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d", + ret); + + for (i = 0; i < num_evtims; i++) { + *evtims[i] = init_tim; + evtims[i]->ev.event_ptr = evtims[i]; + } + + /* Test with the max value for the adapter */ + ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims); + TEST_ASSERT_EQUAL(ret, num_evtims, + "Failed to arm all event timers: attempted = %d, " + "succeeded = %d, rte_errno = %s", + num_evtims, ret, rte_strerror(rte_errno)); + + rte_delay_ms(1000); + +#define MAX_TRIES num_evtims + int sum = 0; + int tries = 0; + bool done = false; + while (!done) { + sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, + RTE_DIM(evs), 10); + if (sum >= num_evtims || ++tries >= MAX_TRIES) + done = true; + + rte_delay_ms(10); + } + + TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " + "got %d", num_evtims, sum); + + TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); + + rte_delay_ms(100); + + /* Make sure the eventdev is still empty */ + n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), + 10); + + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " + "events from event device"); + + rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims, + num_evtims); + + return TEST_SUCCESS; +} + +/* Check that creating an event timer with incorrect event sched type fails. */ +static int +event_timer_arm_invalid_sched_type(void) +{ + int ret; + struct rte_event_timer *evtim = NULL; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = 5, // expire in .5 sec + }; + + if (!using_services) + return -ENOTSUP; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { + /* Failed to get an event timer object */ + return TEST_FAILED; + } + + *evtim = init_tim; + evtim->ev.event_ptr = evtim; + evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type + + ret = rte_event_timer_arm_burst(timdev, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " + "sched type, but didn't");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" + " arm fail with invalid sched type"); +
+ rte_mempool_put(eventdev_test_mempool, evtim); +
+ return TEST_SUCCESS; +} + +/* Check that creating an event timer with a timeout value that is too small or + * too big fails.
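+ * Both arm attempts should fail with rte_errno set to EINVAL and leave the
+ * timer in the RTE_EVENT_TIMER_ERROR_TOOEARLY or RTE_EVENT_TIMER_ERROR_TOOLATE
+ * state, respectively.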
+ */ +static int +event_timer_arm_invalid_timeout(void) +{ + int ret; + struct rte_event_timer *evtim = NULL; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = 5, // expire in .5 sec + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { + /* Failed to get an event timer object */ + return TEST_FAILED; + } + + *evtim = init_tim; + evtim->ev.event_ptr = evtim; + evtim->timeout_ticks = 0; // timeout too small + + ret = rte_event_timer_arm_burst(timdev, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " + "timeout, but didn't"); + TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" + " arm fail with invalid timeout"); + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY, + "Unexpected event timer state"); + + *evtim = init_tim; + evtim->ev.event_ptr = evtim; + evtim->timeout_ticks = 1801; // timeout too big + + ret = rte_event_timer_arm_burst(timdev, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid " + "timeout, but didn't"); + TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after" + " arm fail with invalid timeout"); + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE, + "Unexpected event timer state"); + + rte_mempool_put(eventdev_test_mempool, evtim); + + return TEST_SUCCESS; +} + +static int +event_timer_cancel(void) +{ + uint16_t n; + int ret; + struct rte_event_timer_adapter *adapter = timdev; + struct rte_event_timer *evtim = NULL; + struct rte_event evs[BATCH_SIZE]; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { + /* Failed to get an event timer object */ + return TEST_FAILED; + } + + /* Check that cancelling an uninited timer fails */ + ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling " + "uninited timer"); + TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after " + "cancelling uninited timer"); + + /* Set up a timer */ + *evtim = init_tim; + evtim->ev.event_ptr = evtim; + evtim->timeout_ticks = 30; // expire in 3 sec + + /* Check that cancelling an inited but unarmed timer fails */ + ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling " + "unarmed timer"); + TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after " + "cancelling unarmed timer"); + + ret = rte_event_timer_arm_burst(adapter, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", + rte_strerror(rte_errno)); + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, + "evtim in incorrect state"); + + /* Delay 1 sec */ + rte_delay_ms(1000); + + ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n", + rte_strerror(rte_errno)); + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED, + "evtim in incorrect state"); + + rte_delay_ms(3000); + + /* Make sure that no 
expiry event was generated */ + n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); + + rte_mempool_put(eventdev_test_mempool, evtim); + + return TEST_SUCCESS; +} + +static int +event_timer_cancel_double(void) +{ + uint16_t n; + int ret; + struct rte_event_timer_adapter *adapter = timdev; + struct rte_event_timer *evtim = NULL; + struct rte_event evs[BATCH_SIZE]; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, + .ev.sched_type = RTE_SCHED_TYPE_ATOMIC, + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + .timeout_ticks = 5, // expire in .5 sec + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { + /* Failed to get an event timer object */ + return TEST_FAILED; + } + + /* Set up a timer */ + *evtim = init_tim; + evtim->ev.event_ptr = evtim; + evtim->timeout_ticks = 30; // expire in 3 sec + + ret = rte_event_timer_arm_burst(adapter, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", + rte_strerror(rte_errno)); + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, + "timer in unexpected state"); + + /* Now, test that referencing the same timer twice in the same call + * fails + */ + struct rte_event_timer *evtim_arr[] = {evtim, evtim}; + ret = rte_event_timer_cancel_burst(adapter, evtim_arr, + RTE_DIM(evtim_arr)); + + /* Two requests to cancel same timer, only one should succeed */ + TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer " + "twice"); + + TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " + "after double-cancel: rte_errno = %d", rte_errno); + + rte_delay_ms(3000); + + /* Still make sure that no expiry event was generated */ + n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); + + rte_mempool_put(eventdev_test_mempool, evtim); + + return TEST_SUCCESS; +} + +/* Check that event timer adapter tick resolution works as expected by testing + * the number of adapter ticks that occur within a particular time interval. + */ +static int +adapter_tick_resolution(void) +{ + struct rte_event_timer_adapter_stats stats; + uint64_t adapter_tick_count; + + /* Only run this test in the software driver case */ + if (!using_services) + return -ENOTSUP; + + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev), + "Failed to reset stats"); + + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev, + &stats), "Failed to get adapter stats"); + TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count " + "not zeroed out"); + + /* Delay 1 second; should let at least 10 ticks occur with the default + * adapter configuration used by this test. 
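+ * A tick period of roughly 100 ms is implied by the 10-12 tick window
+ * asserted below, which leaves some margin for scheduling jitter.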
+ */ + rte_delay_ms(1000); + + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev, + &stats), "Failed to get adapter stats"); + + adapter_tick_count = stats.adapter_tick_count; + TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12, + "Expected 10-12 adapter ticks, got %"PRIu64"\n", + adapter_tick_count); + + return TEST_SUCCESS; +} + +static int +adapter_create_max(void) +{ + int i; + uint32_t svc_start_count, svc_end_count; + struct rte_event_timer_adapter *adapters[ + RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1]; + + struct rte_event_timer_adapter_conf conf = { + .event_dev_id = evdev, + // timer_adapter_id set in loop + .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, + .timer_tick_ns = NSECPERSEC / 10, + .max_tmo_ns = 180 * NSECPERSEC, + .nb_timers = MAX_TIMERS, + .flags = 0, + }; + + if (!using_services) + return -ENOTSUP; + + svc_start_count = rte_service_get_count(); + + /* This test expects that there are sufficient service IDs available + * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to + * be less than RTE_SERVICE_NUM_MAX if anything else uses a service + * (the SW event device, for example). + */ + for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) { + conf.timer_adapter_id = i; + adapters[i] = rte_event_timer_adapter_create_ext(&conf, + test_port_conf_cb, NULL); + TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter " + "%d", i); + } + + conf.timer_adapter_id = i; + adapters[i] = rte_event_timer_adapter_create(&conf); + TEST_ASSERT_NULL(adapters[i], "Created too many adapters"); + + /* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services + * have been created + */ + svc_end_count = rte_service_get_count(); + TEST_ASSERT_EQUAL(svc_end_count - svc_start_count, + RTE_EVENT_TIMER_ADAPTER_NUM_MAX, + "Failed to create expected number of services"); + + for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) + TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]), + "Failed to free adapter %d", i); + + /* Check that service count is back to where it was at start */ + svc_end_count = rte_service_get_count(); + TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release " + "correct number of services"); + + return TEST_SUCCESS; +} + +static struct unit_test_suite event_timer_adptr_functional_testsuite = { + .suite_name = "event timer functional test suite", + .setup = testsuite_setup, + .teardown = testsuite_teardown, + .unit_test_cases = { + TEST_CASE_ST(timdev_setup_usec, timdev_teardown, + test_timer_state), + TEST_CASE_ST(timdev_setup_usec, timdev_teardown, + test_timer_arm), + TEST_CASE_ST(timdev_setup_usec, timdev_teardown, + test_timer_arm_burst), + TEST_CASE_ST(timdev_setup_sec, timdev_teardown, + test_timer_cancel), + TEST_CASE_ST(timdev_setup_sec, timdev_teardown, + test_timer_cancel_random), + TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown, + test_timer_arm_multicore), + TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown, + test_timer_arm_burst_multicore), + TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown, + test_timer_cancel_multicore), + TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown, + test_timer_cancel_burst_multicore), + TEST_CASE(adapter_create), + TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + adapter_get_info), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + adapter_lookup), + TEST_CASE_ST(NULL, timdev_teardown, + adapter_start), + TEST_CASE_ST(timdev_setup_msec, NULL, + adapter_stop), + 
TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + stat_inc_reset_ev_enq), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + event_timer_arm), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + event_timer_arm_double), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + event_timer_arm_expiry), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + event_timer_arm_rearm), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + event_timer_arm_max), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + event_timer_arm_invalid_sched_type), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + event_timer_arm_invalid_timeout), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + event_timer_cancel), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + event_timer_cancel_double), + TEST_CASE_ST(timdev_setup_msec, timdev_teardown, + adapter_tick_resolution), + TEST_CASE(adapter_create_max), + TEST_CASES_END() /**< NULL terminate unit test array */ + } +}; + +static int +test_event_timer_adapter_func(void) +{ + return unit_test_suite_runner(&event_timer_adptr_functional_testsuite); +} + +REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func); diff --git a/test/test/test_interrupts.c b/test/test/test_interrupts.c index 31a70a0c..dc19175d 100644 --- a/test/test/test_interrupts.c +++ b/test/test/test_interrupts.c @@ -20,6 +20,7 @@ enum test_interrupt_handle_type { TEST_INTERRUPT_HANDLE_VALID, TEST_INTERRUPT_HANDLE_VALID_UIO, TEST_INTERRUPT_HANDLE_VALID_ALARM, + TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT, TEST_INTERRUPT_HANDLE_CASE1, TEST_INTERRUPT_HANDLE_MAX }; @@ -80,6 +81,10 @@ test_interrupt_init(void) intr_handles[TEST_INTERRUPT_HANDLE_VALID_ALARM].type = RTE_INTR_HANDLE_ALARM; + intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT].fd = pfds.readfd; + intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT].type = + RTE_INTR_HANDLE_DEV_EVENT; + intr_handles[TEST_INTERRUPT_HANDLE_CASE1].fd = pfds.writefd; intr_handles[TEST_INTERRUPT_HANDLE_CASE1].type = RTE_INTR_HANDLE_UIO; @@ -250,6 +255,14 @@ test_interrupt_enable(void) return -1; } + /* check with specific valid intr_handle */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT]; + if (rte_intr_enable(&test_intr_handle) == 0) { + printf("unexpectedly enable a specific intr_handle " + "successfully\n"); + return -1; + } + /* check with valid handler and its type */ test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_CASE1]; if (rte_intr_enable(&test_intr_handle) < 0) { @@ -306,6 +319,14 @@ test_interrupt_disable(void) return -1; } + /* check with specific valid intr_handle */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT]; + if (rte_intr_disable(&test_intr_handle) == 0) { + printf("unexpectedly disable a specific intr_handle " + "successfully\n"); + return -1; + } + /* check with valid handler and its type */ test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_CASE1]; if (rte_intr_disable(&test_intr_handle) < 0) { @@ -393,9 +414,17 @@ test_interrupt(void) goto out; } + printf("Check valid device event interrupt full path\n"); + if (test_interrupt_full_path_check( + TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT) < 0) { + printf("failure occurred during checking valid device event " + "interrupt full path\n"); + goto out; + } + printf("Check valid alarm interrupt full path\n"); - if (test_interrupt_full_path_check(TEST_INTERRUPT_HANDLE_VALID_ALARM) - < 0) { + if (test_interrupt_full_path_check( + TEST_INTERRUPT_HANDLE_VALID_ALARM) < 0) { printf("failure occurred during checking valid 
alarm " "interrupt full path\n"); goto out; @@ -513,6 +542,12 @@ out: rte_intr_callback_unregister(&test_intr_handle, test_interrupt_callback_1, (void *)-1); + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT]; + rte_intr_callback_unregister(&test_intr_handle, + test_interrupt_callback, (void *)-1); + rte_intr_callback_unregister(&test_intr_handle, + test_interrupt_callback_1, (void *)-1); + rte_delay_ms(2 * TEST_INTERRUPT_CHECK_INTERVAL); /* deinit */ test_interrupt_deinit(); diff --git a/test/test/test_kni.c b/test/test/test_kni.c index e4839cdb..56773c8a 100644 --- a/test/test/test_kni.c +++ b/test/test/test_kni.c @@ -357,6 +357,8 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp) struct rte_kni_conf conf; struct rte_eth_dev_info info; struct rte_kni_ops ops; + const struct rte_pci_device *pci_dev; + const struct rte_bus *bus = NULL; if (!mp) return -1; @@ -366,8 +368,13 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp) memset(&ops, 0, sizeof(ops)); rte_eth_dev_info_get(port_id, &info); - conf.addr = info.pci_dev->addr; - conf.id = info.pci_dev->id; + if (info.device) + bus = rte_bus_find_by_device(info.device); + if (bus && !strcmp(bus->name, "pci")) { + pci_dev = RTE_DEV_TO_PCI(info.device); + conf.addr = pci_dev->addr; + conf.id = pci_dev->id; + } snprintf(conf.name, sizeof(conf.name), TEST_KNI_PORT); /* core id 1 configured for kernel thread */ @@ -465,6 +472,8 @@ test_kni(void) struct rte_kni_conf conf; struct rte_eth_dev_info info; struct rte_kni_ops ops; + const struct rte_pci_device *pci_dev; + const struct rte_bus *bus; /* Initialize KNI subsytem */ rte_kni_init(KNI_TEST_MAX_PORTS); @@ -480,7 +489,7 @@ test_kni(void) return -1; } - nb_ports = rte_eth_dev_count(); + nb_ports = rte_eth_dev_count_avail(); if (nb_ports == 0) { printf("no supported nic port found\n"); return -1; @@ -523,8 +532,15 @@ test_kni(void) memset(&conf, 0, sizeof(conf)); memset(&ops, 0, sizeof(ops)); rte_eth_dev_info_get(port_id, &info); - conf.addr = info.pci_dev->addr; - conf.id = info.pci_dev->id; + if (info.device) + bus = rte_bus_find_by_device(info.device); + else + bus = NULL; + if (bus && !strcmp(bus->name, "pci")) { + pci_dev = RTE_DEV_TO_PCI(info.device); + conf.addr = pci_dev->addr; + conf.id = pci_dev->id; + } conf.group_id = port_id; conf.mbuf_size = MAX_PACKET_SZ; @@ -552,8 +568,15 @@ test_kni(void) memset(&info, 0, sizeof(info)); memset(&ops, 0, sizeof(ops)); rte_eth_dev_info_get(port_id, &info); - conf.addr = info.pci_dev->addr; - conf.id = info.pci_dev->id; + if (info.device) + bus = rte_bus_find_by_device(info.device); + else + bus = NULL; + if (bus && !strcmp(bus->name, "pci")) { + pci_dev = RTE_DEV_TO_PCI(info.device); + conf.addr = pci_dev->addr; + conf.id = pci_dev->id; + } conf.group_id = port_id; conf.mbuf_size = MAX_PACKET_SZ; diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c index 426877a2..33c1fab0 100644 --- a/test/test/test_link_bonding_mode4.c +++ b/test/test/test_link_bonding_mode4.c @@ -425,7 +425,7 @@ test_setup(void) TEST_ASSERT(retval >= 0, "Failed to create ring ethdev '%s'\n", name); - port->port_id = rte_eth_dev_count() - 1; + port->port_id = rte_eth_dev_count_avail() - 1; } retval = configure_ethdev(port->port_id, 1); diff --git a/test/test/test_link_bonding_rssconf.c b/test/test/test_link_bonding_rssconf.c index 4cc08f5a..6a1a28d2 100644 --- a/test/test/test_link_bonding_rssconf.c +++ b/test/test/test_link_bonding_rssconf.c @@ -521,7 +521,7 @@ test_setup(void) FOR_EACH_PORT(n, port) 
{ port = &test_params.slave_ports[n]; - port_id = rte_eth_dev_count(); + port_id = rte_eth_dev_count_avail(); snprintf(name, sizeof(name), SLAVE_DEV_NAME_FMT, port_id); retval = rte_vdev_init(name, "size=64,copy=0"); diff --git a/test/test/test_malloc.c b/test/test/test_malloc.c index d23192cf..4b5abb4e 100644 --- a/test/test/test_malloc.c +++ b/test/test/test_malloc.c @@ -12,6 +12,7 @@ #include <rte_common.h> #include <rte_memory.h> +#include <rte_eal_memconfig.h> #include <rte_per_lcore.h> #include <rte_launch.h> #include <rte_eal.h> @@ -378,7 +379,7 @@ test_realloc(void) printf("NULL pointer returned from rte_zmalloc\n"); return -1; } - snprintf(ptr1, size1, "%s" ,hello_str); + strlcpy(ptr1, hello_str, size1); char *ptr2 = rte_realloc(ptr1, size2, RTE_CACHE_LINE_SIZE); if (!ptr2){ rte_free(ptr1); @@ -705,20 +706,22 @@ err_return: return -1; } +static int +check_socket_mem(const struct rte_memseg_list *msl, void *arg) +{ + int32_t *socket = arg; + + return *socket == msl->socket_id; +} + /* Check if memory is available on a specific socket */ static int is_mem_on_socket(int32_t socket) { - const struct rte_memseg *ms = rte_eal_get_physmem_layout(); - unsigned i; - - for (i = 0; i < RTE_MAX_MEMSEG; i++) { - if (socket == ms[i].socket_id) - return 1; - } - return 0; + return rte_memseg_list_walk(check_socket_mem, &socket); } + /* * Find what socket a memory address is on. Only works for addresses within * memsegs, not heap or stack... @@ -726,16 +729,9 @@ is_mem_on_socket(int32_t socket) static int32_t addr_to_socket(void * addr) { - const struct rte_memseg *ms = rte_eal_get_physmem_layout(); - unsigned i; + const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL); + return ms == NULL ? -1 : ms->socket_id; - for (i = 0; i < RTE_MAX_MEMSEG; i++) { - if ((ms[i].addr <= addr) && - ((uintptr_t)addr < - ((uintptr_t)ms[i].addr + (uintptr_t)ms[i].len))) - return ms[i].socket_id; - } - return -1; } /* Test using rte_[c|m|zm]alloc_socket() on a specific socket */ diff --git a/test/test/test_memory.c b/test/test/test_memory.c index 972321f1..b96bca77 100644 --- a/test/test/test_memory.c +++ b/test/test/test_memory.c @@ -5,8 +5,11 @@ #include <stdio.h> #include <stdint.h> +#include <rte_eal.h> +#include <rte_eal_memconfig.h> #include <rte_memory.h> #include <rte_common.h> +#include <rte_memzone.h> #include "test.h" @@ -23,12 +26,21 @@ */ static int +check_mem(const struct rte_memseg_list *msl __rte_unused, + const struct rte_memseg *ms, void *arg __rte_unused) +{ + volatile uint8_t *mem = (volatile uint8_t *) ms->addr; + size_t i, max = ms->len; + + for (i = 0; i < max; i++, mem++) + *mem; + return 0; +} + +static int test_memory(void) { uint64_t s; - unsigned i; - size_t j; - const struct rte_memseg *mem; /* * dump the mapped memory: the python-expect script checks @@ -45,14 +57,7 @@ test_memory(void) } /* try to read memory (should not segfault) */ - mem = rte_eal_get_physmem_layout(); - for (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL ; i++) { - - /* check memory */ - for (j = 0; j<mem[i].len; j++) { - *((volatile uint8_t *) mem[i].addr + j); - } - } + rte_memseg_walk(check_mem, NULL); return 0; } diff --git a/test/test/test_mempool.c b/test/test/test_mempool.c index 63f921e2..eebb1f24 100644 --- a/test/test/test_mempool.c +++ b/test/test/test_mempool.c @@ -327,17 +327,17 @@ test_mempool_sp_sc(void) } if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) { printf("Cannot lookup mempool from its name\n"); - rte_mempool_free(mp_spsc); - RET_ERR(); + ret = -1; + goto err; } lcore_next = 
rte_get_next_lcore(lcore_id, 0, 1); if (lcore_next >= RTE_MAX_LCORE) { - rte_mempool_free(mp_spsc); - RET_ERR(); + ret = -1; + goto err; } if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) { - rte_mempool_free(mp_spsc); - RET_ERR(); + ret = -1; + goto err; } rte_spinlock_init(&scsp_spinlock); memset(scsp_obj_table, 0, sizeof(scsp_obj_table)); @@ -348,7 +348,10 @@ test_mempool_sp_sc(void) if (rte_eal_wait_lcore(lcore_next) < 0) ret = -1; + +err: rte_mempool_free(mp_spsc); + mp_spsc = NULL; return ret; } @@ -444,34 +447,6 @@ test_mempool_same_name_twice_creation(void) return 0; } -/* - * Basic test for mempool_xmem functions. - */ -static int -test_mempool_xmem_misc(void) -{ - uint32_t elt_num, total_size; - size_t sz; - ssize_t usz; - - elt_num = MAX_KEEP; - total_size = rte_mempool_calc_obj_size(MEMPOOL_ELT_SIZE, 0, NULL); - sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX, - 0); - - usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1, - MEMPOOL_PG_SHIFT_MAX, 0); - - if (sz != (size_t)usz) { - printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) " - "returns: %#zx, while expected: %#zx;\n", - __func__, elt_num, total_size, sz, (size_t)usz); - return -1; - } - - return 0; -} - static void walk_cb(struct rte_mempool *mp, void *userdata __rte_unused) { @@ -596,9 +571,6 @@ test_mempool(void) if (test_mempool_same_name_twice_creation() < 0) goto err; - if (test_mempool_xmem_misc() < 0) - goto err; - /* test the stack handler */ if (test_mempool_basic(mp_stack, 1) < 0) goto err; diff --git a/test/test/test_memzone.c b/test/test/test_memzone.c index 8ece1ac8..efcf7327 100644 --- a/test/test/test_memzone.c +++ b/test/test/test_memzone.c @@ -104,28 +104,47 @@ test_memzone_reserving_zone_size_bigger_than_the_maximum(void) return 0; } +struct walk_arg { + int hugepage_2MB_avail; + int hugepage_1GB_avail; + int hugepage_16MB_avail; + int hugepage_16GB_avail; +}; +static int +find_available_pagesz(const struct rte_memseg_list *msl, void *arg) +{ + struct walk_arg *wa = arg; + + if (msl->page_sz == RTE_PGSIZE_2M) + wa->hugepage_2MB_avail = 1; + if (msl->page_sz == RTE_PGSIZE_1G) + wa->hugepage_1GB_avail = 1; + if (msl->page_sz == RTE_PGSIZE_16M) + wa->hugepage_16MB_avail = 1; + if (msl->page_sz == RTE_PGSIZE_16G) + wa->hugepage_16GB_avail = 1; + + return 0; +} + static int test_memzone_reserve_flags(void) { const struct rte_memzone *mz; - const struct rte_memseg *ms; - int hugepage_2MB_avail = 0; - int hugepage_1GB_avail = 0; - int hugepage_16MB_avail = 0; - int hugepage_16GB_avail = 0; + struct walk_arg wa; + int hugepage_2MB_avail, hugepage_1GB_avail; + int hugepage_16MB_avail, hugepage_16GB_avail; const size_t size = 100; - int i = 0; - ms = rte_eal_get_physmem_layout(); - for (i = 0; i < RTE_MAX_MEMSEG; i++) { - if (ms[i].hugepage_sz == RTE_PGSIZE_2M) - hugepage_2MB_avail = 1; - if (ms[i].hugepage_sz == RTE_PGSIZE_1G) - hugepage_1GB_avail = 1; - if (ms[i].hugepage_sz == RTE_PGSIZE_16M) - hugepage_16MB_avail = 1; - if (ms[i].hugepage_sz == RTE_PGSIZE_16G) - hugepage_16GB_avail = 1; - } + + memset(&wa, 0, sizeof(wa)); + + rte_memseg_list_walk(find_available_pagesz, &wa); + + hugepage_2MB_avail = wa.hugepage_2MB_avail; + hugepage_1GB_avail = wa.hugepage_1GB_avail; + hugepage_16MB_avail = wa.hugepage_16MB_avail; + hugepage_16GB_avail = wa.hugepage_16GB_avail; + /* Display the availability of 2MB ,1GB, 16MB, 16GB pages */ if (hugepage_2MB_avail) printf("2MB Huge pages available\n"); @@ -890,7 +909,7 @@ test_memzone_basic(void) const struct rte_memzone *mz; int 
memzone_cnt_after, memzone_cnt_expected; int memzone_cnt_before = - rte_eal_get_configuration()->mem_config->memzone_cnt; + rte_eal_get_configuration()->mem_config->memzones.count; memzone1 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100, SOCKET_ID_ANY, 0); @@ -914,7 +933,7 @@ test_memzone_basic(void) (memzone3 != NULL) + (memzone4 != NULL); memzone_cnt_after = - rte_eal_get_configuration()->mem_config->memzone_cnt; + rte_eal_get_configuration()->mem_config->memzones.count; if (memzone_cnt_after != memzone_cnt_expected) return -1; @@ -993,7 +1012,7 @@ test_memzone_basic(void) } memzone_cnt_after = - rte_eal_get_configuration()->mem_config->memzone_cnt; + rte_eal_get_configuration()->mem_config->memzones.count; if (memzone_cnt_after != memzone_cnt_before) return -1; @@ -1014,7 +1033,8 @@ static int test_memzone(void) { /* take note of how many memzones were allocated before running */ - int memzone_cnt = rte_eal_get_configuration()->mem_config->memzone_cnt; + int memzone_cnt = + rte_eal_get_configuration()->mem_config->memzones.count; printf("test basic memzone API\n"); if (test_memzone_basic() < 0) diff --git a/test/test/test_meter.c b/test/test/test_meter.c index 9f6abf99..8bb47e75 100644 --- a/test/test/test_meter.c +++ b/test/test/test_meter.c @@ -53,43 +53,43 @@ static inline int tm_test_srtcm_config(void) { #define SRTCM_CFG_MSG "srtcm_config" - struct rte_meter_srtcm sm; + struct rte_meter_srtcm_profile sp; struct rte_meter_srtcm_params sparams1; /* invalid parameter test */ - if(rte_meter_srtcm_config(NULL, NULL) == 0) + if (rte_meter_srtcm_profile_config(NULL, NULL) == 0) melog(SRTCM_CFG_MSG); - if(rte_meter_srtcm_config(&sm, NULL) == 0) + if (rte_meter_srtcm_profile_config(&sp, NULL) == 0) melog(SRTCM_CFG_MSG); - if(rte_meter_srtcm_config(NULL, &sparams) == 0) + if (rte_meter_srtcm_profile_config(NULL, &sparams) == 0) melog(SRTCM_CFG_MSG); /* cbs and ebs can't both be zero */ sparams1 = sparams; sparams1.cbs = 0; sparams1.ebs = 0; - if(rte_meter_srtcm_config(&sm, &sparams1) == 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams1) == 0) melog(SRTCM_CFG_MSG); /* cir should never be 0 */ sparams1 = sparams; sparams1.cir = 0; - if(rte_meter_srtcm_config(&sm, &sparams1) == 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams1) == 0) melog(SRTCM_CFG_MSG); /* one of ebs and cbs can be zero, should be successful */ sparams1 = sparams; sparams1.ebs = 0; - if(rte_meter_srtcm_config(&sm, &sparams1) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams1) != 0) melog(SRTCM_CFG_MSG); sparams1 = sparams; sparams1.cbs = 0; - if(rte_meter_srtcm_config(&sm, &sparams1) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams1) != 0) melog(SRTCM_CFG_MSG); /* usual parameter, should be successful */ - if(rte_meter_srtcm_config(&sm, &sparams) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0) melog(SRTCM_CFG_MSG); return 0; @@ -102,47 +102,47 @@ tm_test_srtcm_config(void) static inline int tm_test_trtcm_config(void) { - struct rte_meter_trtcm tm; + struct rte_meter_trtcm_profile tp; struct rte_meter_trtcm_params tparams1; #define TRTCM_CFG_MSG "trtcm_config" /* invalid parameter test */ - if(rte_meter_trtcm_config(NULL, NULL) == 0) + if (rte_meter_trtcm_profile_config(NULL, NULL) == 0) melog(TRTCM_CFG_MSG); - if(rte_meter_trtcm_config(&tm, NULL) == 0) + if (rte_meter_trtcm_profile_config(&tp, NULL) == 0) melog(TRTCM_CFG_MSG); - if(rte_meter_trtcm_config(NULL, &tparams) == 0) + if (rte_meter_trtcm_profile_config(NULL, &tparams) == 0) melog(TRTCM_CFG_MSG); /* cir, cbs, pir 
and pbs never be zero */ tparams1 = tparams; tparams1.cir = 0; - if(rte_meter_trtcm_config(&tm, &tparams1) == 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0) melog(TRTCM_CFG_MSG); tparams1 = tparams; tparams1.cbs = 0; - if(rte_meter_trtcm_config(&tm, &tparams1) == 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0) melog(TRTCM_CFG_MSG); tparams1 = tparams; tparams1.pbs = 0; - if(rte_meter_trtcm_config(&tm, &tparams1) == 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0) melog(TRTCM_CFG_MSG); tparams1 = tparams; tparams1.pir = 0; - if(rte_meter_trtcm_config(&tm, &tparams1) == 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0) melog(TRTCM_CFG_MSG); /* pir should be greater or equal to cir */ tparams1 = tparams; tparams1.pir = tparams1.cir - 1; - if(rte_meter_trtcm_config(&tm, &tparams1) == 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0) melog(TRTCM_CFG_MSG" pir < cir test"); /* usual parameter, should be successful */ - if(rte_meter_trtcm_config(&tm, &tparams) != 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0) melog(TRTCM_CFG_MSG); return 0; @@ -155,41 +155,50 @@ static inline int tm_test_srtcm_color_blind_check(void) { #define SRTCM_BLIND_CHECK_MSG "srtcm_blind_check" + struct rte_meter_srtcm_profile sp; struct rte_meter_srtcm sm; uint64_t time; uint64_t hz = rte_get_tsc_hz(); /* Test green */ - if(rte_meter_srtcm_config(&sm, &sparams) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0) + melog(SRTCM_BLIND_CHECK_MSG); + if (rte_meter_srtcm_config(&sm, &sp) != 0) melog(SRTCM_BLIND_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_srtcm_color_blind_check( - &sm, time, TM_TEST_SRTCM_CBS_DF - 1) + if (rte_meter_srtcm_color_blind_check( + &sm, &sp, time, TM_TEST_SRTCM_CBS_DF - 1) != e_RTE_METER_GREEN) melog(SRTCM_BLIND_CHECK_MSG" GREEN"); /* Test yellow */ - if(rte_meter_srtcm_config(&sm, &sparams) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0) + melog(SRTCM_BLIND_CHECK_MSG); + if (rte_meter_srtcm_config(&sm, &sp) != 0) melog(SRTCM_BLIND_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_srtcm_color_blind_check( - &sm, time, TM_TEST_SRTCM_CBS_DF + 1) + if (rte_meter_srtcm_color_blind_check( + &sm, &sp, time, TM_TEST_SRTCM_CBS_DF + 1) != e_RTE_METER_YELLOW) melog(SRTCM_BLIND_CHECK_MSG" YELLOW"); - if(rte_meter_srtcm_config(&sm, &sparams) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0) + melog(SRTCM_BLIND_CHECK_MSG); + if (rte_meter_srtcm_config(&sm, &sp) != 0) melog(SRTCM_BLIND_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_srtcm_color_blind_check( - &sm, time, (uint32_t)sm.ebs - 1) != e_RTE_METER_YELLOW) + if (rte_meter_srtcm_color_blind_check( + &sm, &sp, time, (uint32_t)sp.ebs - 1) != e_RTE_METER_YELLOW) melog(SRTCM_BLIND_CHECK_MSG" YELLOW"); /* Test red */ - if(rte_meter_srtcm_config(&sm, &sparams) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0) + melog(SRTCM_BLIND_CHECK_MSG); + if (rte_meter_srtcm_config(&sm, &sp) != 0) melog(SRTCM_BLIND_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_srtcm_color_blind_check( - &sm, time, TM_TEST_SRTCM_EBS_DF + 1) + if (rte_meter_srtcm_color_blind_check( + &sm, &sp, time, TM_TEST_SRTCM_EBS_DF + 1) != e_RTE_METER_RED) melog(SRTCM_BLIND_CHECK_MSG" RED"); @@ -206,41 +215,50 @@ tm_test_trtcm_color_blind_check(void) #define TRTCM_BLIND_CHECK_MSG "trtcm_blind_check" uint64_t time; + struct rte_meter_trtcm_profile tp; struct rte_meter_trtcm tm; uint64_t hz = rte_get_tsc_hz(); /* Test 
green */ - if(rte_meter_trtcm_config(&tm, &tparams) != 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0) + melog(TRTCM_BLIND_CHECK_MSG); + if (rte_meter_trtcm_config(&tm, &tp) != 0) melog(TRTCM_BLIND_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_trtcm_color_blind_check( - &tm, time, TM_TEST_TRTCM_CBS_DF - 1) + if (rte_meter_trtcm_color_blind_check( + &tm, &tp, time, TM_TEST_TRTCM_CBS_DF - 1) != e_RTE_METER_GREEN) melog(TRTCM_BLIND_CHECK_MSG" GREEN"); /* Test yellow */ - if(rte_meter_trtcm_config(&tm, &tparams) != 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0) + melog(TRTCM_BLIND_CHECK_MSG); + if (rte_meter_trtcm_config(&tm, &tp) != 0) melog(TRTCM_BLIND_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_trtcm_color_blind_check( - &tm, time, TM_TEST_TRTCM_CBS_DF + 1) + if (rte_meter_trtcm_color_blind_check( + &tm, &tp, time, TM_TEST_TRTCM_CBS_DF + 1) != e_RTE_METER_YELLOW) melog(TRTCM_BLIND_CHECK_MSG" YELLOW"); - if(rte_meter_trtcm_config(&tm, &tparams) != 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0) + melog(TRTCM_BLIND_CHECK_MSG); + if (rte_meter_trtcm_config(&tm, &tp) != 0) melog(TRTCM_BLIND_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_trtcm_color_blind_check( - &tm, time, TM_TEST_TRTCM_PBS_DF - 1) + if (rte_meter_trtcm_color_blind_check( + &tm, &tp, time, TM_TEST_TRTCM_PBS_DF - 1) != e_RTE_METER_YELLOW) melog(TRTCM_BLIND_CHECK_MSG" YELLOW"); /* Test red */ - if(rte_meter_trtcm_config(&tm, &tparams) != 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0) + melog(TRTCM_BLIND_CHECK_MSG); + if (rte_meter_trtcm_config(&tm, &tp) != 0) melog(TRTCM_BLIND_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_trtcm_color_blind_check( - &tm, time, TM_TEST_TRTCM_PBS_DF + 1) + if (rte_meter_trtcm_color_blind_check( + &tm, &tp, time, TM_TEST_TRTCM_PBS_DF + 1) != e_RTE_METER_RED) melog(TRTCM_BLIND_CHECK_MSG" RED"); @@ -262,36 +280,45 @@ tm_test_srtcm_aware_check (enum rte_meter_color in[4], enum rte_meter_color out[4]) { #define SRTCM_AWARE_CHECK_MSG "srtcm_aware_check" + struct rte_meter_srtcm_profile sp; struct rte_meter_srtcm sm; uint64_t time; uint64_t hz = rte_get_tsc_hz(); - if(rte_meter_srtcm_config(&sm, &sparams) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0) + melog(SRTCM_AWARE_CHECK_MSG); + if (rte_meter_srtcm_config(&sm, &sp) != 0) melog(SRTCM_AWARE_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_srtcm_color_aware_check( - &sm, time, TM_TEST_SRTCM_CBS_DF - 1, in[0]) != out[0]) + if (rte_meter_srtcm_color_aware_check( + &sm, &sp, time, TM_TEST_SRTCM_CBS_DF - 1, in[0]) != out[0]) melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[0], out[0]); - if(rte_meter_srtcm_config(&sm, &sparams) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0) + melog(SRTCM_AWARE_CHECK_MSG); + if (rte_meter_srtcm_config(&sm, &sp) != 0) melog(SRTCM_AWARE_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_srtcm_color_aware_check( - &sm, time, TM_TEST_SRTCM_CBS_DF + 1, in[1]) != out[1]) + if (rte_meter_srtcm_color_aware_check( + &sm, &sp, time, TM_TEST_SRTCM_CBS_DF + 1, in[1]) != out[1]) melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[1], out[1]); - if(rte_meter_srtcm_config(&sm, &sparams) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0) + melog(SRTCM_AWARE_CHECK_MSG); + if (rte_meter_srtcm_config(&sm, &sp) != 0) melog(SRTCM_AWARE_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_srtcm_color_aware_check( - &sm, time, TM_TEST_SRTCM_EBS_DF - 1, in[2]) != out[2]) + if 
(rte_meter_srtcm_color_aware_check( + &sm, &sp, time, TM_TEST_SRTCM_EBS_DF - 1, in[2]) != out[2]) melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[2], out[2]); - if(rte_meter_srtcm_config(&sm, &sparams) != 0) + if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0) + melog(SRTCM_AWARE_CHECK_MSG); + if (rte_meter_srtcm_config(&sm, &sp) != 0) melog(SRTCM_AWARE_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_srtcm_color_aware_check( - &sm, time, TM_TEST_SRTCM_EBS_DF + 1, in[3]) != out[3]) + if (rte_meter_srtcm_color_aware_check( + &sm, &sp, time, TM_TEST_SRTCM_EBS_DF + 1, in[3]) != out[3]) melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[3], out[3]); return 0; @@ -317,7 +344,7 @@ tm_test_srtcm_color_aware_check(void) out[1] = e_RTE_METER_YELLOW; out[2] = e_RTE_METER_YELLOW; out[3] = e_RTE_METER_RED; - if(tm_test_srtcm_aware_check(in, out) != 0) + if (tm_test_srtcm_aware_check(in, out) != 0) return -1; /** @@ -329,7 +356,7 @@ tm_test_srtcm_color_aware_check(void) out[1] = e_RTE_METER_YELLOW; out[2] = e_RTE_METER_YELLOW; out[3] = e_RTE_METER_RED; - if(tm_test_srtcm_aware_check(in, out) != 0) + if (tm_test_srtcm_aware_check(in, out) != 0) return -1; /** @@ -341,7 +368,7 @@ tm_test_srtcm_color_aware_check(void) out[1] = e_RTE_METER_RED; out[2] = e_RTE_METER_RED; out[3] = e_RTE_METER_RED; - if(tm_test_srtcm_aware_check(in, out) != 0) + if (tm_test_srtcm_aware_check(in, out) != 0) return -1; return 0; @@ -360,36 +387,45 @@ tm_test_trtcm_aware_check (enum rte_meter_color in[4], enum rte_meter_color out[4]) { #define TRTCM_AWARE_CHECK_MSG "trtcm_aware_check" + struct rte_meter_trtcm_profile tp; struct rte_meter_trtcm tm; uint64_t time; uint64_t hz = rte_get_tsc_hz(); - if(rte_meter_trtcm_config(&tm, &tparams) != 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0) + melog(TRTCM_AWARE_CHECK_MSG); + if (rte_meter_trtcm_config(&tm, &tp) != 0) melog(TRTCM_AWARE_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_trtcm_color_aware_check( - &tm, time, TM_TEST_TRTCM_CBS_DF - 1, in[0]) != out[0]) + if (rte_meter_trtcm_color_aware_check( + &tm, &tp, time, TM_TEST_TRTCM_CBS_DF - 1, in[0]) != out[0]) melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[0], out[0]); - if(rte_meter_trtcm_config(&tm, &tparams) != 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0) + melog(TRTCM_AWARE_CHECK_MSG); + if (rte_meter_trtcm_config(&tm, &tp) != 0) melog(TRTCM_AWARE_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_trtcm_color_aware_check( - &tm, time, TM_TEST_TRTCM_CBS_DF + 1, in[1]) != out[1]) + if (rte_meter_trtcm_color_aware_check( + &tm, &tp, time, TM_TEST_TRTCM_CBS_DF + 1, in[1]) != out[1]) melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[1], out[1]); - if(rte_meter_trtcm_config(&tm, &tparams) != 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0) + melog(TRTCM_AWARE_CHECK_MSG); + if (rte_meter_trtcm_config(&tm, &tp) != 0) melog(TRTCM_AWARE_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_trtcm_color_aware_check( - &tm, time, TM_TEST_TRTCM_PBS_DF - 1, in[2]) != out[2]) + if (rte_meter_trtcm_color_aware_check( + &tm, &tp, time, TM_TEST_TRTCM_PBS_DF - 1, in[2]) != out[2]) melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[2], out[2]); - if(rte_meter_trtcm_config(&tm, &tparams) != 0) + if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0) + melog(TRTCM_AWARE_CHECK_MSG); + if (rte_meter_trtcm_config(&tm, &tp) != 0) melog(TRTCM_AWARE_CHECK_MSG); time = rte_get_tsc_cycles() + hz; - if(rte_meter_trtcm_color_aware_check( - &tm, time, TM_TEST_TRTCM_PBS_DF + 1, in[3]) != out[3]) + if 
(rte_meter_trtcm_color_aware_check( + &tm, &tp, time, TM_TEST_TRTCM_PBS_DF + 1, in[3]) != out[3]) melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[3], out[3]); return 0; @@ -415,7 +451,7 @@ tm_test_trtcm_color_aware_check(void) out[1] = e_RTE_METER_YELLOW; out[2] = e_RTE_METER_YELLOW; out[3] = e_RTE_METER_RED; - if(tm_test_trtcm_aware_check(in, out) != 0) + if (tm_test_trtcm_aware_check(in, out) != 0) return -1; in[0] = in[1] = in[2] = in[3] = e_RTE_METER_YELLOW; @@ -423,7 +459,7 @@ tm_test_trtcm_color_aware_check(void) out[1] = e_RTE_METER_YELLOW; out[2] = e_RTE_METER_YELLOW; out[3] = e_RTE_METER_RED; - if(tm_test_trtcm_aware_check(in, out) != 0) + if (tm_test_trtcm_aware_check(in, out) != 0) return -1; in[0] = in[1] = in[2] = in[3] = e_RTE_METER_RED; @@ -431,7 +467,7 @@ tm_test_trtcm_color_aware_check(void) out[1] = e_RTE_METER_RED; out[2] = e_RTE_METER_RED; out[3] = e_RTE_METER_RED; - if(tm_test_trtcm_aware_check(in, out) != 0) + if (tm_test_trtcm_aware_check(in, out) != 0) return -1; return 0; @@ -443,22 +479,22 @@ tm_test_trtcm_color_aware_check(void) static int test_meter(void) { - if(tm_test_srtcm_config() != 0 ) + if (tm_test_srtcm_config() != 0) return -1; - if(tm_test_trtcm_config() != 0 ) + if (tm_test_trtcm_config() != 0) return -1; - if(tm_test_srtcm_color_blind_check() != 0) + if (tm_test_srtcm_color_blind_check() != 0) return -1; - if(tm_test_trtcm_color_blind_check()!= 0) + if (tm_test_trtcm_color_blind_check() != 0) return -1; - if(tm_test_srtcm_color_aware_check()!= 0) + if (tm_test_srtcm_color_aware_check() != 0) return -1; - if(tm_test_trtcm_color_aware_check()!= 0) + if (tm_test_trtcm_color_aware_check() != 0) return -1; return 0; diff --git a/test/test/test_pmd_perf.c b/test/test/test_pmd_perf.c index 911dd762..54bc4f6b 100644 --- a/test/test/test_pmd_perf.c +++ b/test/test/test_pmd_perf.c @@ -676,7 +676,7 @@ test_pmd_perf(void) signal(SIGUSR1, signal_handler); signal(SIGUSR2, signal_handler); - nb_ports = rte_eth_dev_count(); + nb_ports = rte_eth_dev_count_avail(); if (nb_ports < NB_ETHPORTS_USED) { printf("At least %u port(s) used for perf. test\n", NB_ETHPORTS_USED); @@ -698,7 +698,7 @@ test_pmd_perf(void) reset_count(); num = 0; - for (portid = 0; portid < nb_ports; portid++) { + RTE_ETH_FOREACH_DEV(portid) { if (socketid == -1) { socketid = rte_eth_dev_socket_id(portid); slave_id = alloc_lcore(socketid); @@ -791,7 +791,7 @@ test_pmd_perf(void) return -1; /* port tear down */ - for (portid = 0; portid < nb_ports; portid++) { + RTE_ETH_FOREACH_DEV(portid) { if (socketid != rte_eth_dev_socket_id(portid)) continue; diff --git a/test/test/test_pmd_ring.c b/test/test/test_pmd_ring.c index 4b891014..21962012 100644 --- a/test/test/test_pmd_ring.c +++ b/test/test/test_pmd_ring.c @@ -399,7 +399,7 @@ test_pmd_ring(void) int port, cmdl_port0 = -1; uint8_t nb_ports; - nb_ports = rte_eth_dev_count(); + nb_ports = rte_eth_dev_count_avail(); printf("nb_ports=%d\n", (int)nb_ports); /* create the rings and eth_rings in the test code. 
@@ -473,7 +473,7 @@ test_pmd_ring(void) return -1; /* find a port created with the --vdev=net_ring0 command line option */ - for (port = 0; port < nb_ports; port++) { + RTE_ETH_FOREACH_DEV(port) { struct rte_eth_dev_info dev_info; rte_eth_dev_info_get(port, &dev_info); diff --git a/test/test/test_power_acpi_cpufreq.c b/test/test/test_power_acpi_cpufreq.c index 3bfd0335..8da2dcc1 100644 --- a/test/test/test_power_acpi_cpufreq.c +++ b/test/test/test_power_acpi_cpufreq.c @@ -27,7 +27,7 @@ test_power_acpi_cpufreq(void) #define TEST_POWER_FREQS_NUM_MAX ((unsigned)RTE_MAX_LCORE_FREQS) #define TEST_POWER_SYSFILE_CUR_FREQ \ - "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq" + "/sys/devices/system/cpu/cpu%u/cpufreq/cpuinfo_cur_freq" static uint32_t total_freq_num; static uint32_t freqs[TEST_POWER_FREQS_NUM_MAX]; diff --git a/test/test/test_power_kvm_vm.c b/test/test/test_power_kvm_vm.c index 91b31c44..bce706de 100644 --- a/test/test/test_power_kvm_vm.c +++ b/test/test/test_power_kvm_vm.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2014 Intel Corporation + * Copyright(c) 2010-2018 Intel Corporation */ #include <stdio.h> @@ -98,7 +98,8 @@ test_power_kvm_vm(void) printf("Cannot initialise power management for lcore %u, this " "may occur if environment is not configured " "correctly(KVM VM) or operating in another valid " - "Power management environment\n", TEST_POWER_VM_LCORE_ID); + "Power management environment\n", + TEST_POWER_VM_LCORE_ID); rte_power_unset_env(); return -1; } @@ -175,6 +176,22 @@ test_power_kvm_vm(void) goto fail_all; } + /* Test KVM_VM Enable Turbo of valid core */ + ret = rte_power_freq_enable_turbo(TEST_POWER_VM_LCORE_ID); + if (ret == -1) { + printf("rte_power_freq_enable_turbo failed on valid lcore" + "%u\n", TEST_POWER_VM_LCORE_ID); + goto fail_all; + } + + /* Test KVM_VM Disable Turbo of valid core */ + ret = rte_power_freq_disable_turbo(TEST_POWER_VM_LCORE_ID); + if (ret == -1) { + printf("rte_power_freq_disable_turbo failed on valid lcore" + "%u\n", TEST_POWER_VM_LCORE_ID); + goto fail_all; + } + /* Test frequency up of valid lcore */ ret = rte_power_freq_up(TEST_POWER_VM_LCORE_ID); if (ret != 1) { diff --git a/test/test/test_reorder.c b/test/test/test_reorder.c index 65e4f38b..ccee4d08 100644 --- a/test/test/test_reorder.c +++ b/test/test/test_reorder.c @@ -146,11 +146,11 @@ test_reorder_insert(void) b = rte_reorder_create("test_insert", rte_socket_id(), size); TEST_ASSERT_NOT_NULL(b, "Failed to create reorder buffer"); - ret = rte_mempool_get_bulk(p, (void *)bufs, num_bufs); - TEST_ASSERT_SUCCESS(ret, "Error getting mbuf from pool"); - - for (i = 0; i < num_bufs; i++) + for (i = 0; i < num_bufs; i++) { + bufs[i] = rte_pktmbuf_alloc(p); + TEST_ASSERT_NOT_NULL(bufs[i], "Packet allocation failed\n"); bufs[i]->seqn = i; + } /* This should fill up order buffer: * reorder_seq = 0 @@ -165,6 +165,7 @@ test_reorder_insert(void) ret = -1; goto exit; } + bufs[i] = NULL; } /* early packet - should move mbufs to ready buf and move sequence window @@ -179,6 +180,7 @@ test_reorder_insert(void) ret = -1; goto exit; } + bufs[4] = NULL; /* early packet from current sequence window - full ready buffer */ bufs[5]->seqn = 2 * size; @@ -189,6 +191,7 @@ test_reorder_insert(void) ret = -1; goto exit; } + bufs[5] = NULL; /* late packet */ bufs[6]->seqn = 3 * size; @@ -199,11 +202,15 @@ test_reorder_insert(void) ret = -1; goto exit; } + bufs[6] = NULL; ret = 0; exit: - rte_mempool_put_bulk(p, (void *)bufs, num_bufs); rte_reorder_free(b); + for (i = 0; i < 
num_bufs; i++) { + if (bufs[i] != NULL) + rte_pktmbuf_free(bufs[i]); + } return ret; } @@ -219,6 +226,10 @@ test_reorder_drain(void) int ret = 0; unsigned i, cnt; + /* initialize all robufs to NULL */ + for (i = 0; i < num_bufs; i++) + robufs[i] = NULL; + /* This would create a reorder buffer instance consisting of: * reorder_seq = 0 * ready_buf: RB[size] = {NULL, NULL, NULL, NULL} @@ -227,9 +238,6 @@ b = rte_reorder_create("test_drain", rte_socket_id(), size); TEST_ASSERT_NOT_NULL(b, "Failed to create reorder buffer"); - ret = rte_mempool_get_bulk(p, (void *)bufs, num_bufs); - TEST_ASSERT_SUCCESS(ret, "Error getting mbuf from pool"); - /* Check no drained packets if reorder is empty */ cnt = rte_reorder_drain(b, robufs, 1); if (cnt != 0) { @@ -239,8 +247,11 @@ goto exit; } - for (i = 0; i < num_bufs; i++) + for (i = 0; i < num_bufs; i++) { + bufs[i] = rte_pktmbuf_alloc(p); + TEST_ASSERT_NOT_NULL(bufs[i], "Packet allocation failed\n"); bufs[i]->seqn = i; + } /* Insert packet with seqn 1: * reorder_seq = 0 * RB[] = {NULL, NULL, NULL, NULL} * OB[] = {1, NULL, NULL, NULL} */ rte_reorder_insert(b, bufs[1]); + bufs[1] = NULL; cnt = rte_reorder_drain(b, robufs, 1); if (cnt != 1) { @@ -256,6 +268,8 @@ ret = -1; goto exit; } + if (robufs[0] != NULL) + rte_pktmbuf_free(robufs[0]); /* Insert more packets * RB[] = {NULL, NULL, NULL, NULL} * OB[] = {2, 3, 4, NULL} */ rte_reorder_insert(b, bufs[2]); rte_reorder_insert(b, bufs[3]); + bufs[2] = NULL; + bufs[3] = NULL; /* Insert more packets * RB[] = {NULL, NULL, NULL, NULL} * OB[] = {NULL, 2, 3, 4} */ rte_reorder_insert(b, bufs[4]); + bufs[4] = NULL; /* Insert more packets * RB[] = {2, 3, 4, NULL} * OB[] = {NULL, NULL, 7, NULL} */ rte_reorder_insert(b, bufs[7]); + bufs[7] = NULL; /* drained expected packets */ cnt = rte_reorder_drain(b, robufs, 4); @@ -284,6 +302,10 @@ ret = -1; goto exit; } + for (i = 0; i < 3; i++) { + if (robufs[i] != NULL) + rte_pktmbuf_free(robufs[i]); + } /* * RB[] = {NULL, NULL, NULL, NULL} @@ -298,8 +320,13 @@ } ret = 0; exit: - rte_mempool_put_bulk(p, (void *)bufs, num_bufs); rte_reorder_free(b); + for (i = 0; i < num_bufs; i++) { + if (bufs[i] != NULL) + rte_pktmbuf_free(bufs[i]); + if (robufs[i] != NULL) + rte_pktmbuf_free(robufs[i]); + } return ret; } diff --git a/test/test/test_resource.c b/test/test/test_resource.c index a3a82f13..8f41e3ba 100644 --- a/test/test/test_resource.c +++ b/test/test/test_resource.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016 RehiveTech. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of RehiveTech nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
diff --git a/test/test/test_resource.c b/test/test/test_resource.c
index a3a82f13..8f41e3ba 100644
--- a/test/test/test_resource.c
+++ b/test/test/test_resource.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of RehiveTech nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
  */
 
 #include <stdio.h>
diff --git a/test/test/test_table.c b/test/test/test_table.c
index f01652dc..a4b0ed65 100644
--- a/test/test/test_table.c
+++ b/test/test/test_table.c
@@ -54,6 +54,17 @@ uint64_t pipeline_test_hash(void *key,
 	return signature;
 }
 
+uint32_t pipeline_test_hash_cuckoo(const void *key,
+	__attribute__((unused)) uint32_t key_size,
+	__attribute__((unused)) uint32_t seed)
+{
+	const uint32_t *k32 = key;
+	uint32_t ip_dst = rte_be_to_cpu_32(k32[0]);
+	uint32_t signature = ip_dst;
+
+	return signature;
+}
+
 static void app_free_resources(void) {
 	int i;
diff --git a/test/test/test_table.h b/test/test/test_table.h
index a4d3ca0c..a66342cb 100644
--- a/test/test/test_table.h
+++ b/test/test/test_table.h
@@ -6,6 +6,7 @@
 #include <rte_table_lpm.h>
 #include <rte_table_lpm_ipv6.h>
 #include <rte_table_hash.h>
+#include <rte_table_hash_cuckoo.h>
 #include <rte_table_array.h>
 #include <rte_pipeline.h>
 
@@ -106,6 +107,11 @@
 uint64_t pipeline_test_hash(
 	__attribute__((unused)) uint32_t key_size,
 	__attribute__((unused)) uint64_t seed);
 
+uint32_t pipeline_test_hash_cuckoo(
+	const void *key,
+	__attribute__((unused)) uint32_t key_size,
+	__attribute__((unused)) uint32_t seed);
+
 /* Extern variables */
 extern struct rte_pipeline *p;
 extern struct rte_ring *rings_rx[N_PORTS];
diff --git a/test/test/test_table_combined.c b/test/test/test_table_combined.c
index 5e8e119a..73ad0155 100644
--- a/test/test/test_table_combined.c
+++ b/test/test/test_table_combined.c
@@ -778,14 +778,14 @@ test_table_hash_cuckoo_combined(void)
 	int status, i;
 
 	/* Traffic flow */
-	struct rte_table_hash_params cuckoo_params = {
+	struct rte_table_hash_cuckoo_params cuckoo_params = {
 		.name = "TABLE",
 		.key_size = 32,
 		.key_offset = APP_METADATA_OFFSET(32),
 		.key_mask = NULL,
 		.n_keys = 1 << 16,
 		.n_buckets = 1 << 16,
-		.f_hash = pipeline_test_hash,
+		.f_hash = pipeline_test_hash_cuckoo,
 		.seed = 0,
 	};
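The cuckoo table changes above track an 18.05 API split: rte_table_hash_cuckoo_ops now takes its own rte_table_hash_cuckoo_params, whose f_hash returns a 32-bit signature, so the old cast of the 64-bit pipeline_test_hash no longer type-checks and the tests gain pipeline_test_hash_cuckoo. A sketch of creating such a table under those assumptions; the names, the toy hash, and the key/entry sizes are illustrative:

#include <stdint.h>
#include <rte_table_hash_cuckoo.h>

/* Toy 32-bit hash matching the new f_hash signature; illustrative
 * only, not suitable for real workloads. */
static uint32_t
toy_hash(const void *key, uint32_t key_size, uint32_t seed)
{
	(void)key_size;
	return seed ^ *(const uint32_t *)key;
}

static void *
make_cuckoo_table(int socket_id)
{
	struct rte_table_hash_cuckoo_params params = {
		.name = "toy_table",	/* a NULL name makes f_create fail */
		.key_size = 16,
		.key_offset = 0,
		.key_mask = NULL,
		.n_keys = 1 << 10,
		.n_buckets = 1 << 10,
		.f_hash = toy_hash,
		.seed = 0,
	};

	/* entry_size of 8 bytes is an arbitrary example value */
	return rte_table_hash_cuckoo_ops.f_create(&params, socket_id, 8);
}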
diff --git a/test/test/test_table_pipeline.c b/test/test/test_table_pipeline.c
index 055a1a4e..441338ac 100644
--- a/test/test/test_table_pipeline.c
+++ b/test/test/test_table_pipeline.c
@@ -69,9 +69,9 @@
 rte_pipeline_table_action_handler_hit
 table_action_stub_hit(struct rte_pipeline *p, struct rte_mbuf **pkts,
 	uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg);
 
-rte_pipeline_table_action_handler_miss
+static int
 table_action_stub_miss(struct rte_pipeline *p, struct rte_mbuf **pkts,
-	uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg);
+	uint64_t pkts_mask, struct rte_pipeline_table_entry *entry, void *arg);
 
 rte_pipeline_table_action_handler_hit
 table_action_0x00(__attribute__((unused)) struct rte_pipeline *p,
@@ -101,11 +101,11 @@ table_action_stub_hit(__attribute__((unused)) struct rte_pipeline *p,
 	return 0;
 }
 
-rte_pipeline_table_action_handler_miss
+static int
 table_action_stub_miss(struct rte_pipeline *p,
 	__attribute__((unused)) struct rte_mbuf **pkts,
 	uint64_t pkts_mask,
-	__attribute__((unused)) struct rte_pipeline_table_entry **entry,
+	__attribute__((unused)) struct rte_pipeline_table_entry *entry,
 	__attribute__((unused)) void *arg)
 {
 	printf("STUB Table Action Miss - setting mask to 0x%"PRIx64"\n",
@@ -517,8 +517,7 @@ test_table_pipeline(void)
 
 	/* TEST - one packet per port */
 	action_handler_hit = NULL;
-	action_handler_miss =
-		(rte_pipeline_table_action_handler_miss) table_action_stub_miss;
+	action_handler_miss = table_action_stub_miss;
 	table_entry_default_action = RTE_PIPELINE_ACTION_PORT;
 	override_miss_mask = 0x01; /* one packet per port */
 	setup_pipeline(e_TEST_STUB);
@@ -553,8 +552,7 @@
 	printf("TEST - two tables, hitmask override to 0x01\n");
 	connect_miss_action_to_table = 1;
-	action_handler_miss =
-		(rte_pipeline_table_action_handler_miss)table_action_stub_miss;
+	action_handler_miss = table_action_stub_miss;
 	override_miss_mask = 0x01;
 	setup_pipeline(e_TEST_STUB);
 	if (test_pipeline_single_filter(e_TEST_STUB, 2) < 0)
diff --git a/test/test/test_table_tables.c b/test/test/test_table_tables.c
index a7a69b85..20df2e92 100644
--- a/test/test/test_table_tables.c
+++ b/test/test/test_table_tables.c
@@ -903,14 +903,14 @@ test_table_hash_cuckoo(void)
 	uint32_t entry_size = 1;
 
 	/* Initialize params and create tables */
-	struct rte_table_hash_params cuckoo_params = {
+	struct rte_table_hash_cuckoo_params cuckoo_params = {
 		.name = "TABLE",
 		.key_size = 32,
 		.key_offset = APP_METADATA_OFFSET(32),
 		.key_mask = NULL,
 		.n_keys = 1 << 16,
 		.n_buckets = 1 << 16,
-		.f_hash = (rte_table_hash_op_hash)pipeline_test_hash,
+		.f_hash = pipeline_test_hash_cuckoo,
 		.seed = 0,
 	};
@@ -941,7 +941,7 @@
 	if (table != NULL)
 		return -4;
 
-	cuckoo_params.f_hash = pipeline_test_hash;
+	cuckoo_params.f_hash = pipeline_test_hash_cuckoo;
 	cuckoo_params.name = NULL;
 
 	table = rte_table_hash_cuckoo_ops.f_create(&cuckoo_params,
diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 2f5b31db..f8ddc2db 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -216,10 +216,11 @@
 static void
 virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
 {}
 
-static void
+static int
 virtual_ethdev_mac_address_set(__rte_unused struct rte_eth_dev *dev,
 		__rte_unused struct ether_addr *addr)
 {
+	return 0;
 }
 
 static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
@@ -589,6 +590,8 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
 	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
 	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
 
+	rte_eth_dev_probing_finish(eth_dev);
+
 	return eth_dev->data->port_id;
 
 err:
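The virtual_pmd hunks mirror two 18.05 ethdev conventions: the mac_addr_set device op now reports status through an int return, and a driver signals that a port is fully set up by calling rte_eth_dev_probing_finish() as the last step of device creation, which is what fires the RTE_ETH_EVENT_NEW callback. A minimal sketch of both, with hypothetical names and a stub that always succeeds:

#include <rte_ethdev_driver.h>
#include <rte_ether.h>

/* Hypothetical mac_addr_set op: since 18.05 it returns int so a PMD
 * can report failure; a real driver would program the NIC here. */
static int
toy_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
		struct ether_addr *addr __rte_unused)
{
	return 0;
}

/* Hypothetical tail of a device-create path: probing_finish comes
 * last, after the rx/tx burst functions are wired up, so event
 * callbacks only ever see a usable port. */
static uint16_t
toy_finish_create(struct rte_eth_dev *eth_dev)
{
	rte_eth_dev_probing_finish(eth_dev);
	return eth_dev->data->port_id;
}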