author     Luca Boccassi <luca.boccassi@gmail.com>   2017-11-08 14:15:11 +0000
committer  Luca Boccassi <luca.boccassi@gmail.com>   2017-11-08 14:45:54 +0000
commit     055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree       8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /examples/l3fwd
parent     f239aed5e674965691846e8ce3f187dd47523689 (diff)
New upstream version 17.11-rc3
Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'examples/l3fwd')
-rw-r--r--   examples/l3fwd/l3fwd.h               |  10
-rw-r--r--   examples/l3fwd/l3fwd_altivec.h       | 284
-rw-r--r--   examples/l3fwd/l3fwd_common.h        |   2
-rw-r--r--   examples/l3fwd/l3fwd_em.c            |  19
-rw-r--r--   examples/l3fwd/l3fwd_em.h            |   6
-rw-r--r--   examples/l3fwd/l3fwd_em_hlm.h        |  14
-rw-r--r--   examples/l3fwd/l3fwd_em_sequential.h |   4
-rw-r--r--   examples/l3fwd/l3fwd_lpm.c           |  20
-rw-r--r--   examples/l3fwd/l3fwd_lpm.h           |   4
-rw-r--r--   examples/l3fwd/l3fwd_lpm_altivec.h   | 164
-rw-r--r--   examples/l3fwd/l3fwd_lpm_neon.h      |   4
-rw-r--r--   examples/l3fwd/l3fwd_lpm_sse.h       |   4
-rw-r--r--   examples/l3fwd/l3fwd_neon.h          |   3
-rw-r--r--   examples/l3fwd/main.c                |  32
14 files changed, 511 insertions(+), 59 deletions(-)
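
The bulk of this import touches two things in l3fwd: every port identifier (rx-queue entries, the send helpers, the ptype-parse callbacks, the main-loop locals) is widened from uint8_t to uint16_t to match the 16-bit ethdev port ids in DPDK 17.11, and new AltiVec headers (l3fwd_altivec.h, l3fwd_lpm_altivec.h) add a vectorized forwarding path for ppc64. The small standalone C sketch below is not part of the patch; it only illustrates the truncation that the widening avoids once a port id can exceed 255.

#include <stdint.h>
#include <stdio.h>

/* Illustration only, not part of the patch: with 16-bit ethdev port ids,
 * keeping l3fwd's port fields as uint8_t would silently truncate any
 * port id above 255. */
int main(void)
{
	uint16_t port_id = 300;               /* a legal 16-bit port id */
	uint8_t old_field = (uint8_t)port_id; /* wraps to 44 */

	printf("port_id=%u, stored in uint8_t=%u\n", port_id, old_field);
	return 0;
}
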
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h index 011ba148..4bb15943 100644 --- a/examples/l3fwd/l3fwd.h +++ b/examples/l3fwd/l3fwd.h @@ -83,7 +83,7 @@ struct mbuf_table { }; struct lcore_rx_queue { - uint8_t port_id; + uint16_t port_id; uint8_t queue_id; } __rte_cache_aligned; @@ -117,7 +117,7 @@ extern struct lcore_conf lcore_conf[RTE_MAX_LCORE]; /* Send burst of packets on an output interface */ static inline int -send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port) +send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) { struct rte_mbuf **m_table; int ret; @@ -139,7 +139,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port) /* Enqueue a single packet, and send burst if queue is filled */ static inline int send_single_packet(struct lcore_conf *qconf, - struct rte_mbuf *m, uint8_t port) + struct rte_mbuf *m, uint16_t port) { uint16_t len; @@ -212,11 +212,11 @@ int lpm_check_ptype(int portid); uint16_t -em_cb_parse_ptype(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[], +em_cb_parse_ptype(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param); uint16_t -lpm_cb_parse_ptype(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[], +lpm_cb_parse_ptype(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param); int diff --git a/examples/l3fwd/l3fwd_altivec.h b/examples/l3fwd/l3fwd_altivec.h new file mode 100644 index 00000000..a1d25eaa --- /dev/null +++ b/examples/l3fwd/l3fwd_altivec.h @@ -0,0 +1,284 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * Copyright(c) 2017 IBM Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef _L3FWD_ALTIVEC_H_ +#define _L3FWD_ALTIVEC_H_ + +#include "l3fwd.h" +#include "l3fwd_common.h" + +/* + * Update source and destination MAC addresses in the ethernet header. + * Perform RFC1812 checks and updates for IPV4 packets. 
+ */ +static inline void +processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP]) +{ + vector unsigned int te[FWDSTEP]; + vector unsigned int ve[FWDSTEP]; + vector unsigned int *p[FWDSTEP]; + + p[0] = rte_pktmbuf_mtod(pkt[0], vector unsigned int *); + p[1] = rte_pktmbuf_mtod(pkt[1], vector unsigned int *); + p[2] = rte_pktmbuf_mtod(pkt[2], vector unsigned int *); + p[3] = rte_pktmbuf_mtod(pkt[3], vector unsigned int *); + + ve[0] = (vector unsigned int)val_eth[dst_port[0]]; + te[0] = *p[0]; + + ve[1] = (vector unsigned int)val_eth[dst_port[1]]; + te[1] = *p[1]; + + ve[2] = (vector unsigned int)val_eth[dst_port[2]]; + te[2] = *p[2]; + + ve[3] = (vector unsigned int)val_eth[dst_port[3]]; + te[3] = *p[3]; + + /* Update first 12 bytes, keep rest bytes intact. */ + te[0] = (vector unsigned int)vec_sel( + (vector unsigned short)ve[0], + (vector unsigned short)te[0], + (vector unsigned short) {0, 0, 0, 0, + 0, 0, 0xffff, 0xffff}); + + te[1] = (vector unsigned int)vec_sel( + (vector unsigned short)ve[1], + (vector unsigned short)te[1], + (vector unsigned short) {0, 0, 0, 0, + 0, 0, 0xffff, 0xffff}); + + te[2] = (vector unsigned int)vec_sel( + (vector unsigned short)ve[2], + (vector unsigned short)te[2], + (vector unsigned short) {0, 0, 0, 0, 0, + 0, 0xffff, 0xffff}); + + te[3] = (vector unsigned int)vec_sel( + (vector unsigned short)ve[3], + (vector unsigned short)te[3], + (vector unsigned short) {0, 0, 0, 0, + 0, 0, 0xffff, 0xffff}); + + *p[0] = te[0]; + *p[1] = te[1]; + *p[2] = te[2]; + *p[3] = te[3]; + + rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1), + &dst_port[0], pkt[0]->packet_type); + rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1), + &dst_port[1], pkt[1]->packet_type); + rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1), + &dst_port[2], pkt[2]->packet_type); + rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1), + &dst_port[3], pkt[3]->packet_type); +} + +/* + * Group consecutive packets with the same destination port in bursts of 4. + * Suppose we have array of destination ports: + * dst_port[] = {a, b, c, d,, e, ... } + * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>. + * We doing 4 comparisons at once and the result is 4 bit mask. + * This mask is used as an index into prebuild array of pnum values. + */ +static inline uint16_t * +port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, vector unsigned short dp1, + vector unsigned short dp2) +{ + union { + uint16_t u16[FWDSTEP + 1]; + uint64_t u64; + } *pnum = (void *)pn; + + int32_t v; + + v = vec_any_eq(dp1, dp2); + + + /* update last port counter. */ + lp[0] += gptbl[v].lpv; + + /* if dest port value has changed. */ + if (v != GRPMSK) { + pnum->u64 = gptbl[v].pnum; + pnum->u16[FWDSTEP] = 1; + lp = pnum->u16 + gptbl[v].idx; + } + + return lp; +} + +/** + * Process one packet: + * Update source and destination MAC addresses in the ethernet header. + * Perform RFC1812 checks and updates for IPV4 packets. 
+ */ +static inline void +process_packet(struct rte_mbuf *pkt, uint16_t *dst_port) +{ + struct ether_hdr *eth_hdr; + vector unsigned int te, ve; + + eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + + te = *(vector unsigned int *)eth_hdr; + ve = (vector unsigned int)val_eth[dst_port[0]]; + + rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port, + pkt->packet_type); + + /* dynamically vec_sel te and ve for MASK_ETH (0x3f) */ + te = (vector unsigned int)vec_sel( + (vector unsigned short)ve, + (vector unsigned short)te, + (vector unsigned short){0, 0, 0, 0, + 0, 0, 0xffff, 0xffff}); + + *(vector unsigned int *)eth_hdr = te; +} + +/** + * Send packets burst from pkts_burst to the ports in dst_port array + */ +static __rte_always_inline void +send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, + uint16_t dst_port[MAX_PKT_BURST], int nb_rx) +{ + int32_t k; + int j = 0; + uint16_t dlp; + uint16_t *lp; + uint16_t pnum[MAX_PKT_BURST + 1]; + + /* + * Finish packet processing and group consecutive + * packets with the same destination port. + */ + k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP); + if (k != 0) { + vector unsigned short dp1, dp2; + + lp = pnum; + lp[0] = 1; + + processx4_step3(pkts_burst, dst_port); + + /* dp1: <d[0], d[1], d[2], d[3], ... > */ + dp1 = *(vector unsigned short *)dst_port; + + for (j = FWDSTEP; j != k; j += FWDSTEP) { + processx4_step3(&pkts_burst[j], &dst_port[j]); + + /* + * dp2: + * <d[j-3], d[j-2], d[j-1], d[j], ... > + */ + dp2 = *((vector unsigned short *) + &dst_port[j - FWDSTEP + 1]); + lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2); + + /* + * dp1: + * <d[j], d[j+1], d[j+2], d[j+3], ... > + */ + dp1 = vec_sro(dp2, (vector unsigned char) { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, (FWDSTEP - 1) * sizeof(dst_port[0])}); + } + + /* + * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... > + */ + dp2 = vec_perm(dp1, (vector unsigned short){}, + (vector unsigned char){0xf9}); + lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2); + + /* + * remove values added by the last repeated + * dst port. + */ + lp[0]--; + dlp = dst_port[j - 1]; + } else { + /* set dlp and lp to the never used values. */ + dlp = BAD_PORT - 1; + lp = pnum + MAX_PKT_BURST; + } + + /* Process up to last 3 packets one by one. */ + switch (nb_rx % FWDSTEP) { + case 3: + process_packet(pkts_burst[j], dst_port + j); + GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j); + j++; + /* fall-through */ + case 2: + process_packet(pkts_burst[j], dst_port + j); + GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j); + j++; + /* fall-through */ + case 1: + process_packet(pkts_burst[j], dst_port + j); + GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j); + j++; + } + + /* + * Send packets out, through destination port. + * Consecutive packets with the same destination port + * are already grouped together. + * If destination port for the packet equals BAD_PORT, + * then free the packet without sending it out. 
+ */ + for (j = 0; j < nb_rx; j += k) { + + int32_t m; + uint16_t pn; + + pn = dst_port[j]; + k = pnum[j]; + + if (likely(pn != BAD_PORT)) + send_packetsx4(qconf, pn, pkts_burst + j, k); + else + for (m = j; m != j + k; m++) + rte_pktmbuf_free(pkts_burst[m]); + + } +} + +#endif /* _L3FWD_ALTIVEC_H_ */ diff --git a/examples/l3fwd/l3fwd_common.h b/examples/l3fwd/l3fwd_common.h index 2867365d..7002a43a 100644 --- a/examples/l3fwd/l3fwd_common.h +++ b/examples/l3fwd/l3fwd_common.h @@ -207,7 +207,7 @@ static const struct { }; static __rte_always_inline void -send_packetsx4(struct lcore_conf *qconf, uint8_t port, struct rte_mbuf *m[], +send_packetsx4(struct lcore_conf *qconf, uint16_t port, struct rte_mbuf *m[], uint32_t num) { uint32_t len, j, n; diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c index 53d081bd..2b7c173b 100644 --- a/examples/l3fwd/l3fwd_em.c +++ b/examples/l3fwd/l3fwd_em.c @@ -274,8 +274,8 @@ em_mask_key(void *key, xmm_t mask) #error No vector engine (SSE, NEON, ALTIVEC) available, check your toolchain #endif -static inline uint8_t -em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct) +static inline uint16_t +em_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct) { int ret = 0; union ipv4_5tuple_host key; @@ -292,11 +292,11 @@ em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct) /* Find destination port */ ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key); - return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]); + return (ret < 0) ? portid : ipv4_l3fwd_out_if[ret]; } -static inline uint8_t -em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct) +static inline uint16_t +em_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct) { int ret = 0; union ipv6_5tuple_host key; @@ -325,7 +325,7 @@ em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct) /* Find destination port */ ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key); - return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]); + return (ret < 0) ? 
portid : ipv6_l3fwd_out_if[ret]; } #if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON @@ -628,7 +628,7 @@ em_parse_ptype(struct rte_mbuf *m) } uint16_t -em_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused, +em_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts __rte_unused, void *user_param __rte_unused) @@ -649,7 +649,8 @@ em_main_loop(__attribute__((unused)) void *dummy) unsigned lcore_id; uint64_t prev_tsc, diff_tsc, cur_tsc; int i, nb_rx; - uint8_t portid, queueid; + uint8_t queueid; + uint16_t portid; struct lcore_conf *qconf; const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; @@ -671,7 +672,7 @@ em_main_loop(__attribute__((unused)) void *dummy) portid = qconf->rx_queue_list[i].port_id; queueid = qconf->rx_queue_list[i].queue_id; RTE_LOG(INFO, L3FWD, - " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", + " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", lcore_id, portid, queueid); } diff --git a/examples/l3fwd/l3fwd_em.h b/examples/l3fwd/l3fwd_em.h index d509a1fc..302291d7 100644 --- a/examples/l3fwd/l3fwd_em.h +++ b/examples/l3fwd/l3fwd_em.h @@ -35,12 +35,12 @@ #define __L3FWD_EM_H__ static __rte_always_inline void -l3fwd_em_simple_forward(struct rte_mbuf *m, uint8_t portid, +l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid, struct lcore_conf *qconf) { struct ether_hdr *eth_hdr; struct ipv4_hdr *ipv4_hdr; - uint8_t dst_port; + uint16_t dst_port; uint32_t tcp_or_udp; uint32_t l3_ptypes; @@ -112,7 +112,7 @@ l3fwd_em_simple_forward(struct rte_mbuf *m, uint8_t portid, */ static inline void l3fwd_em_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, - uint8_t portid, struct lcore_conf *qconf) + uint16_t portid, struct lcore_conf *qconf) { int32_t j; diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h index 520672d5..9d7afe05 100644 --- a/examples/l3fwd/l3fwd_em_hlm.h +++ b/examples/l3fwd/l3fwd_em_hlm.h @@ -52,7 +52,7 @@ static __rte_always_inline void em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[], - uint8_t portid, uint16_t dst_port[]) + uint16_t portid, uint16_t dst_port[]) { int i; int32_t ret[EM_HASH_LOOKUP_COUNT]; @@ -68,7 +68,7 @@ em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[], EM_HASH_LOOKUP_COUNT, ret); for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) { - dst_port[i] = (uint8_t) ((ret[i] < 0) ? + dst_port[i] = ((ret[i] < 0) ? portid : ipv4_l3fwd_out_if[ret[i]]); if (dst_port[i] >= RTE_MAX_ETHPORTS || @@ -79,7 +79,7 @@ em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[], static __rte_always_inline void em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[], - uint8_t portid, uint16_t dst_port[]) + uint16_t portid, uint16_t dst_port[]) { int i; int32_t ret[EM_HASH_LOOKUP_COUNT]; @@ -95,7 +95,7 @@ em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[], EM_HASH_LOOKUP_COUNT, ret); for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) { - dst_port[i] = (uint8_t) ((ret[i] < 0) ? + dst_port[i] = ((ret[i] < 0) ? 
portid : ipv6_l3fwd_out_if[ret[i]]); if (dst_port[i] >= RTE_MAX_ETHPORTS || @@ -106,9 +106,9 @@ em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[], static __rte_always_inline uint16_t em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt, - uint8_t portid) + uint16_t portid) { - uint8_t next_hop; + uint16_t next_hop; struct ipv4_hdr *ipv4_hdr; struct ipv6_hdr *ipv6_hdr; uint32_t tcp_or_udp; @@ -158,7 +158,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt, */ static inline void l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, - uint8_t portid, struct lcore_conf *qconf) + uint16_t portid, struct lcore_conf *qconf) { int32_t i, j, pos; uint16_t dst_port[MAX_PKT_BURST]; diff --git a/examples/l3fwd/l3fwd_em_sequential.h b/examples/l3fwd/l3fwd_em_sequential.h index cb7c2abb..fa89f0f3 100644 --- a/examples/l3fwd/l3fwd_em_sequential.h +++ b/examples/l3fwd/l3fwd_em_sequential.h @@ -51,7 +51,7 @@ static __rte_always_inline uint16_t em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt, - uint8_t portid) + uint16_t portid) { uint8_t next_hop; struct ipv4_hdr *ipv4_hdr; @@ -103,7 +103,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt, */ static inline void l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, - uint8_t portid, struct lcore_conf *qconf) + uint16_t portid, struct lcore_conf *qconf) { int32_t i, j; uint16_t dst_port[MAX_PKT_BURST]; diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c index ff1e4035..2d0e1724 100644 --- a/examples/l3fwd/l3fwd_lpm.c +++ b/examples/l3fwd/l3fwd_lpm.c @@ -105,7 +105,7 @@ struct rte_lpm *ipv4_l3fwd_lpm_lookup_struct[NB_SOCKETS]; struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS]; static inline uint16_t -lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct) +lpm_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct) { uint32_t next_hop; struct rte_lpm *ipv4_l3fwd_lookup_struct = @@ -117,7 +117,7 @@ lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct) } static inline uint16_t -lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct) +lpm_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct) { uint32_t next_hop; struct rte_lpm6 *ipv6_l3fwd_lookup_struct = @@ -130,7 +130,7 @@ lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct) static __rte_always_inline uint16_t lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt, - uint8_t portid) + uint16_t portid) { struct ipv6_hdr *ipv6_hdr; struct ipv4_hdr *ipv4_hdr; @@ -162,7 +162,7 @@ lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt, */ static __rte_always_inline uint16_t lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt, - uint32_t dst_ipv4, uint8_t portid) + uint32_t dst_ipv4, uint16_t portid) { uint32_t next_hop; struct ipv6_hdr *ipv6_hdr; @@ -191,6 +191,8 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt, #include "l3fwd_lpm_sse.h" #elif defined RTE_MACHINE_CPUFLAG_NEON #include "l3fwd_lpm_neon.h" +#elif defined(RTE_ARCH_PPC_64) +#include "l3fwd_lpm_altivec.h" #else #include "l3fwd_lpm.h" #endif @@ -203,7 +205,8 @@ lpm_main_loop(__attribute__((unused)) void *dummy) unsigned lcore_id; uint64_t prev_tsc, diff_tsc, cur_tsc; int i, nb_rx; - uint8_t portid, queueid; + uint16_t portid; + uint8_t queueid; struct lcore_conf *qconf; const uint64_t drain_tsc = 
(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; @@ -225,7 +228,7 @@ lpm_main_loop(__attribute__((unused)) void *dummy) portid = qconf->rx_queue_list[i].port_id; queueid = qconf->rx_queue_list[i].queue_id; RTE_LOG(INFO, L3FWD, - " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", + " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", lcore_id, portid, queueid); } @@ -263,7 +266,8 @@ lpm_main_loop(__attribute__((unused)) void *dummy) if (nb_rx == 0) continue; -#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON +#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON \ + || defined RTE_ARCH_PPC_64 l3fwd_lpm_send_packets(nb_rx, pkts_burst, portid, qconf); #else @@ -413,7 +417,7 @@ lpm_parse_ptype(struct rte_mbuf *m) } uint16_t -lpm_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused, +lpm_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts __rte_unused, void *user_param __rte_unused) diff --git a/examples/l3fwd/l3fwd_lpm.h b/examples/l3fwd/l3fwd_lpm.h index 55c3e832..53b7fc80 100644 --- a/examples/l3fwd/l3fwd_lpm.h +++ b/examples/l3fwd/l3fwd_lpm.h @@ -35,7 +35,7 @@ #define __L3FWD_LPM_H__ static __rte_always_inline void -l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint8_t portid, +l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint16_t portid, struct lcore_conf *qconf) { struct ether_hdr *eth_hdr; @@ -104,7 +104,7 @@ l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint8_t portid, static inline void l3fwd_lpm_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, - uint8_t portid, struct lcore_conf *qconf) + uint16_t portid, struct lcore_conf *qconf) { int32_t j; diff --git a/examples/l3fwd/l3fwd_lpm_altivec.h b/examples/l3fwd/l3fwd_lpm_altivec.h new file mode 100644 index 00000000..36ca983f --- /dev/null +++ b/examples/l3fwd/l3fwd_lpm_altivec.h @@ -0,0 +1,164 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * Copyright(c) 2017 IBM Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __L3FWD_LPM_ALTIVEC_H__ +#define __L3FWD_LPM_ALTIVEC_H__ + +#include "l3fwd_altivec.h" + +/* + * Read packet_type and destination IPV4 addresses from 4 mbufs. + */ +static inline void +processx4_step1(struct rte_mbuf *pkt[FWDSTEP], + vector unsigned int *dip, + uint32_t *ipv4_flag) +{ + struct ipv4_hdr *ipv4_hdr; + struct ether_hdr *eth_hdr; + uint32_t x0, x1, x2, x3; + + eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *); + ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1); + x0 = ipv4_hdr->dst_addr; + ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4; + + rte_compiler_barrier(); + eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *); + ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1); + x1 = ipv4_hdr->dst_addr; + ipv4_flag[0] &= pkt[1]->packet_type; + + rte_compiler_barrier(); + eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *); + ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1); + x2 = ipv4_hdr->dst_addr; + ipv4_flag[0] &= pkt[2]->packet_type; + + rte_compiler_barrier(); + eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *); + ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1); + x3 = ipv4_hdr->dst_addr; + ipv4_flag[0] &= pkt[3]->packet_type; + + rte_compiler_barrier(); + dip[0] = (vector unsigned int){x0, x1, x2, x3}; +} + +/* + * Lookup into LPM for destination port. + * If lookup fails, use incoming port (portid) as destination port. + */ +static inline void +processx4_step2(const struct lcore_conf *qconf, + vector unsigned int dip, + uint32_t ipv4_flag, + uint8_t portid, + struct rte_mbuf *pkt[FWDSTEP], + uint16_t dprt[FWDSTEP]) +{ + rte_xmm_t dst; + const vector unsigned char bswap_mask = (vector unsigned char){ + 3, 2, 1, 0, + 7, 6, 5, 4, + 11, 10, 9, 8, + 15, 14, 13, 12}; + + /* Byte swap 4 IPV4 addresses. */ + dip = (vector unsigned int)vec_perm(*(vector unsigned char *)&dip, + (vector unsigned char){}, bswap_mask); + + /* if all 4 packets are IPV4. */ + if (likely(ipv4_flag)) { + rte_lpm_lookupx4(qconf->ipv4_lookup_struct, (xmm_t)dip, + (uint32_t *)&dst, portid); + /* get rid of unused upper 16 bit for each dport. */ + dst.x = (xmm_t)vec_packs(dst.x, dst.x); + *(uint64_t *)dprt = dst.u64[0]; + } else { + dst.x = (xmm_t)dip; + dprt[0] = lpm_get_dst_port_with_ipv4(qconf, pkt[0], + dst.u32[0], portid); + dprt[1] = lpm_get_dst_port_with_ipv4(qconf, pkt[1], + dst.u32[1], portid); + dprt[2] = lpm_get_dst_port_with_ipv4(qconf, pkt[2], + dst.u32[2], portid); + dprt[3] = lpm_get_dst_port_with_ipv4(qconf, pkt[3], + dst.u32[3], portid); + } +} + +/* + * Buffer optimized handling of packets, invoked + * from main_loop. 
+ */ +static inline void +l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, + uint8_t portid, struct lcore_conf *qconf) +{ + int32_t j; + uint16_t dst_port[MAX_PKT_BURST]; + vector unsigned int dip[MAX_PKT_BURST / FWDSTEP]; + uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP]; + const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP); + + for (j = 0; j != k; j += FWDSTEP) + processx4_step1(&pkts_burst[j], &dip[j / FWDSTEP], + &ipv4_flag[j / FWDSTEP]); + + for (j = 0; j != k; j += FWDSTEP) + processx4_step2(qconf, dip[j / FWDSTEP], + ipv4_flag[j / FWDSTEP], + portid, &pkts_burst[j], &dst_port[j]); + + /* Classify last up to 3 packets one by one */ + switch (nb_rx % FWDSTEP) { + case 3: + dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid); + j++; + /* fall-through */ + case 2: + dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid); + j++; + /* fall-through */ + case 1: + dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid); + j++; + /* fall-through */ + } + + send_packets_multi(qconf, pkts_burst, dst_port, nb_rx); +} + +#endif /* __L3FWD_LPM_ALTIVEC_H__ */ diff --git a/examples/l3fwd/l3fwd_lpm_neon.h b/examples/l3fwd/l3fwd_lpm_neon.h index baedbfe8..85f314d1 100644 --- a/examples/l3fwd/l3fwd_lpm_neon.h +++ b/examples/l3fwd/l3fwd_lpm_neon.h @@ -82,7 +82,7 @@ static inline void processx4_step2(const struct lcore_conf *qconf, int32x4_t dip, uint32_t ipv4_flag, - uint8_t portid, + uint16_t portid, struct rte_mbuf *pkt[FWDSTEP], uint16_t dprt[FWDSTEP]) { @@ -115,7 +115,7 @@ processx4_step2(const struct lcore_conf *qconf, */ static inline void l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, - uint8_t portid, struct lcore_conf *qconf) + uint16_t portid, struct lcore_conf *qconf) { int32_t i = 0, j = 0; uint16_t dst_port[MAX_PKT_BURST]; diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h index 4e294c84..d474396e 100644 --- a/examples/l3fwd/l3fwd_lpm_sse.h +++ b/examples/l3fwd/l3fwd_lpm_sse.h @@ -79,7 +79,7 @@ static inline void processx4_step2(const struct lcore_conf *qconf, __m128i dip, uint32_t ipv4_flag, - uint8_t portid, + uint16_t portid, struct rte_mbuf *pkt[FWDSTEP], uint16_t dprt[FWDSTEP]) { @@ -112,7 +112,7 @@ processx4_step2(const struct lcore_conf *qconf, */ static inline void l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, - uint8_t portid, struct lcore_conf *qconf) + uint16_t portid, struct lcore_conf *qconf) { int32_t j; uint16_t dst_port[MAX_PKT_BURST]; diff --git a/examples/l3fwd/l3fwd_neon.h b/examples/l3fwd/l3fwd_neon.h index 42d50d3c..b319b5a9 100644 --- a/examples/l3fwd/l3fwd_neon.h +++ b/examples/l3fwd/l3fwd_neon.h @@ -114,6 +114,7 @@ port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, uint16x8_t dp1, /* update last port counter. */ lp[0] += gptbl[v].lpv; + rte_compiler_barrier(); /* if dest port value has changed. */ if (v != GRPMSK) { @@ -192,7 +193,7 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, * dp1: * <d[j], d[j+1], d[j+2], d[j+3], ... 
> */ - dp1 = vextq_u16(dp1, dp1, FWDSTEP - 1); + dp1 = vextq_u16(dp2, dp1, FWDSTEP - 1); } /* diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index 81995fdb..6229568f 100644 --- a/examples/l3fwd/main.c +++ b/examples/l3fwd/main.c @@ -50,7 +50,6 @@ #include <rte_log.h> #include <rte_memory.h> #include <rte_memcpy.h> -#include <rte_memzone.h> #include <rte_eal.h> #include <rte_launch.h> #include <rte_atomic.h> @@ -60,7 +59,6 @@ #include <rte_per_lcore.h> #include <rte_branch_prediction.h> #include <rte_interrupts.h> -#include <rte_pci.h> #include <rte_random.h> #include <rte_debug.h> #include <rte_ether.h> @@ -124,7 +122,7 @@ uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT; struct lcore_conf lcore_conf[RTE_MAX_LCORE]; struct lcore_params { - uint8_t port_id; + uint16_t port_id; uint8_t queue_id; uint8_t lcore_id; } __rte_cache_aligned; @@ -245,7 +243,7 @@ check_lcore_params(void) static int check_port_config(const unsigned nb_ports) { - unsigned portid; + uint16_t portid; uint16_t i; for (i = 0; i < nb_lcore_params; ++i) { @@ -263,7 +261,7 @@ check_port_config(const unsigned nb_ports) } static uint8_t -get_port_n_rx_queues(const uint8_t port) +get_port_n_rx_queues(const uint16_t port) { int queue = -1; uint16_t i; @@ -445,7 +443,7 @@ parse_config(const char *q_arg) static void parse_eth_dest(const char *optarg) { - uint8_t portid; + uint16_t portid; char *port_end; uint8_t c, *dest, peer_addr[6]; @@ -750,11 +748,12 @@ init_mem(unsigned nb_mbuf) /* Check the link status of all ports in up to 9s, and print them finally */ static void -check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) +check_all_ports_link_status(uint16_t port_num, uint32_t port_mask) { #define CHECK_INTERVAL 100 /* 100ms */ #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ - uint8_t portid, count, all_ports_up, print_flag = 0; + uint16_t portid; + uint8_t count, all_ports_up, print_flag = 0; struct rte_eth_link link; printf("\nChecking link status"); @@ -773,14 +772,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) /* print link status if flag set */ if (print_flag == 1) { if (link.link_status) - printf("Port %d Link Up - speed %u " - "Mbps - %s\n", (uint8_t)portid, - (unsigned)link.link_speed, + printf( + "Port%d Link Up. Speed %u Mbps -%s\n", + portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? ("full-duplex") : ("half-duplex\n")); else - printf("Port %d Link Down\n", - (uint8_t)portid); + printf("Port %d Link Down\n", portid); continue; } /* clear all_ports_up flag if any link down */ @@ -818,7 +816,7 @@ signal_handler(int signum) } static int -prepare_ptype_parser(uint8_t portid, uint16_t queueid) +prepare_ptype_parser(uint16_t portid, uint16_t queueid) { if (parse_ptype) { printf("Port %d: softly parse packet type info\n", portid); @@ -847,10 +845,10 @@ main(int argc, char **argv) struct rte_eth_txconf *txconf; int ret; unsigned nb_ports; - uint16_t queueid; + uint16_t queueid, portid; unsigned lcore_id; uint32_t n_tx_queue, nb_lcores; - uint8_t portid, nb_rx_queue, queue, socketid; + uint8_t nb_rx_queue, queue, socketid; /* init EAL */ ret = rte_eal_init(argc, argv); @@ -1048,7 +1046,7 @@ main(int argc, char **argv) } - check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask); + check_all_ports_link_status(nb_ports, enabled_port_mask); ret = 0; /* launch per-lcore init on every lcore */ |
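
For reference, the comments in the new l3fwd_altivec.h describe the same destination-port grouping scheme already used by the SSE and NEON paths: process packets four at a time, build a pnum[] run-length table of consecutive equal entries in dst_port[], then transmit each run with a single send_packetsx4() call, or free the mbufs when the destination is BAD_PORT. The scalar sketch below is only an illustration of that grouping idea, not code from the patch; send_run() and drop_one() are hypothetical callbacks standing in for the real burst-send and free helpers.

#include <stdint.h>

struct rte_mbuf;                  /* opaque here; the real type lives in rte_mbuf.h */

#define L3FWD_BAD_PORT UINT16_MAX /* assumed sentinel, mirroring the example's BAD_PORT */

/* Walk dst_port[], find runs of packets that share a destination port and
 * hand each run to the caller in one burst. */
static void
send_grouped(struct rte_mbuf **pkts, const uint16_t *dst_port, int nb_rx,
	     void (*send_run)(uint16_t port, struct rte_mbuf **m, uint32_t n),
	     void (*drop_one)(struct rte_mbuf *m))
{
	int i = 0;

	while (i < nb_rx) {
		uint16_t port = dst_port[i];
		int j = i + 1;

		/* extend the run while the destination port stays the same */
		while (j < nb_rx && dst_port[j] == port)
			j++;

		if (port != L3FWD_BAD_PORT) {
			send_run(port, &pkts[i], (uint32_t)(j - i));
		} else {
			int m;

			for (m = i; m < j; m++)
				drop_one(pkts[m]);
		}

		i = j;
	}
}

The vector code reaches the same result without a per-packet scan: it compares dst_port[j..j+3] against dst_port[j+1..j+4] in one vec_any_eq(), and the resulting 4-bit mask indexes the prebuilt gptbl[] table of run lengths.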