From 10ae544444cfafc4367d9c1f39f332b6206772bf Mon Sep 17 00:00:00 2001
From: Konstantin Ananyev
Date: Wed, 28 Apr 2021 15:39:11 +0000
Subject: bump dpdk version to 20.05

Bump dpdk version to 20.05 and adjust tldk source.

Signed-off-by: Konstantin Ananyev
Change-Id: Id2ce864ad20b3b347f1ac05cd67c15384e454c52
---
 examples/l4fwd/common.h |  38 +++++------
 examples/l4fwd/netbe.h  |   4 +-
 examples/l4fwd/parse.c  |   7 +-
 examples/l4fwd/parse.h  |   2 +-
 examples/l4fwd/pkt.c    | 168 +++++++++++++++++++++++++-----------------------
 examples/l4fwd/port.h   |   7 +-
 examples/l4fwd/udp.h    |  12 ++--
 7 files changed, 121 insertions(+), 117 deletions(-)

diff --git a/examples/l4fwd/common.h b/examples/l4fwd/common.h
index b7750d7..a2cd5f6 100644
--- a/examples/l4fwd/common.h
+++ b/examples/l4fwd/common.h
@@ -357,31 +357,31 @@ fill_dst(struct tle_dest *dst, struct netbe_dev *bed,
 	const struct netbe_dest *bdp, uint16_t l3_type, int32_t sid,
 	uint8_t proto_id)
 {
-	struct ether_hdr *eth;
-	struct ipv4_hdr *ip4h;
-	struct ipv6_hdr *ip6h;
+	struct rte_ether_hdr *eth;
+	struct rte_ipv4_hdr *ip4h;
+	struct rte_ipv6_hdr *ip6h;

 	dst->dev = bed->dev;
 	dst->head_mp = frag_mpool[sid + 1];
 	dst->mtu = RTE_MIN(bdp->mtu, bed->port.mtu);
 	dst->l2_len = sizeof(*eth);

-	eth = (struct ether_hdr *)dst->hdr;
+	eth = (struct rte_ether_hdr *)dst->hdr;

-	ether_addr_copy(&bed->port.mac, &eth->s_addr);
-	ether_addr_copy(&bdp->mac, &eth->d_addr);
+	rte_ether_addr_copy(&bed->port.mac, &eth->s_addr);
+	rte_ether_addr_copy(&bdp->mac, &eth->d_addr);
 	eth->ether_type = rte_cpu_to_be_16(l3_type);

-	if (l3_type == ETHER_TYPE_IPv4) {
+	if (l3_type == RTE_ETHER_TYPE_IPV4) {
 		dst->l3_len = sizeof(*ip4h);
-		ip4h = (struct ipv4_hdr *)(eth + 1);
+		ip4h = (struct rte_ipv4_hdr *)(eth + 1);
 		ip4h->version_ihl = 4 << 4 |
-			sizeof(*ip4h) / IPV4_IHL_MULTIPLIER;
+			sizeof(*ip4h) / RTE_IPV4_IHL_MULTIPLIER;
 		ip4h->time_to_live = 64;
 		ip4h->next_proto_id = proto_id;
-	} else if (l3_type == ETHER_TYPE_IPv6) {
+	} else if (l3_type == RTE_ETHER_TYPE_IPV6) {
 		dst->l3_len = sizeof(*ip6h);
-		ip6h = (struct ipv6_hdr *)(eth + 1);
+		ip6h = (struct rte_ipv6_hdr *)(eth + 1);
 		ip6h->vtc_flow = 6 << 4;
 		ip6h->proto = proto_id;
 		ip6h->hop_limits = 64;
@@ -402,12 +402,12 @@ netbe_add_dest(struct netbe_lcore *lc, uint32_t dev_idx, uint16_t family,
 		n = lc->dst4_num;
 		dp = lc->dst4 + n;
 		m = RTE_DIM(lc->dst4);
-		l3_type = ETHER_TYPE_IPv4;
+		l3_type = RTE_ETHER_TYPE_IPV4;
 	} else {
 		n = lc->dst6_num;
 		dp = lc->dst6 + n;
 		m = RTE_DIM(lc->dst6);
-		l3_type = ETHER_TYPE_IPv6;
+		l3_type = RTE_ETHER_TYPE_IPV6;
 	}

 	if (n + dnum >= m) {
@@ -441,21 +441,21 @@ netbe_add_dest(struct netbe_lcore *lc, uint32_t dev_idx, uint16_t family,
 static inline void
 fill_arp_reply(struct netbe_dev *dev, struct rte_mbuf *m)
 {
-	struct ether_hdr *eth;
-	struct arp_hdr *ahdr;
-	struct arp_ipv4 *adata;
+	struct rte_ether_hdr *eth;
+	struct rte_arp_hdr *ahdr;
+	struct rte_arp_ipv4 *adata;
 	uint32_t tip;

 	/* set up the ethernet data */
-	eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+	eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 	eth->d_addr = eth->s_addr;
 	eth->s_addr = dev->port.mac;

 	/* set up the arp data */
-	ahdr = rte_pktmbuf_mtod_offset(m, struct arp_hdr *, m->l2_len);
+	ahdr = rte_pktmbuf_mtod_offset(m, struct rte_arp_hdr *, m->l2_len);
 	adata = &ahdr->arp_data;

-	ahdr->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
+	ahdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY);

 	tip = adata->arp_tip;
 	adata->arp_tip = adata->arp_sip;
diff --git a/examples/l4fwd/netbe.h b/examples/l4fwd/netbe.h
index 661cdcb..430bc28 100644
--- a/examples/l4fwd/netbe.h
+++ b/examples/l4fwd/netbe.h
@@ -75,7 +75,7 @@ struct netbe_port {
 	uint64_t tx_offload;
 	uint32_t ipv4;
 	struct in6_addr ipv6;
-	struct ether_addr mac;
+	struct rte_ether_addr mac;
 	uint32_t hash_key_size;
 	uint8_t hash_key[RSS_HASH_KEY_LENGTH];
 };
@@ -90,7 +90,7 @@ struct netbe_dest {
 		struct in_addr ipv4;
 		struct in6_addr ipv6;
 	};
-	struct ether_addr mac;
+	struct rte_ether_addr mac;
 };

 struct netbe_dest_prm {
diff --git a/examples/l4fwd/parse.c b/examples/l4fwd/parse.c
index a1e7917..b936bab 100644
--- a/examples/l4fwd/parse.c
+++ b/examples/l4fwd/parse.c
@@ -334,7 +334,7 @@ parse_netbe_arg(struct netbe_port *prt, const char *arg, rte_cpuset_t *pcpu)
 	union parse_val val[RTE_DIM(hndl)];

 	memset(val, 0, sizeof(val));
-	val[2].u64 = ETHER_MAX_LEN - ETHER_CRC_LEN;
+	val[2].u64 = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;

 	rc = parse_kvargs(arg, keys_man, RTE_DIM(keys_man),
 		keys_opt, RTE_DIM(keys_opt), hndl, val);
@@ -377,7 +377,8 @@ check_netbe_dest(const struct netbe_dest *dst)
 		RTE_LOG(ERR, USER1, "%s(line=%u) invalid masklen=%u",
 			__func__, dst->line, dst->prfx);
 		return -EINVAL;
-	} else if (dst->mtu > ETHER_MAX_JUMBO_FRAME_LEN - ETHER_CRC_LEN) {
+	} else if (dst->mtu >
+			RTE_ETHER_MAX_JUMBO_FRAME_LEN - RTE_ETHER_CRC_LEN) {
 		RTE_LOG(ERR, USER1, "%s(line=%u) invalid mtu=%u",
 			__func__, dst->line, dst->mtu);
 		return -EINVAL;
@@ -413,7 +414,7 @@ parse_netbe_dest(struct netbe_dest *dst, const char *arg)

 	/* set default values. */
 	memset(val, 0, sizeof(val));
-	val[4].u64 = ETHER_MAX_JUMBO_FRAME_LEN - ETHER_CRC_LEN;
+	val[4].u64 = RTE_ETHER_MAX_JUMBO_FRAME_LEN - RTE_ETHER_CRC_LEN;

 	rc = parse_kvargs(arg, keys_man, RTE_DIM(keys_man),
 		keys_opt, RTE_DIM(keys_opt), hndl, val);
diff --git a/examples/l4fwd/parse.h b/examples/l4fwd/parse.h
index 4303623..4634d60 100644
--- a/examples/l4fwd/parse.h
+++ b/examples/l4fwd/parse.h
@@ -29,7 +29,7 @@ union parse_val {
 			struct in6_addr addr6;
 		};
 	} in;
-	struct ether_addr mac;
+	struct rte_ether_addr mac;
 	rte_cpuset_t cpuset;
 };
diff --git a/examples/l4fwd/pkt.c b/examples/l4fwd/pkt.c
index 43aa9c8..6694e81 100644
--- a/examples/l4fwd/pkt.c
+++ b/examples/l4fwd/pkt.c
@@ -49,9 +49,9 @@ fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4)
 }

 static inline int
-is_ipv4_frag(const struct ipv4_hdr *iph)
+is_ipv4_frag(const struct rte_ipv4_hdr *iph)
 {
-	const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);
+	const uint16_t mask = rte_cpu_to_be_16(~RTE_IPV4_HDR_DF_FLAG);

 	return ((mask & iph->fragment_offset) != 0);
 }
@@ -59,9 +59,9 @@ is_ipv4_frag(const struct ipv4_hdr *iph)
 static inline uint32_t
 get_tcp_header_size(struct rte_mbuf *m, uint32_t l2_len, uint32_t l3_len)
 {
-	const struct tcp_hdr *tcp;
+	const struct rte_tcp_hdr *tcp;

-	tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
+	tcp = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, l2_len + l3_len);
 	return (tcp->data_off >> 4) * 4;
 }
@@ -69,9 +69,9 @@ static inline void
 adjust_ipv4_pktlen(struct rte_mbuf *m, uint32_t l2_len)
 {
 	uint32_t plen, trim;
-	const struct ipv4_hdr *iph;
+	const struct rte_ipv4_hdr *iph;

-	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2_len);
+	iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2_len);
 	plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
 	if (plen < m->pkt_len) {
 		trim = m->pkt_len - plen;
@@ -83,9 +83,9 @@ static inline void
 adjust_ipv6_pktlen(struct rte_mbuf *m, uint32_t l2_len)
 {
 	uint32_t plen, trim;
-	const struct ipv6_hdr *iph;
+	const struct rte_ipv6_hdr *iph;

-	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2_len);
+	iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *, l2_len);
 	plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
 	if (plen < m->pkt_len) {
 		trim = m->pkt_len - plen;
@@ -97,23 +97,24 @@ static inline void
 tcp_stat_update(struct netbe_lcore *lc, const struct rte_mbuf *m,
 	uint32_t l2_len, uint32_t l3_len)
 {
-	const struct tcp_hdr *th;
+	const struct rte_tcp_hdr *th;

-	th = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
+	th = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, l2_len + l3_len);
 	lc->tcp_stat.flags[th->tcp_flags]++;
 }

 static inline uint32_t
 get_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto, uint32_t frag)
 {
-	const struct ipv4_hdr *iph;
+	const struct rte_ipv4_hdr *iph;
 	int32_t dlen, len;

 	dlen = rte_pktmbuf_data_len(m);
 	dlen -= l2;

-	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
-	len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;
+	iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2);
+	len = (iph->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+		RTE_IPV4_IHL_MULTIPLIER;

 	if (frag != 0 && is_ipv4_frag(iph)) {
 		m->packet_type &= ~RTE_PTYPE_L4_MASK;
@@ -155,7 +156,7 @@ get_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
 	const struct ip6_ext *ipx;
 	int32_t dlen, len, ofs;

-	len = sizeof(struct ipv6_hdr);
+	len = sizeof(struct rte_ipv6_hdr);
 	dlen = rte_pktmbuf_data_len(m);
 	dlen -= l2;
@@ -205,13 +206,13 @@ get_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
 static inline uint32_t
 get_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
 {
-	const struct ipv6_hdr *iph;
+	const struct rte_ipv6_hdr *iph;

-	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *,
-		sizeof(struct ether_hdr));
+	iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *,
+		sizeof(struct rte_ether_hdr));

 	if (iph->proto == fproto)
-		return sizeof(struct ipv6_hdr);
+		return sizeof(struct rte_ipv6_hdr);
 	else if (ipv6x_hdr(iph->proto) != 0)
 		return get_ipv6x_hdr_len(m, l2, iph->proto, fproto);
@@ -234,14 +235,14 @@ static inline struct rte_mbuf *
 handle_arp(struct rte_mbuf *m, struct netbe_lcore *lc, dpdk_port_t port,
 	uint32_t l2len)
 {
-	const struct arp_hdr *ahdr;
+	const struct rte_arp_hdr *ahdr;
 	struct pkt_buf *abuf;

-	ahdr = rte_pktmbuf_mtod_offset(m, const struct arp_hdr *, l2len);
+	ahdr = rte_pktmbuf_mtod_offset(m, const struct rte_arp_hdr *, l2len);

-	if (ahdr->arp_hrd != rte_be_to_cpu_16(ARP_HRD_ETHER) ||
-		ahdr->arp_pro != rte_be_to_cpu_16(ETHER_TYPE_IPv4) ||
-		ahdr->arp_op != rte_be_to_cpu_16(ARP_OP_REQUEST)) {
+	if (ahdr->arp_hardware != rte_be_to_cpu_16(RTE_ARP_HRD_ETHER) ||
+		ahdr->arp_protocol != rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4) ||
+		ahdr->arp_opcode != rte_be_to_cpu_16(RTE_ARP_OP_REQUEST)) {

 		m->packet_type = RTE_PTYPE_UNKNOWN;
 		return m;
@@ -263,28 +264,28 @@ fill_eth_tcp_arp_hdr_len(struct rte_mbuf *m, struct netbe_lcore *lc,
 {
 	uint32_t dlen, l2_len, l3_len, l4_len;
 	uint16_t etp;
-	const struct ether_hdr *eth;
+	const struct rte_ether_hdr *eth;

 	dlen = rte_pktmbuf_data_len(m);

 	/* check that first segment is at least 54B long. */
-	if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
-		sizeof(struct tcp_hdr)) {
+	if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+		sizeof(struct rte_tcp_hdr)) {
 		m->packet_type = RTE_PTYPE_UNKNOWN;
 		return m;
 	}

 	l2_len = sizeof(*eth);
-	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+	eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
 	etp = eth->ether_type;
-	if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
-		l2_len += sizeof(struct vlan_hdr);
+	if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+		l2_len += sizeof(struct rte_vlan_hdr);

-	if (etp == rte_be_to_cpu_16(ETHER_TYPE_ARP))
+	if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_ARP))
 		return handle_arp(m, lc, port, l2_len);

-	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+	if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
 		m->packet_type = RTE_PTYPE_L4_TCP |
 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 			RTE_PTYPE_L2_ETHER;
@@ -292,9 +293,9 @@ fill_eth_tcp_arp_hdr_len(struct rte_mbuf *m, struct netbe_lcore *lc,
 		l4_len = get_tcp_header_size(m, l2_len, l3_len);
 		fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
 		adjust_ipv4_pktlen(m, l2_len);
-	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
-			dlen >= l2_len + sizeof(struct ipv6_hdr) +
-			sizeof(struct tcp_hdr)) {
+	} else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+			dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+			sizeof(struct rte_tcp_hdr)) {
 		m->packet_type = RTE_PTYPE_L4_TCP |
 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
 			RTE_PTYPE_L2_ETHER;
@@ -313,25 +314,25 @@ fill_eth_tcp_hdr_len(struct rte_mbuf *m)
 {
 	uint32_t dlen, l2_len, l3_len, l4_len;
 	uint16_t etp;
-	const struct ether_hdr *eth;
+	const struct rte_ether_hdr *eth;

 	dlen = rte_pktmbuf_data_len(m);

 	/* check that first segment is at least 54B long. */
-	if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
-		sizeof(struct tcp_hdr)) {
+	if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+		sizeof(struct rte_tcp_hdr)) {
 		m->packet_type = RTE_PTYPE_UNKNOWN;
 		return;
 	}

 	l2_len = sizeof(*eth);
-	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+	eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
 	etp = eth->ether_type;
-	if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
-		l2_len += sizeof(struct vlan_hdr);
+	if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+		l2_len += sizeof(struct rte_vlan_hdr);

-	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+	if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
 		m->packet_type = RTE_PTYPE_L4_TCP |
 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 			RTE_PTYPE_L2_ETHER;
@@ -339,9 +340,9 @@ fill_eth_tcp_hdr_len(struct rte_mbuf *m)
 		l4_len = get_tcp_header_size(m, l2_len, l3_len);
 		fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
 		adjust_ipv4_pktlen(m, l2_len);
-	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
-			dlen >= l2_len + sizeof(struct ipv6_hdr) +
-			sizeof(struct tcp_hdr)) {
+	} else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+			dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+			sizeof(struct rte_tcp_hdr)) {
 		m->packet_type = RTE_PTYPE_L4_TCP |
 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
 			RTE_PTYPE_L2_ETHER;
@@ -358,38 +359,38 @@ fill_eth_udp_hdr_len(struct rte_mbuf *m)
 {
 	uint32_t dlen, l2_len;
 	uint16_t etp;
-	const struct ether_hdr *eth;
+	const struct rte_ether_hdr *eth;

 	dlen = rte_pktmbuf_data_len(m);

 	/* check that first segment is at least 42B long. */
-	if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
-		sizeof(struct udp_hdr)) {
+	if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+		sizeof(struct rte_udp_hdr)) {
 		m->packet_type = RTE_PTYPE_UNKNOWN;
 		return;
 	}

 	l2_len = sizeof(*eth);
-	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+	eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
 	etp = eth->ether_type;
-	if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
-		l2_len += sizeof(struct vlan_hdr);
+	if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+		l2_len += sizeof(struct rte_vlan_hdr);

-	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+	if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
 		m->packet_type = RTE_PTYPE_L4_UDP |
 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 			RTE_PTYPE_L2_ETHER;
 		fill_ipv4_hdr_len(m, l2_len, IPPROTO_UDP, 1,
-			sizeof(struct udp_hdr));
-	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
-			dlen >= l2_len + sizeof(struct ipv6_hdr) +
-			sizeof(struct udp_hdr)) {
+			sizeof(struct rte_udp_hdr));
+	} else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+			dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+			sizeof(struct rte_udp_hdr)) {
 		m->packet_type = RTE_PTYPE_L4_UDP |
 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
 			RTE_PTYPE_L2_ETHER;
 		fill_ipv6_hdr_len(m, l2_len, IPPROTO_UDP,
-			sizeof(struct udp_hdr));
+			sizeof(struct rte_udp_hdr));
 	} else
 		m->packet_type = RTE_PTYPE_UNKNOWN;
 }
@@ -406,7 +407,7 @@ ipv4x_cksum(const void *iph, size_t len)
 static inline void
 fix_reassembled(struct rte_mbuf *m, int32_t hwcsum, uint32_t proto)
 {
-	struct ipv4_hdr *iph;
+	struct rte_ipv4_hdr *iph;

 	/* update packet type. */
 	m->packet_type &= ~RTE_PTYPE_L4_MASK;
@@ -425,7 +426,8 @@ fix_reassembled(struct rte_mbuf *m, int32_t hwcsum, uint32_t proto)

 	/* recalculate ipv4 cksum after reassemble. */
 	else if (hwcsum == 0 && RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
-		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+		iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+			m->l2_len);
 		iph->hdr_checksum = ipv4x_cksum(iph, m->l3_len);
 	}
 }
@@ -444,19 +446,21 @@ reassemble(struct rte_mbuf *m, struct netbe_lcore *lc, uint64_t tms,

 	if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {

-		struct ipv4_hdr *iph;
+		struct rte_ipv4_hdr *iph;

-		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+		iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+			m->l2_len);

 		/* process this fragment. */
 		m = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, iph);

 	} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {

-		struct ipv6_hdr *iph;
+		struct rte_ipv6_hdr *iph;
 		struct ipv6_extension_fragment *fhdr;

-		iph = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, m->l2_len);
+		iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+			m->l2_len);

 		/*
 		 * we store fragment header offset in tso_segsz before
@@ -535,7 +539,7 @@ type0_tcp_rx_callback(__rte_unused dpdk_port_t port,
 	uint32_t j, tp;
 	struct netbe_lcore *lc;
 	uint32_t l4_len, l3_len, l2_len;
-	const struct ether_hdr *eth;
+	const struct rte_ether_hdr *eth;

 	lc = user_param;
 	l2_len = sizeof(*eth);
@@ -554,17 +558,17 @@ type0_tcp_rx_callback(__rte_unused dpdk_port_t port,
 		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4 |
 				RTE_PTYPE_L2_ETHER):
 			l4_len = get_tcp_header_size(pkt[j], l2_len,
-				sizeof(struct ipv4_hdr));
+				sizeof(struct rte_ipv4_hdr));
 			fill_pkt_hdr_len(pkt[j], l2_len,
-				sizeof(struct ipv4_hdr), l4_len);
+				sizeof(struct rte_ipv4_hdr), l4_len);
 			adjust_ipv4_pktlen(pkt[j], l2_len);
 			break;
 		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6 |
 				RTE_PTYPE_L2_ETHER):
 			l4_len = get_tcp_header_size(pkt[j], l2_len,
-				sizeof(struct ipv6_hdr));
+				sizeof(struct rte_ipv6_hdr));
 			fill_pkt_hdr_len(pkt[j], l2_len,
-				sizeof(struct ipv6_hdr), l4_len);
+				sizeof(struct rte_ipv6_hdr), l4_len);
 			adjust_ipv6_pktlen(pkt[j], l2_len);
 			break;
 		case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT |
@@ -604,7 +608,7 @@ type0_udp_rx_callback(dpdk_port_t port, __rte_unused uint16_t queue,
 	uint64_t cts;
 	struct netbe_lcore *lc;
 	uint32_t l2_len;
-	const struct ether_hdr *eth;
+	const struct rte_ether_hdr *eth;

 	lc = user_param;
 	cts = 0;
@@ -623,37 +627,37 @@ type0_udp_rx_callback(dpdk_port_t port, __rte_unused uint16_t queue,
 		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4 |
 				RTE_PTYPE_L2_ETHER):
 			fill_pkt_hdr_len(pkt[j], l2_len,
-				sizeof(struct ipv4_hdr),
-				sizeof(struct udp_hdr));
+				sizeof(struct rte_ipv4_hdr),
+				sizeof(struct rte_udp_hdr));
 			adjust_ipv4_pktlen(pkt[j], l2_len);
 			break;
 		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6 |
 				RTE_PTYPE_L2_ETHER):
 			fill_pkt_hdr_len(pkt[j], l2_len,
-				sizeof(struct ipv6_hdr),
-				sizeof(struct udp_hdr));
+				sizeof(struct rte_ipv6_hdr),
+				sizeof(struct rte_udp_hdr));
 			adjust_ipv6_pktlen(pkt[j], l2_len);
 			break;
 		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4_EXT |
 				RTE_PTYPE_L2_ETHER):
 			fill_ipv4_hdr_len(pkt[j], l2_len,
-				UINT32_MAX, 0, sizeof(struct udp_hdr));
+				UINT32_MAX, 0, sizeof(struct rte_udp_hdr));
 			break;
 		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6_EXT |
 				RTE_PTYPE_L2_ETHER):
 			fill_ipv6_hdr_len(pkt[j], l2_len,
-				IPPROTO_UDP, sizeof(struct udp_hdr));
+				IPPROTO_UDP, sizeof(struct rte_udp_hdr));
 			break;
 		/* possibly fragmented udp packets. */
 		case (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER):
 		case (RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER):
 			fill_ipv4_hdr_len(pkt[j], l2_len,
-				IPPROTO_UDP, 1, sizeof(struct udp_hdr));
+				IPPROTO_UDP, 1, sizeof(struct rte_udp_hdr));
 			break;
 		case (RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER):
 		case (RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L2_ETHER):
 			fill_ipv6_hdr_len(pkt[j], l2_len,
-				IPPROTO_UDP, sizeof(struct udp_hdr));
+				IPPROTO_UDP, sizeof(struct rte_udp_hdr));
 			break;
 		default:
 			/* treat packet types as invalid. */
@@ -690,7 +694,7 @@ type1_tcp_rx_callback(__rte_unused dpdk_port_t port,
 	uint32_t j, tp;
 	struct netbe_lcore *lc;
 	uint32_t l4_len, l3_len, l2_len;
-	const struct ether_hdr *eth;
+	const struct rte_ether_hdr *eth;

 	lc = user_param;
 	l2_len = sizeof(*eth);
@@ -745,7 +749,7 @@ type1_udp_rx_callback(dpdk_port_t port, __rte_unused uint16_t queue,
 	uint64_t cts;
 	struct netbe_lcore *lc;
 	uint32_t l2_len;
-	const struct ether_hdr *eth;
+	const struct rte_ether_hdr *eth;

 	lc = user_param;
 	cts = 0;
@@ -763,22 +767,22 @@ type1_udp_rx_callback(dpdk_port_t port, __rte_unused uint16_t queue,
 		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 				RTE_PTYPE_L2_ETHER):
 			fill_ipv4_hdr_len(pkt[j], l2_len,
-				UINT32_MAX, 0, sizeof(struct udp_hdr));
+				UINT32_MAX, 0, sizeof(struct rte_udp_hdr));
 			break;
 		case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
 				RTE_PTYPE_L2_ETHER):
 			fill_ipv6_hdr_len(pkt[j], l2_len,
-				IPPROTO_UDP, sizeof(struct udp_hdr));
+				IPPROTO_UDP, sizeof(struct rte_udp_hdr));
 			break;
 		case (RTE_PTYPE_L4_FRAG | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 				RTE_PTYPE_L2_ETHER):
 			fill_ipv4_hdr_len(pkt[j], l2_len,
-				IPPROTO_UDP, 0, sizeof(struct udp_hdr));
+				IPPROTO_UDP, 0, sizeof(struct rte_udp_hdr));
 			break;
 		case (RTE_PTYPE_L4_FRAG | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
 				RTE_PTYPE_L2_ETHER):
 			fill_ipv6_hdr_len(pkt[j], l2_len,
-				IPPROTO_UDP, sizeof(struct udp_hdr));
+				IPPROTO_UDP, sizeof(struct rte_udp_hdr));
 			break;
 		default:
 			/* treat packet types as invalid. */
diff --git a/examples/l4fwd/port.h b/examples/l4fwd/port.h
index 8c1a899..ce730dd 100644
--- a/examples/l4fwd/port.h
+++ b/examples/l4fwd/port.h
@@ -182,8 +182,8 @@ port_init(struct netbe_port *uprt, uint32_t proto)
 			__func__, uprt->id);
 		port_conf.rxmode.offloads |= uprt->rx_offload & RX_CSUM_OFFLOAD;
 	}
-	port_conf.rxmode.max_rx_pkt_len = uprt->mtu + ETHER_CRC_LEN;
-	if (port_conf.rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
+	port_conf.rxmode.max_rx_pkt_len = uprt->mtu + RTE_ETHER_CRC_LEN;
+	if (port_conf.rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
 		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;

 	rc = update_rss_conf(uprt, &dev_info, &port_conf, proto);
@@ -258,8 +258,7 @@ check_lcore(uint32_t lc)
 		return -EINVAL;
 	}
 	if (rte_eal_get_lcore_state(lc) == RUNNING) {
-		RTE_LOG(ERR, USER1, "lcore %u already running %p\n",
-			lc, lcore_config[lc].f);
+		RTE_LOG(ERR, USER1, "lcore %u already in use\n", lc);
 		return -EINVAL;
 	}
 	return 0;
diff --git a/examples/l4fwd/udp.h b/examples/l4fwd/udp.h
index c079e9c..2465f08 100644
--- a/examples/l4fwd/udp.h
+++ b/examples/l4fwd/udp.h
@@ -252,25 +252,25 @@ static inline void
 netfe_pkt_addr(const struct rte_mbuf *m, struct sockaddr_storage *ps,
 	uint16_t family)
 {
-	const struct ipv4_hdr *ip4h;
-	const struct ipv6_hdr *ip6h;
-	const struct udp_hdr *udph;
+	const struct rte_ipv4_hdr *ip4h;
+	const struct rte_ipv6_hdr *ip6h;
+	const struct rte_udp_hdr *udph;
 	struct sockaddr_in *in4;
 	struct sockaddr_in6 *in6;

 	NETFE_PKT_DUMP(m);

-	udph = rte_pktmbuf_mtod_offset(m, struct udp_hdr *, -m->l4_len);
+	udph = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, -m->l4_len);

 	if (family == AF_INET) {
 		in4 = (struct sockaddr_in *)ps;
-		ip4h = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+		ip4h = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
 			-(m->l4_len + m->l3_len));
 		in4->sin_port = udph->src_port;
 		in4->sin_addr.s_addr = ip4h->src_addr;
 	} else {
 		in6 = (struct sockaddr_in6 *)ps;
-		ip6h = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+		ip6h = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
 			-(m->l4_len + m->l3_len));
 		in6->sin6_port = udph->src_port;
 		rte_memcpy(&in6->sin6_addr, ip6h->src_addr,
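
Note on the renames above: they track the rte_/RTE_ prefixes that DPDK added to its Ethernet, VLAN, ARP, IPv4/IPv6, TCP and UDP header definitions in release 19.08, which is why moving the DPDK baseline forward touches every protocol-header reference in l4fwd. Purely as an illustrative sketch (not part of this patch, and limited to names that appear in the diff), sources written against the new names could still be built against a pre-19.08 DPDK with fallback macros keyed off RTE_VERSION:

	#include <rte_version.h>

	#if RTE_VERSION < RTE_VERSION_NUM(19, 8, 0, 0)
	/* Older DPDK: map the 19.08+ names used above back to the legacy ones. */
	#define rte_ether_hdr		ether_hdr
	#define rte_ether_addr		ether_addr
	#define rte_ether_addr_copy	ether_addr_copy
	#define rte_vlan_hdr		vlan_hdr
	#define rte_ipv4_hdr		ipv4_hdr
	#define rte_ipv6_hdr		ipv6_hdr
	#define rte_tcp_hdr		tcp_hdr
	#define rte_udp_hdr		udp_hdr
	#define rte_arp_hdr		arp_hdr
	#define rte_arp_ipv4		arp_ipv4
	#define RTE_ETHER_TYPE_IPV4	ETHER_TYPE_IPv4
	#define RTE_ETHER_TYPE_IPV6	ETHER_TYPE_IPv6
	#define RTE_ETHER_TYPE_VLAN	ETHER_TYPE_VLAN
	#define RTE_ETHER_TYPE_ARP	ETHER_TYPE_ARP
	#define RTE_ETHER_CRC_LEN	ETHER_CRC_LEN
	#define RTE_ETHER_MAX_LEN	ETHER_MAX_LEN
	#define RTE_ETHER_MAX_JUMBO_FRAME_LEN	ETHER_MAX_JUMBO_FRAME_LEN
	#define RTE_IPV4_IHL_MULTIPLIER	IPV4_IHL_MULTIPLIER
	#define RTE_IPV4_HDR_IHL_MASK	IPV4_HDR_IHL_MASK
	#define RTE_IPV4_HDR_DF_FLAG	IPV4_HDR_DF_FLAG
	#define RTE_ARP_HRD_ETHER	ARP_HRD_ETHER
	#define RTE_ARP_OP_REQUEST	ARP_OP_REQUEST
	#define RTE_ARP_OP_REPLY	ARP_OP_REPLY
	#endif

Field renames such as arp_hrd/arp_pro/arp_op becoming arp_hardware/arp_protocol/arp_opcode inside struct rte_arp_hdr cannot be papered over with macros like these and would still need conditional code, which is one reason this patch simply converts the sources to the new API rather than carrying compatibility definitions.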