author     Konstantin Ananyev <konstantin.ananyev@intel.com>   2021-04-28 15:39:11 +0000
committer  Konstantin Ananyev <konstantin.ananyev@intel.com>   2021-04-28 15:42:05 +0000
commit     10ae544444cfafc4367d9c1f39f332b6206772bf
tree       5743c700d0e1713934379c566cfe88b4db001d45
parent     71ba97fe8ee93aa2bb17e774c1d73c6e58c03b2b
bump dpdk version to 20.05

Bump dpdk version to 20.05 and adjust tldk source.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Change-Id: Id2ce864ad20b3b347f1ac05cd67c15384e454c52
-rw-r--r--   app/nginx/src/tldk/be.c               | 102
-rw-r--r--   app/nginx/src/tldk/ngx_tldk.h         |   4
-rw-r--r--   app/nginx/src/tldk/parse.c            |   6
-rw-r--r--   dpdk/Makefile                         |   2
-rw-r--r--   examples/l4fwd/common.h               |  38
-rw-r--r--   examples/l4fwd/netbe.h                |   4
-rw-r--r--   examples/l4fwd/parse.c                |   7
-rw-r--r--   examples/l4fwd/parse.h                |   2
-rw-r--r--   examples/l4fwd/pkt.c                  | 168
-rw-r--r--   examples/l4fwd/port.h                 |   7
-rw-r--r--   examples/l4fwd/udp.h                  |  12
-rw-r--r--   lib/libtle_l4p/misc.h                 |  20
-rw-r--r--   lib/libtle_l4p/stream.h               |   8
-rw-r--r--   lib/libtle_l4p/tcp_misc.h             |  22
-rw-r--r--   lib/libtle_l4p/tcp_rxtx.c             |  45
-rw-r--r--   lib/libtle_l4p/udp_rxtx.c             |  18
-rw-r--r--   test/gtest/test_common.cpp            |  52
-rw-r--r--   test/gtest/test_common.h              |   2
-rw-r--r--   test/gtest/test_tle_udp_stream_gen.h  |  22
19 files changed, 275 insertions(+), 266 deletions(-)
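
Most of the churn below is mechanical: starting with the 19.08 release DPDK moved its protocol headers and constants into the rte_/RTE_ namespace (struct ether_hdr became struct rte_ether_hdr, ETHER_TYPE_IPv4 became RTE_ETHER_TYPE_IPV4, IPV4_IHL_MULTIPLIER became RTE_IPV4_IHL_MULTIPLIER, and so on), so building against 20.05 requires the new names. For comparison only, a version-gated shim of roughly the following shape could keep one source tree building against both old and new DPDK; the macro list is an illustrative subset, not something this patch adds, and the patch instead converts the sources to the new names directly.

/*
 * Illustrative compatibility shim (NOT part of this patch): when built
 * against a pre-19.08 DPDK, alias the new RTE_-prefixed names onto the
 * legacy ones so that code written for >= 19.08 still compiles.
 */
#include <rte_version.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_arp.h>

#if RTE_VERSION < RTE_VERSION_NUM(19, 8, 0, 0)
#define rte_ether_hdr            ether_hdr
#define rte_ether_addr           ether_addr
#define rte_ether_addr_copy      ether_addr_copy
#define rte_vlan_hdr             vlan_hdr
#define rte_ipv4_hdr             ipv4_hdr
#define rte_ipv6_hdr             ipv6_hdr
#define rte_tcp_hdr              tcp_hdr
#define rte_udp_hdr              udp_hdr
#define rte_arp_hdr              arp_hdr
#define RTE_ETHER_TYPE_IPV4      ETHER_TYPE_IPv4
#define RTE_ETHER_TYPE_IPV6      ETHER_TYPE_IPv6
#define RTE_ETHER_TYPE_VLAN      ETHER_TYPE_VLAN
#define RTE_ETHER_TYPE_ARP       ETHER_TYPE_ARP
#define RTE_ETHER_CRC_LEN        ETHER_CRC_LEN
#define RTE_ETHER_MAX_LEN        ETHER_MAX_LEN
#define RTE_ETHER_MTU            ETHER_MTU
#define RTE_ETHER_MAX_JUMBO_FRAME_LEN ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_IPV4_HDR_DF_FLAG     IPV4_HDR_DF_FLAG
#define RTE_IPV4_HDR_IHL_MASK    IPV4_HDR_IHL_MASK
#define RTE_IPV4_IHL_MULTIPLIER  IPV4_IHL_MULTIPLIER
#define RTE_ARP_HRD_ETHER        ARP_HRD_ETHER
#define RTE_ARP_OP_REQUEST       ARP_OP_REQUEST
#define RTE_ARP_OP_REPLY         ARP_OP_REPLY
#endif

The aliasing approach breaks down where struct fields were renamed as well (struct rte_arp_hdr: arp_hrd/arp_pro/arp_op became arp_hardware/arp_protocol/arp_opcode) and where the API changed shape (the lcore_config array is no longer meant to be read directly, hence the reworked "lcore already in use" log messages below), which is why the direct conversion done in this patch is the simpler route.
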
diff --git a/app/nginx/src/tldk/be.c b/app/nginx/src/tldk/be.c
index 1b7b496..b226c26 100644
--- a/app/nginx/src/tldk/be.c
+++ b/app/nginx/src/tldk/be.c
@@ -204,8 +204,8 @@ port_init(const struct tldk_port_conf *pcf)
port_conf.rxmode.offloads |= pcf->rx_offload & RX_CSUM_OFFLOAD;
}
- port_conf.rxmode.max_rx_pkt_len = pcf->mtu + ETHER_CRC_LEN;
- if (port_conf.rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
+ port_conf.rxmode.max_rx_pkt_len = pcf->mtu + RTE_ETHER_CRC_LEN;
+ if (port_conf.rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP;
@@ -240,8 +240,7 @@ be_check_lcore(uint32_t lid)
if (rte_get_master_lcore() != lid &&
rte_eal_get_lcore_state(lid) == RUNNING) {
- RTE_LOG(ERR, USER1, "lcore %u already running %p\n",
- lid, lcore_config[lid].f);
+ RTE_LOG(ERR, USER1, "lcore %u already in use\n", lid);
return -EINVAL;
}
@@ -424,31 +423,31 @@ fill_dst(struct tle_dest *dst, const struct tldk_dev *td,
const struct tldk_port_conf *pcf, const struct tldk_dest_conf *dest,
uint16_t l3_type, struct rte_mempool *mp)
{
- struct ether_hdr *eth;
- struct ipv4_hdr *ip4h;
- struct ipv6_hdr *ip6h;
+ struct rte_ether_hdr *eth;
+ struct rte_ipv4_hdr *ip4h;
+ struct rte_ipv6_hdr *ip6h;
dst->dev = td->dev;
dst->head_mp = mp;
dst->mtu = RTE_MIN(dest->mtu, pcf->mtu);
dst->l2_len = sizeof(*eth);
- eth = (struct ether_hdr *)dst->hdr;
+ eth = (struct rte_ether_hdr *)dst->hdr;
- ether_addr_copy(&pcf->mac, &eth->s_addr);
- ether_addr_copy(&dest->mac, &eth->d_addr);
+ rte_ether_addr_copy(&pcf->mac, &eth->s_addr);
+ rte_ether_addr_copy(&dest->mac, &eth->d_addr);
eth->ether_type = rte_cpu_to_be_16(l3_type);
- if (l3_type == ETHER_TYPE_IPv4) {
+ if (l3_type == RTE_ETHER_TYPE_IPV4) {
dst->l3_len = sizeof(*ip4h);
- ip4h = (struct ipv4_hdr *)(eth + 1);
+ ip4h = (struct rte_ipv4_hdr *)(eth + 1);
ip4h->version_ihl = 4 << 4 |
- sizeof(*ip4h) / IPV4_IHL_MULTIPLIER;
+ sizeof(*ip4h) / RTE_IPV4_IHL_MULTIPLIER;
ip4h->time_to_live = 64;
ip4h->next_proto_id = IPPROTO_TCP;
- } else if (l3_type == ETHER_TYPE_IPv6) {
+ } else if (l3_type == RTE_ETHER_TYPE_IPV6) {
dst->l3_len = sizeof(*ip6h);
- ip6h = (struct ipv6_hdr *)(eth + 1);
+ ip6h = (struct rte_ipv6_hdr *)(eth + 1);
ip6h->vtc_flow = 6 << 4;
ip6h->proto = IPPROTO_TCP;
ip6h->hop_limits = 64;
@@ -469,12 +468,12 @@ be_add_dest(const struct tldk_dest_conf *dcf, struct tldk_ctx *tcx,
n = tcx->dst4_num;
dp = tcx->dst4 + n;
m = RTE_DIM(tcx->dst4);
- l3_type = ETHER_TYPE_IPv4;
+ l3_type = RTE_ETHER_TYPE_IPV4;
} else {
n = tcx->dst6_num;
dp = tcx->dst6 + n;
m = RTE_DIM(tcx->dst6);
- l3_type = ETHER_TYPE_IPv6;
+ l3_type = RTE_ETHER_TYPE_IPV6;
}
if (n + dnum >= m) {
@@ -650,9 +649,9 @@ fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4)
}
static inline int
-is_ipv4_frag(const struct ipv4_hdr *iph)
+is_ipv4_frag(const struct rte_ipv4_hdr *iph)
{
- const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);
+ const uint16_t mask = rte_cpu_to_be_16(~RTE_IPV4_HDR_DF_FLAG);
return ((mask & iph->fragment_offset) != 0);
}
@@ -660,9 +659,9 @@ is_ipv4_frag(const struct ipv4_hdr *iph)
static inline uint32_t
get_tcp_header_size(struct rte_mbuf *m, uint32_t l2_len, uint32_t l3_len)
{
- const struct tcp_hdr *tcp;
+ const struct rte_tcp_hdr *tcp;
- tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
+ tcp = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, l2_len + l3_len);
return (tcp->data_off >> 4) * 4;
}
@@ -670,9 +669,9 @@ static inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
uint32_t plen, trim;
- const struct ipv4_hdr *iph;
+ const struct rte_ipv4_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2_len);
plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
if (plen < m->pkt_len) {
trim = m->pkt_len - plen;
@@ -684,9 +683,9 @@ static inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
uint32_t plen, trim;
- const struct ipv6_hdr *iph;
+ const struct rte_ipv6_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *, l2_len);
plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
if (plen < m->pkt_len) {
trim = m->pkt_len - plen;
@@ -698,23 +697,24 @@ static inline void
tcp_stat_update(struct tldk_ctx *lc, const struct rte_mbuf *m,
uint32_t l2_len, uint32_t l3_len)
{
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
- th = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
+ th = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, l2_len + l3_len);
lc->tcp_stat.flags[th->tcp_flags]++;
}
static inline uint32_t
get_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto, uint32_t frag)
{
- const struct ipv4_hdr *iph;
+ const struct rte_ipv4_hdr *iph;
int32_t dlen, len;
dlen = rte_pktmbuf_data_len(m);
dlen -= l2;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
- len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2);
+ len = (iph->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER;
if (frag != 0 && is_ipv4_frag(iph)) {
m->packet_type &= ~RTE_PTYPE_L4_MASK;
@@ -745,7 +745,7 @@ get_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
const struct ip6_ext *ipx;
int32_t dlen, len, ofs;
- len = sizeof(struct ipv6_hdr);
+ len = sizeof(struct rte_ipv6_hdr);
dlen = rte_pktmbuf_data_len(m);
dlen -= l2;
@@ -795,13 +795,13 @@ get_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
static inline uint32_t
get_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
- const struct ipv6_hdr *iph;
+ const struct rte_ipv6_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *,
- sizeof(struct ether_hdr));
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *,
+ sizeof(struct rte_ether_hdr));
if (iph->proto == fproto)
- return sizeof(struct ipv6_hdr);
+ return sizeof(struct rte_ipv6_hdr);
else if (ipv6x_hdr(iph->proto) != 0)
return get_ipv6x_hdr_len(m, l2, iph->proto, fproto);
@@ -814,25 +814,25 @@ fill_eth_tcp_hdr_len(struct rte_mbuf *m)
{
uint32_t dlen, l2_len, l3_len, l4_len;
uint16_t etp;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
dlen = rte_pktmbuf_data_len(m);
/* check that first segment is at least 54B long. */
- if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
- sizeof(struct tcp_hdr)) {
+ if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return;
}
l2_len = sizeof(*eth);
- eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
etp = eth->ether_type;
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
- l2_len += sizeof(struct vlan_hdr);
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+ l2_len += sizeof(struct rte_vlan_hdr);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
@@ -840,9 +840,9 @@ fill_eth_tcp_hdr_len(struct rte_mbuf *m)
l4_len = get_tcp_header_size(m, l2_len, l3_len);
fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
adjust_ipv4_pktlen(m, l2_len);
- } else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
- dlen >= l2_len + sizeof(struct ipv6_hdr) +
- sizeof(struct tcp_hdr)) {
+ } else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+ dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
@@ -865,7 +865,7 @@ type0_tcp_rx_callback(__rte_unused dpdk_port_t port,
{
uint32_t j, tp;
uint32_t l4_len, l3_len, l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
l2_len = sizeof(*eth);
@@ -881,17 +881,17 @@ type0_tcp_rx_callback(__rte_unused dpdk_port_t port,
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L2_ETHER):
l4_len = get_tcp_header_size(pkt[j], l2_len,
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv4_hdr), l4_len);
+ sizeof(struct rte_ipv4_hdr), l4_len);
adjust_ipv4_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L2_ETHER):
l4_len = get_tcp_header_size(pkt[j], l2_len,
- sizeof(struct ipv6_hdr));
+ sizeof(struct rte_ipv6_hdr));
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv6_hdr), l4_len);
+ sizeof(struct rte_ipv6_hdr), l4_len);
adjust_ipv6_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT |
@@ -931,7 +931,7 @@ type1_tcp_rx_callback(__rte_unused dpdk_port_t port,
uint32_t j, tp;
struct tldk_ctx *tcx;
uint32_t l4_len, l3_len, l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
tcx = user_param;
l2_len = sizeof(*eth);
diff --git a/app/nginx/src/tldk/ngx_tldk.h b/app/nginx/src/tldk/ngx_tldk.h
index ed6ae35..592a555 100644
--- a/app/nginx/src/tldk/ngx_tldk.h
+++ b/app/nginx/src/tldk/ngx_tldk.h
@@ -58,7 +58,7 @@ struct tldk_port_conf {
uint64_t tx_offload;
uint32_t ipv4;
struct in6_addr ipv6;
- struct ether_addr mac;
+ struct rte_ether_addr mac;
};
struct tldk_dev_conf {
@@ -76,7 +76,7 @@ struct tldk_dest_conf {
struct in_addr ipv4;
struct in6_addr ipv6;
};
- struct ether_addr mac;
+ struct rte_ether_addr mac;
};
#define TLDK_MAX_DEST 0x10
diff --git a/app/nginx/src/tldk/parse.c b/app/nginx/src/tldk/parse.c
index 6e20b1b..455d276 100644
--- a/app/nginx/src/tldk/parse.c
+++ b/app/nginx/src/tldk/parse.c
@@ -38,7 +38,7 @@ union parse_val {
struct in6_addr addr6;
};
} in;
- struct ether_addr mac;
+ struct rte_ether_addr mac;
rte_cpuset_t cpuset;
};
@@ -163,7 +163,7 @@ tldk_port_parse(ngx_conf_t *cf, struct tldk_port_conf *prt)
union parse_val pvl[RTE_DIM(kh)];
memset(pvl, 0, sizeof(pvl));
- pvl[1].u64 = ETHER_MAX_LEN - ETHER_CRC_LEN;
+ pvl[1].u64 = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
if (cf->args->nelts % 2 != 0)
return NGX_CONF_ERROR;
@@ -286,7 +286,7 @@ tldk_dest_parse(ngx_conf_t *cf, struct tldk_dest_conf *dst)
union parse_val pvl[RTE_DIM(kh)];
memset(pvl, 0, sizeof(pvl));
- pvl[1].u64 = ETHER_MAX_LEN - ETHER_CRC_LEN;
+ pvl[1].u64 = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
if (cf->args->nelts % 2 != 1 || cf->args->nelts == 1)
return NGX_CONF_ERROR;
diff --git a/dpdk/Makefile b/dpdk/Makefile
index 15204fa..c474433 100644
--- a/dpdk/Makefile
+++ b/dpdk/Makefile
@@ -14,7 +14,7 @@
# Scripts require non-POSIX parts of bash
SHELL := /bin/bash
-DPDK_VERSION ?= v18.11
+DPDK_VERSION ?= v20.05
DPDK_BUILD_DIR ?= $(CURDIR)/_build
DPDK_INSTALL_DIR ?= $(DPDK_BUILD_DIR)/dpdk/$(RTE_TARGET)
DPDK_PKTMBUF_HEADROOM ?= 128
diff --git a/examples/l4fwd/common.h b/examples/l4fwd/common.h
index b7750d7..a2cd5f6 100644
--- a/examples/l4fwd/common.h
+++ b/examples/l4fwd/common.h
@@ -357,31 +357,31 @@ fill_dst(struct tle_dest *dst, struct netbe_dev *bed,
const struct netbe_dest *bdp, uint16_t l3_type, int32_t sid,
uint8_t proto_id)
{
- struct ether_hdr *eth;
- struct ipv4_hdr *ip4h;
- struct ipv6_hdr *ip6h;
+ struct rte_ether_hdr *eth;
+ struct rte_ipv4_hdr *ip4h;
+ struct rte_ipv6_hdr *ip6h;
dst->dev = bed->dev;
dst->head_mp = frag_mpool[sid + 1];
dst->mtu = RTE_MIN(bdp->mtu, bed->port.mtu);
dst->l2_len = sizeof(*eth);
- eth = (struct ether_hdr *)dst->hdr;
+ eth = (struct rte_ether_hdr *)dst->hdr;
- ether_addr_copy(&bed->port.mac, &eth->s_addr);
- ether_addr_copy(&bdp->mac, &eth->d_addr);
+ rte_ether_addr_copy(&bed->port.mac, &eth->s_addr);
+ rte_ether_addr_copy(&bdp->mac, &eth->d_addr);
eth->ether_type = rte_cpu_to_be_16(l3_type);
- if (l3_type == ETHER_TYPE_IPv4) {
+ if (l3_type == RTE_ETHER_TYPE_IPV4) {
dst->l3_len = sizeof(*ip4h);
- ip4h = (struct ipv4_hdr *)(eth + 1);
+ ip4h = (struct rte_ipv4_hdr *)(eth + 1);
ip4h->version_ihl = 4 << 4 |
- sizeof(*ip4h) / IPV4_IHL_MULTIPLIER;
+ sizeof(*ip4h) / RTE_IPV4_IHL_MULTIPLIER;
ip4h->time_to_live = 64;
ip4h->next_proto_id = proto_id;
- } else if (l3_type == ETHER_TYPE_IPv6) {
+ } else if (l3_type == RTE_ETHER_TYPE_IPV6) {
dst->l3_len = sizeof(*ip6h);
- ip6h = (struct ipv6_hdr *)(eth + 1);
+ ip6h = (struct rte_ipv6_hdr *)(eth + 1);
ip6h->vtc_flow = 6 << 4;
ip6h->proto = proto_id;
ip6h->hop_limits = 64;
@@ -402,12 +402,12 @@ netbe_add_dest(struct netbe_lcore *lc, uint32_t dev_idx, uint16_t family,
n = lc->dst4_num;
dp = lc->dst4 + n;
m = RTE_DIM(lc->dst4);
- l3_type = ETHER_TYPE_IPv4;
+ l3_type = RTE_ETHER_TYPE_IPV4;
} else {
n = lc->dst6_num;
dp = lc->dst6 + n;
m = RTE_DIM(lc->dst6);
- l3_type = ETHER_TYPE_IPv6;
+ l3_type = RTE_ETHER_TYPE_IPV6;
}
if (n + dnum >= m) {
@@ -441,21 +441,21 @@ netbe_add_dest(struct netbe_lcore *lc, uint32_t dev_idx, uint16_t family,
static inline void
fill_arp_reply(struct netbe_dev *dev, struct rte_mbuf *m)
{
- struct ether_hdr *eth;
- struct arp_hdr *ahdr;
- struct arp_ipv4 *adata;
+ struct rte_ether_hdr *eth;
+ struct rte_arp_hdr *ahdr;
+ struct rte_arp_ipv4 *adata;
uint32_t tip;
/* set up the ethernet data */
- eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
eth->d_addr = eth->s_addr;
eth->s_addr = dev->port.mac;
/* set up the arp data */
- ahdr = rte_pktmbuf_mtod_offset(m, struct arp_hdr *, m->l2_len);
+ ahdr = rte_pktmbuf_mtod_offset(m, struct rte_arp_hdr *, m->l2_len);
adata = &ahdr->arp_data;
- ahdr->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
+ ahdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY);
tip = adata->arp_tip;
adata->arp_tip = adata->arp_sip;
diff --git a/examples/l4fwd/netbe.h b/examples/l4fwd/netbe.h
index 661cdcb..430bc28 100644
--- a/examples/l4fwd/netbe.h
+++ b/examples/l4fwd/netbe.h
@@ -75,7 +75,7 @@ struct netbe_port {
uint64_t tx_offload;
uint32_t ipv4;
struct in6_addr ipv6;
- struct ether_addr mac;
+ struct rte_ether_addr mac;
uint32_t hash_key_size;
uint8_t hash_key[RSS_HASH_KEY_LENGTH];
};
@@ -90,7 +90,7 @@ struct netbe_dest {
struct in_addr ipv4;
struct in6_addr ipv6;
};
- struct ether_addr mac;
+ struct rte_ether_addr mac;
};
struct netbe_dest_prm {
diff --git a/examples/l4fwd/parse.c b/examples/l4fwd/parse.c
index a1e7917..b936bab 100644
--- a/examples/l4fwd/parse.c
+++ b/examples/l4fwd/parse.c
@@ -334,7 +334,7 @@ parse_netbe_arg(struct netbe_port *prt, const char *arg, rte_cpuset_t *pcpu)
union parse_val val[RTE_DIM(hndl)];
memset(val, 0, sizeof(val));
- val[2].u64 = ETHER_MAX_LEN - ETHER_CRC_LEN;
+ val[2].u64 = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
rc = parse_kvargs(arg, keys_man, RTE_DIM(keys_man),
keys_opt, RTE_DIM(keys_opt), hndl, val);
@@ -377,7 +377,8 @@ check_netbe_dest(const struct netbe_dest *dst)
RTE_LOG(ERR, USER1, "%s(line=%u) invalid masklen=%u",
__func__, dst->line, dst->prfx);
return -EINVAL;
- } else if (dst->mtu > ETHER_MAX_JUMBO_FRAME_LEN - ETHER_CRC_LEN) {
+ } else if (dst->mtu >
+ RTE_ETHER_MAX_JUMBO_FRAME_LEN - RTE_ETHER_CRC_LEN) {
RTE_LOG(ERR, USER1, "%s(line=%u) invalid mtu=%u",
__func__, dst->line, dst->mtu);
return -EINVAL;
@@ -413,7 +414,7 @@ parse_netbe_dest(struct netbe_dest *dst, const char *arg)
/* set default values. */
memset(val, 0, sizeof(val));
- val[4].u64 = ETHER_MAX_JUMBO_FRAME_LEN - ETHER_CRC_LEN;
+ val[4].u64 = RTE_ETHER_MAX_JUMBO_FRAME_LEN - RTE_ETHER_CRC_LEN;
rc = parse_kvargs(arg, keys_man, RTE_DIM(keys_man),
keys_opt, RTE_DIM(keys_opt), hndl, val);
diff --git a/examples/l4fwd/parse.h b/examples/l4fwd/parse.h
index 4303623..4634d60 100644
--- a/examples/l4fwd/parse.h
+++ b/examples/l4fwd/parse.h
@@ -29,7 +29,7 @@ union parse_val {
struct in6_addr addr6;
};
} in;
- struct ether_addr mac;
+ struct rte_ether_addr mac;
rte_cpuset_t cpuset;
};
diff --git a/examples/l4fwd/pkt.c b/examples/l4fwd/pkt.c
index 43aa9c8..6694e81 100644
--- a/examples/l4fwd/pkt.c
+++ b/examples/l4fwd/pkt.c
@@ -49,9 +49,9 @@ fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4)
}
static inline int
-is_ipv4_frag(const struct ipv4_hdr *iph)
+is_ipv4_frag(const struct rte_ipv4_hdr *iph)
{
- const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);
+ const uint16_t mask = rte_cpu_to_be_16(~RTE_IPV4_HDR_DF_FLAG);
return ((mask & iph->fragment_offset) != 0);
}
@@ -59,9 +59,9 @@ is_ipv4_frag(const struct ipv4_hdr *iph)
static inline uint32_t
get_tcp_header_size(struct rte_mbuf *m, uint32_t l2_len, uint32_t l3_len)
{
- const struct tcp_hdr *tcp;
+ const struct rte_tcp_hdr *tcp;
- tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
+ tcp = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, l2_len + l3_len);
return (tcp->data_off >> 4) * 4;
}
@@ -69,9 +69,9 @@ static inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
uint32_t plen, trim;
- const struct ipv4_hdr *iph;
+ const struct rte_ipv4_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2_len);
plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
if (plen < m->pkt_len) {
trim = m->pkt_len - plen;
@@ -83,9 +83,9 @@ static inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
uint32_t plen, trim;
- const struct ipv6_hdr *iph;
+ const struct rte_ipv6_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *, l2_len);
plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
if (plen < m->pkt_len) {
trim = m->pkt_len - plen;
@@ -97,23 +97,24 @@ static inline void
tcp_stat_update(struct netbe_lcore *lc, const struct rte_mbuf *m,
uint32_t l2_len, uint32_t l3_len)
{
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
- th = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
+ th = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, l2_len + l3_len);
lc->tcp_stat.flags[th->tcp_flags]++;
}
static inline uint32_t
get_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto, uint32_t frag)
{
- const struct ipv4_hdr *iph;
+ const struct rte_ipv4_hdr *iph;
int32_t dlen, len;
dlen = rte_pktmbuf_data_len(m);
dlen -= l2;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
- len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2);
+ len = (iph->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+ RTE_IPV4_IHL_MULTIPLIER;
if (frag != 0 && is_ipv4_frag(iph)) {
m->packet_type &= ~RTE_PTYPE_L4_MASK;
@@ -155,7 +156,7 @@ get_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
const struct ip6_ext *ipx;
int32_t dlen, len, ofs;
- len = sizeof(struct ipv6_hdr);
+ len = sizeof(struct rte_ipv6_hdr);
dlen = rte_pktmbuf_data_len(m);
dlen -= l2;
@@ -205,13 +206,13 @@ get_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
static inline uint32_t
get_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
- const struct ipv6_hdr *iph;
+ const struct rte_ipv6_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *,
- sizeof(struct ether_hdr));
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *,
+ sizeof(struct rte_ether_hdr));
if (iph->proto == fproto)
- return sizeof(struct ipv6_hdr);
+ return sizeof(struct rte_ipv6_hdr);
else if (ipv6x_hdr(iph->proto) != 0)
return get_ipv6x_hdr_len(m, l2, iph->proto, fproto);
@@ -234,14 +235,14 @@ static inline struct rte_mbuf *
handle_arp(struct rte_mbuf *m, struct netbe_lcore *lc, dpdk_port_t port,
uint32_t l2len)
{
- const struct arp_hdr *ahdr;
+ const struct rte_arp_hdr *ahdr;
struct pkt_buf *abuf;
- ahdr = rte_pktmbuf_mtod_offset(m, const struct arp_hdr *, l2len);
+ ahdr = rte_pktmbuf_mtod_offset(m, const struct rte_arp_hdr *, l2len);
- if (ahdr->arp_hrd != rte_be_to_cpu_16(ARP_HRD_ETHER) ||
- ahdr->arp_pro != rte_be_to_cpu_16(ETHER_TYPE_IPv4) ||
- ahdr->arp_op != rte_be_to_cpu_16(ARP_OP_REQUEST)) {
+ if (ahdr->arp_hardware != rte_be_to_cpu_16(RTE_ARP_HRD_ETHER) ||
+ ahdr->arp_protocol != rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4) ||
+ ahdr->arp_opcode != rte_be_to_cpu_16(RTE_ARP_OP_REQUEST)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return m;
@@ -263,28 +264,28 @@ fill_eth_tcp_arp_hdr_len(struct rte_mbuf *m, struct netbe_lcore *lc,
{
uint32_t dlen, l2_len, l3_len, l4_len;
uint16_t etp;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
dlen = rte_pktmbuf_data_len(m);
/* check that first segment is at least 54B long. */
- if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
- sizeof(struct tcp_hdr)) {
+ if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return m;
}
l2_len = sizeof(*eth);
- eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
etp = eth->ether_type;
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
- l2_len += sizeof(struct vlan_hdr);
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+ l2_len += sizeof(struct rte_vlan_hdr);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_ARP))
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_ARP))
return handle_arp(m, lc, port, l2_len);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
@@ -292,9 +293,9 @@ fill_eth_tcp_arp_hdr_len(struct rte_mbuf *m, struct netbe_lcore *lc,
l4_len = get_tcp_header_size(m, l2_len, l3_len);
fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
adjust_ipv4_pktlen(m, l2_len);
- } else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
- dlen >= l2_len + sizeof(struct ipv6_hdr) +
- sizeof(struct tcp_hdr)) {
+ } else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+ dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
@@ -313,25 +314,25 @@ fill_eth_tcp_hdr_len(struct rte_mbuf *m)
{
uint32_t dlen, l2_len, l3_len, l4_len;
uint16_t etp;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
dlen = rte_pktmbuf_data_len(m);
/* check that first segment is at least 54B long. */
- if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
- sizeof(struct tcp_hdr)) {
+ if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return;
}
l2_len = sizeof(*eth);
- eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
etp = eth->ether_type;
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
- l2_len += sizeof(struct vlan_hdr);
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+ l2_len += sizeof(struct rte_vlan_hdr);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
@@ -339,9 +340,9 @@ fill_eth_tcp_hdr_len(struct rte_mbuf *m)
l4_len = get_tcp_header_size(m, l2_len, l3_len);
fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
adjust_ipv4_pktlen(m, l2_len);
- } else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
- dlen >= l2_len + sizeof(struct ipv6_hdr) +
- sizeof(struct tcp_hdr)) {
+ } else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+ dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+ sizeof(struct rte_tcp_hdr)) {
m->packet_type = RTE_PTYPE_L4_TCP |
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
@@ -358,38 +359,38 @@ fill_eth_udp_hdr_len(struct rte_mbuf *m)
{
uint32_t dlen, l2_len;
uint16_t etp;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
dlen = rte_pktmbuf_data_len(m);
/* check that first segment is at least 42B long. */
- if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
- sizeof(struct udp_hdr)) {
+ if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_udp_hdr)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return;
}
l2_len = sizeof(*eth);
- eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
etp = eth->ether_type;
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
- l2_len += sizeof(struct vlan_hdr);
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+ l2_len += sizeof(struct rte_vlan_hdr);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
m->packet_type = RTE_PTYPE_L4_UDP |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
fill_ipv4_hdr_len(m, l2_len, IPPROTO_UDP, 1,
- sizeof(struct udp_hdr));
- } else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
- dlen >= l2_len + sizeof(struct ipv6_hdr) +
- sizeof(struct udp_hdr)) {
+ sizeof(struct rte_udp_hdr));
+ } else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+ dlen >= l2_len + sizeof(struct rte_ipv6_hdr) +
+ sizeof(struct rte_udp_hdr)) {
m->packet_type = RTE_PTYPE_L4_UDP |
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
fill_ipv6_hdr_len(m, l2_len, IPPROTO_UDP,
- sizeof(struct udp_hdr));
+ sizeof(struct rte_udp_hdr));
} else
m->packet_type = RTE_PTYPE_UNKNOWN;
}
@@ -406,7 +407,7 @@ ipv4x_cksum(const void *iph, size_t len)
static inline void
fix_reassembled(struct rte_mbuf *m, int32_t hwcsum, uint32_t proto)
{
- struct ipv4_hdr *iph;
+ struct rte_ipv4_hdr *iph;
/* update packet type. */
m->packet_type &= ~RTE_PTYPE_L4_MASK;
@@ -425,7 +426,8 @@ fix_reassembled(struct rte_mbuf *m, int32_t hwcsum, uint32_t proto)
/* recalculate ipv4 cksum after reassemble. */
else if (hwcsum == 0 && RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
- iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->l2_len);
iph->hdr_checksum = ipv4x_cksum(iph, m->l3_len);
}
}
@@ -444,19 +446,21 @@ reassemble(struct rte_mbuf *m, struct netbe_lcore *lc, uint64_t tms,
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
- struct ipv4_hdr *iph;
+ struct rte_ipv4_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->l2_len);
/* process this fragment. */
m = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, iph);
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
- struct ipv6_hdr *iph;
+ struct rte_ipv6_hdr *iph;
struct ipv6_extension_fragment *fhdr;
- iph = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, m->l2_len);
+ iph = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+ m->l2_len);
/*
* we store fragment header offset in tso_segsz before
@@ -535,7 +539,7 @@ type0_tcp_rx_callback(__rte_unused dpdk_port_t port,
uint32_t j, tp;
struct netbe_lcore *lc;
uint32_t l4_len, l3_len, l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
lc = user_param;
l2_len = sizeof(*eth);
@@ -554,17 +558,17 @@ type0_tcp_rx_callback(__rte_unused dpdk_port_t port,
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L2_ETHER):
l4_len = get_tcp_header_size(pkt[j], l2_len,
- sizeof(struct ipv4_hdr));
+ sizeof(struct rte_ipv4_hdr));
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv4_hdr), l4_len);
+ sizeof(struct rte_ipv4_hdr), l4_len);
adjust_ipv4_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L2_ETHER):
l4_len = get_tcp_header_size(pkt[j], l2_len,
- sizeof(struct ipv6_hdr));
+ sizeof(struct rte_ipv6_hdr));
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv6_hdr), l4_len);
+ sizeof(struct rte_ipv6_hdr), l4_len);
adjust_ipv6_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT |
@@ -604,7 +608,7 @@ type0_udp_rx_callback(dpdk_port_t port, __rte_unused uint16_t queue,
uint64_t cts;
struct netbe_lcore *lc;
uint32_t l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
lc = user_param;
cts = 0;
@@ -623,37 +627,37 @@ type0_udp_rx_callback(dpdk_port_t port, __rte_unused uint16_t queue,
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L2_ETHER):
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv4_hdr),
- sizeof(struct udp_hdr));
+ sizeof(struct rte_ipv4_hdr),
+ sizeof(struct rte_udp_hdr));
adjust_ipv4_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L2_ETHER):
fill_pkt_hdr_len(pkt[j], l2_len,
- sizeof(struct ipv6_hdr),
- sizeof(struct udp_hdr));
+ sizeof(struct rte_ipv6_hdr),
+ sizeof(struct rte_udp_hdr));
adjust_ipv6_pktlen(pkt[j], l2_len);
break;
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L2_ETHER):
fill_ipv4_hdr_len(pkt[j], l2_len,
- UINT32_MAX, 0, sizeof(struct udp_hdr));
+ UINT32_MAX, 0, sizeof(struct rte_udp_hdr));
break;
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6_EXT |
RTE_PTYPE_L2_ETHER):
fill_ipv6_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, sizeof(struct udp_hdr));
+ IPPROTO_UDP, sizeof(struct rte_udp_hdr));
break;
/* possibly fragmented udp packets. */
case (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER):
case (RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER):
fill_ipv4_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, 1, sizeof(struct udp_hdr));
+ IPPROTO_UDP, 1, sizeof(struct rte_udp_hdr));
break;
case (RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER):
case (RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L2_ETHER):
fill_ipv6_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, sizeof(struct udp_hdr));
+ IPPROTO_UDP, sizeof(struct rte_udp_hdr));
break;
default:
/* treat packet types as invalid. */
@@ -690,7 +694,7 @@ type1_tcp_rx_callback(__rte_unused dpdk_port_t port,
uint32_t j, tp;
struct netbe_lcore *lc;
uint32_t l4_len, l3_len, l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
lc = user_param;
l2_len = sizeof(*eth);
@@ -745,7 +749,7 @@ type1_udp_rx_callback(dpdk_port_t port, __rte_unused uint16_t queue,
uint64_t cts;
struct netbe_lcore *lc;
uint32_t l2_len;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
lc = user_param;
cts = 0;
@@ -763,22 +767,22 @@ type1_udp_rx_callback(dpdk_port_t port, __rte_unused uint16_t queue,
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER):
fill_ipv4_hdr_len(pkt[j], l2_len,
- UINT32_MAX, 0, sizeof(struct udp_hdr));
+ UINT32_MAX, 0, sizeof(struct rte_udp_hdr));
break;
case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER):
fill_ipv6_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, sizeof(struct udp_hdr));
+ IPPROTO_UDP, sizeof(struct rte_udp_hdr));
break;
case (RTE_PTYPE_L4_FRAG | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER):
fill_ipv4_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, 0, sizeof(struct udp_hdr));
+ IPPROTO_UDP, 0, sizeof(struct rte_udp_hdr));
break;
case (RTE_PTYPE_L4_FRAG | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER):
fill_ipv6_hdr_len(pkt[j], l2_len,
- IPPROTO_UDP, sizeof(struct udp_hdr));
+ IPPROTO_UDP, sizeof(struct rte_udp_hdr));
break;
default:
/* treat packet types as invalid. */
diff --git a/examples/l4fwd/port.h b/examples/l4fwd/port.h
index 8c1a899..ce730dd 100644
--- a/examples/l4fwd/port.h
+++ b/examples/l4fwd/port.h
@@ -182,8 +182,8 @@ port_init(struct netbe_port *uprt, uint32_t proto)
__func__, uprt->id);
port_conf.rxmode.offloads |= uprt->rx_offload & RX_CSUM_OFFLOAD;
}
- port_conf.rxmode.max_rx_pkt_len = uprt->mtu + ETHER_CRC_LEN;
- if (port_conf.rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
+ port_conf.rxmode.max_rx_pkt_len = uprt->mtu + RTE_ETHER_CRC_LEN;
+ if (port_conf.rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
rc = update_rss_conf(uprt, &dev_info, &port_conf, proto);
@@ -258,8 +258,7 @@ check_lcore(uint32_t lc)
return -EINVAL;
}
if (rte_eal_get_lcore_state(lc) == RUNNING) {
- RTE_LOG(ERR, USER1, "lcore %u already running %p\n",
- lc, lcore_config[lc].f);
+ RTE_LOG(ERR, USER1, "lcore %u already in use\n", lc);
return -EINVAL;
}
return 0;
diff --git a/examples/l4fwd/udp.h b/examples/l4fwd/udp.h
index c079e9c..2465f08 100644
--- a/examples/l4fwd/udp.h
+++ b/examples/l4fwd/udp.h
@@ -252,25 +252,25 @@ static inline void
netfe_pkt_addr(const struct rte_mbuf *m, struct sockaddr_storage *ps,
uint16_t family)
{
- const struct ipv4_hdr *ip4h;
- const struct ipv6_hdr *ip6h;
- const struct udp_hdr *udph;
+ const struct rte_ipv4_hdr *ip4h;
+ const struct rte_ipv6_hdr *ip6h;
+ const struct rte_udp_hdr *udph;
struct sockaddr_in *in4;
struct sockaddr_in6 *in6;
NETFE_PKT_DUMP(m);
- udph = rte_pktmbuf_mtod_offset(m, struct udp_hdr *, -m->l4_len);
+ udph = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, -m->l4_len);
if (family == AF_INET) {
in4 = (struct sockaddr_in *)ps;
- ip4h = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ ip4h = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-(m->l4_len + m->l3_len));
in4->sin_port = udph->src_port;
in4->sin_addr.s_addr = ip4h->src_addr;
} else {
in6 = (struct sockaddr_in6 *)ps;
- ip6h = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ ip6h = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
-(m->l4_len + m->l3_len));
in6->sin6_port = udph->src_port;
rte_memcpy(&in6->sin6_addr, ip6h->src_addr,
diff --git a/lib/libtle_l4p/misc.h b/lib/libtle_l4p/misc.h
index 327296f..e1efb0d 100644
--- a/lib/libtle_l4p/misc.h
+++ b/lib/libtle_l4p/misc.h
@@ -207,7 +207,7 @@ __udptcp_mbuf_cksum(const struct rte_mbuf *mb, uint16_t l4_ofs,
* The non-complemented checksum to set in the L4 header.
*/
static inline uint16_t
-_ipv4x_phdr_cksum(const struct ipv4_hdr *ipv4_hdr, size_t ipv4h_len,
+_ipv4x_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, size_t ipv4h_len,
uint64_t ol_flags)
{
uint32_t s0, s1;
@@ -243,7 +243,7 @@ _ipv4x_phdr_cksum(const struct ipv4_hdr *ipv4_hdr, size_t ipv4h_len,
*/
static inline int
_ipv4_udptcp_mbuf_cksum(const struct rte_mbuf *mb, uint16_t l4_ofs,
- const struct ipv4_hdr *ipv4_hdr)
+ const struct rte_ipv4_hdr *ipv4_hdr)
{
uint32_t cksum;
@@ -267,7 +267,7 @@ _ipv4_udptcp_mbuf_cksum(const struct rte_mbuf *mb, uint16_t l4_ofs,
*/
static inline int
_ipv6_udptcp_mbuf_cksum(const struct rte_mbuf *mb, uint16_t l4_ofs,
- const struct ipv6_hdr *ipv6_hdr)
+ const struct rte_ipv6_hdr *ipv6_hdr)
{
uint32_t cksum;
@@ -293,9 +293,9 @@ static inline int
check_pkt_csum(const struct rte_mbuf *m, uint64_t ol_flags, uint32_t type,
uint32_t proto)
{
- const struct ipv4_hdr *l3h4;
- const struct ipv6_hdr *l3h6;
- const struct udp_hdr *l4h;
+ const struct rte_ipv4_hdr *l3h4;
+ const struct rte_ipv6_hdr *l3h6;
+ const struct rte_udp_hdr *l4h;
uint64_t fl3, fl4;
uint16_t csum;
int32_t ret;
@@ -313,8 +313,10 @@ check_pkt_csum(const struct rte_mbuf *m, uint64_t ol_flags, uint32_t type,
return 1;
/* case 2: either ip or l4 or both cksum is unknown */
- l3h4 = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, m->l2_len);
- l3h6 = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, m->l2_len);
+ l3h4 = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *,
+ m->l2_len);
+ l3h6 = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *,
+ m->l2_len);
ret = 0;
if (fl3 == PKT_RX_IP_CKSUM_UNKNOWN && l3h4->hdr_checksum != 0) {
@@ -329,7 +331,7 @@ check_pkt_csum(const struct rte_mbuf *m, uint64_t ol_flags, uint32_t type,
* for IPv6 valid UDP cksum is mandatory.
*/
if (type == TLE_V4) {
- l4h = (const struct udp_hdr *)((uintptr_t)l3h4 +
+ l4h = (const struct rte_udp_hdr *)((uintptr_t)l3h4 +
m->l3_len);
csum = (proto == IPPROTO_UDP && l4h->dgram_cksum == 0) ?
UINT16_MAX : _ipv4_udptcp_mbuf_cksum(m,
diff --git a/lib/libtle_l4p/stream.h b/lib/libtle_l4p/stream.h
index 49a2809..ebefa6c 100644
--- a/lib/libtle_l4p/stream.h
+++ b/lib/libtle_l4p/stream.h
@@ -151,13 +151,13 @@ stream_get_dest(struct tle_stream *s, const void *dst_addr,
dst->ol_flags = dev->tx.ol_flags[s->type];
if (s->type == TLE_V4) {
- struct ipv4_hdr *l3h;
- l3h = (struct ipv4_hdr *)(dst->hdr + dst->l2_len);
+ struct rte_ipv4_hdr *l3h;
+ l3h = (struct rte_ipv4_hdr *)(dst->hdr + dst->l2_len);
l3h->src_addr = dev->prm.local_addr4.s_addr;
l3h->dst_addr = d4->s_addr;
} else {
- struct ipv6_hdr *l3h;
- l3h = (struct ipv6_hdr *)(dst->hdr + dst->l2_len);
+ struct rte_ipv6_hdr *l3h;
+ l3h = (struct rte_ipv6_hdr *)(dst->hdr + dst->l2_len);
rte_memcpy(l3h->src_addr, &dev->prm.local_addr6,
sizeof(l3h->src_addr));
rte_memcpy(l3h->dst_addr, d6, sizeof(l3h->dst_addr));
diff --git a/lib/libtle_l4p/tcp_misc.h b/lib/libtle_l4p/tcp_misc.h
index 0cef8b2..01c1e67 100644
--- a/lib/libtle_l4p/tcp_misc.h
+++ b/lib/libtle_l4p/tcp_misc.h
@@ -33,10 +33,10 @@ extern "C" {
#define TCP_WSCALE_DEFAULT 7
#define TCP_WSCALE_NONE 0
-#define TCP_TX_HDR_MAX (sizeof(struct tcp_hdr) + TCP_TX_OPT_LEN_MAX)
+#define TCP_TX_HDR_MAX (sizeof(struct rte_tcp_hdr) + TCP_TX_OPT_LEN_MAX)
/* max header size for normal data+ack packet */
-#define TCP_TX_HDR_DACK (sizeof(struct tcp_hdr) + TCP_TX_OPT_LEN_TMS)
+#define TCP_TX_HDR_DACK (sizeof(struct rte_tcp_hdr) + TCP_TX_OPT_LEN_TMS)
#define TCP4_MIN_MSS 536
@@ -44,10 +44,12 @@ extern "C" {
/* default MTU, no TCP options. */
#define TCP4_NOP_MSS \
- (ETHER_MTU - sizeof(struct ipv4_hdr) - sizeof(struct tcp_hdr))
+ (RTE_ETHER_MTU - sizeof(struct rte_ipv4_hdr) - \
+ sizeof(struct rte_tcp_hdr))
#define TCP6_NOP_MSS \
- (ETHER_MTU - sizeof(struct ipv6_hdr) - sizeof(struct tcp_hdr))
+ (RTE_ETHER_MTU - sizeof(struct rte_ipv6_hdr) - \
+ sizeof(struct rte_tcp_hdr))
/* default MTU, TCP options present */
#define TCP4_OP_MSS (TCP4_NOP_MSS - TCP_TX_OPT_LEN_MAX)
@@ -256,7 +258,7 @@ tcp_seq_min(uint32_t l, uint32_t r)
}
static inline void
-get_seg_info(const struct tcp_hdr *th, union seg_info *si)
+get_seg_info(const struct rte_tcp_hdr *th, union seg_info *si)
{
__m128i v;
const __m128i bswap_mask =
@@ -421,7 +423,7 @@ static inline void
get_pkt_info(const struct rte_mbuf *m, union pkt_info *pi, union seg_info *si)
{
uint32_t len, type;
- const struct tcp_hdr *tcph;
+ const struct rte_tcp_hdr *tcph;
const union l4_ports *prt;
const union ipv4_addrs *pa4;
@@ -436,17 +438,17 @@ get_pkt_info(const struct rte_mbuf *m, union pkt_info *pi, union seg_info *si)
if (type == TLE_V4) {
pa4 = rte_pktmbuf_mtod_offset(m, const union ipv4_addrs *,
- len + offsetof(struct ipv4_hdr, src_addr));
+ len + offsetof(struct rte_ipv4_hdr, src_addr));
pi->addr4.raw = pa4->raw;
} else if (type == TLE_V6) {
pi->addr6 = rte_pktmbuf_mtod_offset(m, const union ipv6_addrs *,
- len + offsetof(struct ipv6_hdr, src_addr));
+ len + offsetof(struct rte_ipv6_hdr, src_addr));
}
len += m->l3_len;
- tcph = rte_pktmbuf_mtod_offset(m, const struct tcp_hdr *, len);
+ tcph = rte_pktmbuf_mtod_offset(m, const struct rte_tcp_hdr *, len);
prt = (const union l4_ports *)
- ((uintptr_t)tcph + offsetof(struct tcp_hdr, src_port));
+ ((uintptr_t)tcph + offsetof(struct rte_tcp_hdr, src_port));
pi->tf.flags = tcph->tcp_flags;
pi->tf.type = type;
pi->csf = m->ol_flags & (PKT_RX_IP_CKSUM_MASK | PKT_RX_L4_CKSUM_MASK);
diff --git a/lib/libtle_l4p/tcp_rxtx.c b/lib/libtle_l4p/tcp_rxtx.c
index b4bc626..b1aad60 100644
--- a/lib/libtle_l4p/tcp_rxtx.c
+++ b/lib/libtle_l4p/tcp_rxtx.c
@@ -183,7 +183,7 @@ get_ip_pid(struct tle_dev *dev, uint32_t num, uint32_t type, uint32_t st)
}
static inline void
-fill_tcph(struct tcp_hdr *l4h, const struct tcb *tcb, union l4_ports port,
+fill_tcph(struct rte_tcp_hdr *l4h, const struct tcb *tcb, union l4_ports port,
uint32_t seq, uint8_t hlen, uint8_t flags)
{
uint16_t wnd;
@@ -217,7 +217,7 @@ tcp_fill_mbuf(struct rte_mbuf *m, const struct tle_tcp_stream *s,
uint32_t pid, uint32_t swcsm)
{
uint32_t l4, len, plen;
- struct tcp_hdr *l4h;
+ struct rte_tcp_hdr *l4h;
char *l2h;
len = dst->l2_len + dst->l3_len;
@@ -239,7 +239,7 @@ tcp_fill_mbuf(struct rte_mbuf *m, const struct tle_tcp_stream *s,
rte_memcpy(l2h, dst->hdr, len);
/* setup TCP header & options */
- l4h = (struct tcp_hdr *)(l2h + len);
+ l4h = (struct rte_tcp_hdr *)(l2h + len);
fill_tcph(l4h, &s->tcb, port, seq, l4, flags);
/* setup mbuf TX offload related fields. */
@@ -249,8 +249,8 @@ tcp_fill_mbuf(struct rte_mbuf *m, const struct tle_tcp_stream *s,
/* update proto specific fields. */
if (s->s.type == TLE_V4) {
- struct ipv4_hdr *l3h;
- l3h = (struct ipv4_hdr *)(l2h + dst->l2_len);
+ struct rte_ipv4_hdr *l3h;
+ l3h = (struct rte_ipv4_hdr *)(l2h + dst->l2_len);
l3h->packet_id = rte_cpu_to_be_16(pid);
l3h->total_length = rte_cpu_to_be_16(plen + dst->l3_len + l4);
@@ -263,8 +263,8 @@ tcp_fill_mbuf(struct rte_mbuf *m, const struct tle_tcp_stream *s,
if ((ol_flags & PKT_TX_IP_CKSUM) == 0 && swcsm != 0)
l3h->hdr_checksum = _ipv4x_cksum(l3h, m->l3_len);
} else {
- struct ipv6_hdr *l3h;
- l3h = (struct ipv6_hdr *)(l2h + dst->l2_len);
+ struct rte_ipv6_hdr *l3h;
+ l3h = (struct rte_ipv6_hdr *)(l2h + dst->l2_len);
l3h->payload_len = rte_cpu_to_be_16(plen + l4);
if ((ol_flags & PKT_TX_TCP_CKSUM) != 0)
l4h->cksum = rte_ipv6_phdr_cksum(l3h, ol_flags);
@@ -285,11 +285,11 @@ static inline void
tcp_update_mbuf(struct rte_mbuf *m, uint32_t type, const struct tcb *tcb,
uint32_t seq, uint32_t pid)
{
- struct tcp_hdr *l4h;
+ struct rte_tcp_hdr *l4h;
uint32_t len;
len = m->l2_len + m->l3_len;
- l4h = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, len);
+ l4h = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, len);
l4h->sent_seq = rte_cpu_to_be_32(seq);
l4h->recv_ack = rte_cpu_to_be_32(tcb->rcv.nxt);
@@ -298,8 +298,9 @@ tcp_update_mbuf(struct rte_mbuf *m, uint32_t type, const struct tcb *tcb,
fill_tms_opts(l4h + 1, tcb->snd.ts, tcb->rcv.ts);
if (type == TLE_V4) {
- struct ipv4_hdr *l3h;
- l3h = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ struct rte_ipv4_hdr *l3h;
+ l3h = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ m->l2_len);
l3h->hdr_checksum = 0;
l3h->packet_id = rte_cpu_to_be_16(pid);
if ((m->ol_flags & PKT_TX_IP_CKSUM) == 0)
@@ -312,14 +313,14 @@ tcp_update_mbuf(struct rte_mbuf *m, uint32_t type, const struct tcb *tcb,
l4h->cksum = 0;
if (type == TLE_V4) {
- struct ipv4_hdr *l3h;
- l3h = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ struct rte_ipv4_hdr *l3h;
+ l3h = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
m->l2_len);
l4h->cksum = _ipv4_udptcp_mbuf_cksum(m, len, l3h);
} else {
- struct ipv6_hdr *l3h;
- l3h = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ struct rte_ipv6_hdr *l3h;
+ l3h = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
m->l2_len);
l4h->cksum = _ipv6_udptcp_mbuf_cksum(m, len, l3h);
}
@@ -635,7 +636,7 @@ sync_ack(struct tle_tcp_stream *s, const union pkt_info *pi,
struct tle_dev *dev;
const void *da;
struct tle_dest dst;
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
type = s->s.type;
@@ -649,7 +650,7 @@ sync_ack(struct tle_tcp_stream *s, const union pkt_info *pi,
if (rc < 0)
return rc;
- th = rte_pktmbuf_mtod_offset(m, const struct tcp_hdr *,
+ th = rte_pktmbuf_mtod_offset(m, const struct rte_tcp_hdr *,
m->l2_len + m->l3_len);
get_syn_opts(&s->tcb.so, (uintptr_t)(th + 1), m->l4_len - sizeof(*th));
@@ -714,7 +715,7 @@ rx_tms_opt(const struct tcb *tcb, const struct rte_mbuf *mb)
{
union tsopt ts;
uintptr_t opt;
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
if (tcb->so.ts.val != 0) {
opt = rte_pktmbuf_mtod_offset(mb, uintptr_t,
@@ -786,7 +787,7 @@ restore_syn_opt(union seg_info *si, union tsopt *to,
{
int32_t rc;
uint32_t len;
- const struct tcp_hdr *th;
+ const struct rte_tcp_hdr *th;
/* check that ACK, etc fields are what we expected. */
rc = sync_check_ack(pi, si->seq, si->ack - 1, ts,
@@ -797,7 +798,7 @@ restore_syn_opt(union seg_info *si, union tsopt *to,
si->mss = rc;
- th = rte_pktmbuf_mtod_offset(mb, const struct tcp_hdr *,
+ th = rte_pktmbuf_mtod_offset(mb, const struct rte_tcp_hdr *,
mb->l2_len + mb->l3_len);
len = mb->l4_len - sizeof(*th);
to[0] = get_tms_opts((uintptr_t)(th + 1), len);
@@ -1553,7 +1554,7 @@ rx_synack(struct tle_tcp_stream *s, uint32_t ts, uint32_t state,
struct resp_info *rsp)
{
struct syn_opts so;
- struct tcp_hdr *th;
+ struct rte_tcp_hdr *th;
if (state != TCP_ST_SYN_SENT)
return -EINVAL;
@@ -1570,7 +1571,7 @@ rx_synack(struct tle_tcp_stream *s, uint32_t ts, uint32_t state,
return 0;
}
- th = rte_pktmbuf_mtod_offset(mb, struct tcp_hdr *,
+ th = rte_pktmbuf_mtod_offset(mb, struct rte_tcp_hdr *,
mb->l2_len + mb->l3_len);
get_syn_opts(&so, (uintptr_t)(th + 1), mb->l4_len - sizeof(*th));
diff --git a/lib/libtle_l4p/udp_rxtx.c b/lib/libtle_l4p/udp_rxtx.c
index 84a13ea..8963df5 100644
--- a/lib/libtle_l4p/udp_rxtx.c
+++ b/lib/libtle_l4p/udp_rxtx.c
@@ -69,16 +69,16 @@ pkt_info(struct rte_mbuf *m, union l4_ports *ports, union ipv4_addrs *addr4,
len = m->l2_len;
if (ret.src == TLE_V4) {
pa4 = rte_pktmbuf_mtod_offset(m, union ipv4_addrs *,
- len + offsetof(struct ipv4_hdr, src_addr));
+ len + offsetof(struct rte_ipv4_hdr, src_addr));
addr4->raw = pa4->raw;
} else if (ret.src == TLE_V6) {
*addr6 = rte_pktmbuf_mtod_offset(m, union ipv6_addrs *,
- len + offsetof(struct ipv6_hdr, src_addr));
+ len + offsetof(struct rte_ipv6_hdr, src_addr));
}
len += m->l3_len;
up = rte_pktmbuf_mtod_offset(m, union l4_ports *,
- len + offsetof(struct udp_hdr, src_port));
+ len + offsetof(struct rte_udp_hdr, src_port));
ports->raw = up->raw;
ret.dst = ports->dst;
return ret;
@@ -355,8 +355,8 @@ udp_fill_mbuf(struct rte_mbuf *m,
/* update proto specific fields. */
if (type == TLE_V4) {
- struct ipv4_hdr *l3h;
- l3h = (struct ipv4_hdr *)(l2h + dst->l2_len);
+ struct rte_ipv4_hdr *l3h;
+ l3h = (struct rte_ipv4_hdr *)(l2h + dst->l2_len);
l3h->packet_id = rte_cpu_to_be_16(pid);
l3h->total_length = rte_cpu_to_be_16(plen + dst->l3_len +
sizeof(*l4h));
@@ -370,8 +370,8 @@ udp_fill_mbuf(struct rte_mbuf *m,
if ((ol_flags & PKT_TX_IP_CKSUM) == 0)
l3h->hdr_checksum = _ipv4x_cksum(l3h, m->l3_len);
} else {
- struct ipv6_hdr *l3h;
- l3h = (struct ipv6_hdr *)(l2h + dst->l2_len);
+ struct rte_ipv6_hdr *l3h;
+ l3h = (struct rte_ipv6_hdr *)(l2h + dst->l2_len);
l3h->payload_len = rte_cpu_to_be_16(plen + sizeof(*l4h));
if ((ol_flags & PKT_TX_UDP_CKSUM) != 0)
l4h->cksum = rte_ipv6_phdr_cksum(l3h, ol_flags);
@@ -389,13 +389,13 @@ udp_fill_mbuf(struct rte_mbuf *m,
static inline void
frag_fixup(const struct rte_mbuf *ms, struct rte_mbuf *mf, uint32_t type)
{
- struct ipv4_hdr *l3h;
+ struct rte_ipv4_hdr *l3h;
mf->ol_flags = ms->ol_flags;
mf->tx_offload = ms->tx_offload;
if (type == TLE_V4 && (ms->ol_flags & PKT_TX_IP_CKSUM) == 0) {
- l3h = rte_pktmbuf_mtod(mf, struct ipv4_hdr *);
+ l3h = rte_pktmbuf_mtod(mf, struct rte_ipv4_hdr *);
l3h->hdr_checksum = _ipv4x_cksum(l3h, mf->l3_len);
}
}
diff --git a/test/gtest/test_common.cpp b/test/gtest/test_common.cpp
index 65e3a51..a91c8ba 100644
--- a/test/gtest/test_common.cpp
+++ b/test/gtest/test_common.cpp
@@ -27,7 +27,7 @@ port_init(dpdk_port_t port, struct rte_mempool *mbuf_pool)
socket_id = rte_eth_dev_socket_id(port);
memset(&port_conf, 0, sizeof(struct rte_eth_conf));
- port_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
+ port_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MAX_LEN;
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
@@ -77,9 +77,9 @@ fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4)
}
int
-is_ipv4_frag(const struct ipv4_hdr *iph)
+is_ipv4_frag(const struct rte_ipv4_hdr *iph)
{
- const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);
+ const uint16_t mask = rte_cpu_to_be_16(~RTE_IPV4_HDR_DF_FLAG);
return ((mask & iph->fragment_offset) != 0);
}
@@ -88,14 +88,14 @@ void
fill_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto,
uint32_t frag)
{
- const struct ipv4_hdr *iph;
+ const struct rte_ipv4_hdr *iph;
int32_t dlen, len;
dlen = rte_pktmbuf_data_len(m);
- dlen -= l2 + sizeof(struct udp_hdr);
+ dlen -= l2 + sizeof(struct rte_udp_hdr);
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
- len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv4_hdr *, l2);
+ len = (iph->version_ihl & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;
if (frag != 0 && is_ipv4_frag(iph)) {
m->packet_type &= ~RTE_PTYPE_L4_MASK;
@@ -105,7 +105,7 @@ fill_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto,
if (len > dlen || (proto <= IPPROTO_MAX && iph->next_proto_id != proto))
m->packet_type = RTE_PTYPE_UNKNOWN;
else
- fill_pkt_hdr_len(m, l2, len, sizeof(struct udp_hdr));
+ fill_pkt_hdr_len(m, l2, len, sizeof(struct rte_udp_hdr));
}
int
@@ -135,10 +135,10 @@ fill_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
const struct ip6_ext *ipx;
int32_t dlen, len, ofs;
- len = sizeof(struct ipv6_hdr);
+ len = sizeof(struct rte_ipv6_hdr);
dlen = rte_pktmbuf_data_len(m);
- dlen -= l2 + sizeof(struct udp_hdr);
+ dlen -= l2 + sizeof(struct rte_udp_hdr);
ofs = l2 + len;
ipx = rte_pktmbuf_mtod_offset(m, const struct ip6_ext *, ofs);
@@ -179,20 +179,20 @@ fill_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t nproto,
if ((ofs == 0 && nproto != fproto) || len > dlen)
m->packet_type = RTE_PTYPE_UNKNOWN;
else
- fill_pkt_hdr_len(m, l2, len, sizeof(struct udp_hdr));
+ fill_pkt_hdr_len(m, l2, len, sizeof(struct rte_udp_hdr));
}
void
fill_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
- const struct ipv6_hdr *iph;
+ const struct rte_ipv6_hdr *iph;
- iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *,
- sizeof(struct ether_hdr));
+ iph = rte_pktmbuf_mtod_offset(m, const struct rte_ipv6_hdr *,
+ sizeof(struct rte_ether_hdr));
if (iph->proto == fproto)
- fill_pkt_hdr_len(m, l2, sizeof(struct ipv6_hdr),
- sizeof(struct udp_hdr));
+ fill_pkt_hdr_len(m, l2, sizeof(struct rte_ipv6_hdr),
+ sizeof(struct rte_udp_hdr));
else if (ipv6x_hdr(iph->proto) != 0)
fill_ipv6x_hdr_len(m, l2, iph->proto, fproto);
}
@@ -202,32 +202,32 @@ fill_eth_hdr_len(struct rte_mbuf *m)
{
uint32_t dlen, l2;
uint16_t etp;
- const struct ether_hdr *eth;
+ const struct rte_ether_hdr *eth;
dlen = rte_pktmbuf_data_len(m);
/* check that first segment is at least 42B long. */
- if (dlen < sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
- sizeof(struct udp_hdr)) {
+ if (dlen < sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) +
+ sizeof(struct rte_udp_hdr)) {
m->packet_type = RTE_PTYPE_UNKNOWN;
return;
}
l2 = sizeof(*eth);
- eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
+ eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
etp = eth->ether_type;
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN))
- l2 += sizeof(struct vlan_hdr);
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN))
+ l2 += sizeof(struct rte_vlan_hdr);
- if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
+ if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4)) {
m->packet_type = RTE_PTYPE_L4_UDP |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
fill_ipv4_hdr_len(m, l2, IPPROTO_UDP, 1);
- } else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
- dlen >= l2 + sizeof(struct ipv6_hdr) +
- sizeof(struct udp_hdr)) {
+ } else if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV6) &&
+ dlen >= l2 + sizeof(struct rte_ipv6_hdr) +
+ sizeof(struct rte_udp_hdr)) {
m->packet_type = RTE_PTYPE_L4_UDP |
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L2_ETHER;
diff --git a/test/gtest/test_common.h b/test/gtest/test_common.h
index 9c521b8..5b01114 100644
--- a/test/gtest/test_common.h
+++ b/test/gtest/test_common.h
@@ -64,7 +64,7 @@ void
fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4);
int
-is_ipv4_frag(const struct ipv4_hdr *iph);
+is_ipv4_frag(const struct rte_ipv4_hdr *iph);
void
fill_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto,
diff --git a/test/gtest/test_tle_udp_stream_gen.h b/test/gtest/test_tle_udp_stream_gen.h
index 1f3d210..8476375 100644
--- a/test/gtest/test_tle_udp_stream_gen.h
+++ b/test/gtest/test_tle_udp_stream_gen.h
@@ -65,8 +65,8 @@ static int
lookup4_function(void *opaque, const struct in_addr *addr, struct tle_dest *res)
{
struct in_addr route;
- struct ether_hdr *eth;
- struct ipv4_hdr *ip4h;
+ struct rte_ether_hdr *eth;
+ struct rte_ipv4_hdr *ip4h;
auto routes = static_cast<map<string, tle_dev *> *>(opaque);
/* Check all routes added in map for a match with dest *addr */
@@ -81,11 +81,11 @@ lookup4_function(void *opaque, const struct in_addr *addr, struct tle_dest *res)
res->l2_len = sizeof(*eth);
res->l3_len = sizeof(*ip4h);
res->head_mp = mbuf_pool;
- eth = (struct ether_hdr *)res->hdr;
- eth->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
- ip4h = (struct ipv4_hdr *)(eth + 1);
+ eth = (struct rte_ether_hdr *)res->hdr;
+ eth->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ ip4h = (struct rte_ipv4_hdr *)(eth + 1);
ip4h->version_ihl = (4 << 4) |
- (sizeof(*ip4h) / IPV4_IHL_MULTIPLIER);
+ (sizeof(*ip4h) / RTE_IPV4_IHL_MULTIPLIER);
ip4h->time_to_live = 64;
ip4h->next_proto_id = IPPROTO_UDP;
ip4h->fragment_offset = 0;
@@ -101,8 +101,8 @@ static int
lookup6_function(void *opaque, const struct in6_addr *addr,
struct tle_dest *res)
{
- struct ether_hdr *eth;
- struct ipv6_hdr *ip6h;
+ struct rte_ether_hdr *eth;
+ struct rte_ipv6_hdr *ip6h;
struct in6_addr route;
auto routes = static_cast<map<string, tle_dev *> *>(opaque);
@@ -118,9 +118,9 @@ lookup6_function(void *opaque, const struct in6_addr *addr,
res->l2_len = sizeof(*eth);
res->l3_len = sizeof(*ip6h);
res->head_mp = mbuf_pool;
- eth = (struct ether_hdr *)res->hdr;
- eth->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
- ip6h = (struct ipv6_hdr *)(eth + 1);
+ eth = (struct rte_ether_hdr *)res->hdr;
+ eth->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ ip6h = (struct rte_ipv6_hdr *)(eth + 1);
ip6h->vtc_flow = 6 << 4;
ip6h->proto = IPPROTO_UDP;
ip6h->hop_limits = 64;
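
For reference only, and not part of the patch above: the ethertype/VLAN check that recurs throughout be.c and pkt.c looks roughly like this once written against the RTE_-prefixed names. The helper name and its handling of the inner VLAN type below are illustrative, not copied from TLDK.

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_byteorder.h>

/* Peek at the Ethernet type of an mbuf, skipping a single VLAN tag if
 * present, and report the resulting L2 header length. Uses only the
 * RTE_-prefixed names available from DPDK 19.08 onwards. */
static inline uint16_t
peek_ethertype(const struct rte_mbuf *m, uint32_t *l2_len)
{
	const struct rte_ether_hdr *eth;
	const struct rte_vlan_hdr *vh;
	uint16_t etp;

	eth = rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);
	etp = eth->ether_type;
	*l2_len = sizeof(*eth);

	if (etp == rte_be_to_cpu_16(RTE_ETHER_TYPE_VLAN)) {
		vh = (const struct rte_vlan_hdr *)(eth + 1);
		etp = vh->eth_proto;	/* inner type after the VLAN tag */
		*l2_len += sizeof(*vh);
	}

	return etp;
}

A caller would compare the returned value against rte_be_to_cpu_16(RTE_ETHER_TYPE_IPV4), RTE_ETHER_TYPE_IPV6 or RTE_ETHER_TYPE_ARP and use *l2_len as the L2 offset, much as fill_eth_tcp_hdr_len() does in the hunks above.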