# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
| Resource | resources/libraries/robot/shared/default.robot
| ...
| Force Tags | 2_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | NDRPDR
| ... | NIC_Intel-X710 | ETH | L2BDMACLRN | BASE | MEMIF | DOCKER | 6R2C
| ... | NF_DENSITY | CHAIN | NF_VPPIP4 | 12DCR2T
| ...
| Suite Setup | Setup suite single link | performance
| Suite Teardown | Tear down suite | performance
| Test Setup | Setup test
| Test Teardown | Tear down test | performance | container
| ...
| Test Template | Local Template
| ...
| Documentation | *RFC2544: Pkt throughput L2BD test cases with memif 6 chains
| ... | 12 docker containers*
| ...
| ... | *[Top] Network Topologies:* TG-DUT1-TG 2-node circular topology with
| ... | single links between nodes.
| ... | *[Enc] Packet Encapsulations:* Eth-IPv4 for L2 bridge domain.
| ... | *[Cfg] DUT configuration:* DUT1 is configured with two L2 bridge domains
| ... | and MAC learning enabled. DUT1 tested with ${nic_name}.\
| ... | Each container is connected to VPP via a Memif interface and runs the
| ... | same VPP version as the DUT. Containers are limited via cgroup to use
| ... | cores allocated from the pool of isolated CPUs. There are no memory
| ... | constraints.
| ... | *[Ver] TG verification:* TG finds and reports throughput NDR (Non Drop\
| ... | Rate) with zero packet loss tolerance and throughput PDR (Partial Drop\
| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage\
| ... | of packets transmitted. NDR and PDR are discovered for different\
| ... | Ethernet L2 frame sizes using MLRsearch library.\
| ... | Test packets are generated by TG on links to DUTs. TG traffic profile
| ... | contains two L3 flow-groups (flow-group per direction, 254 flows per
| ... | flow-group) with all packets containing Ethernet header, IPv4 header
| ... | with IP protocol=61 and static payload. MAC addresses match the MAC
| ... | addresses of the TG node interfaces.

*** Variables ***
| @{plugins_to_enable}= | dpdk_plugin.so | memif_plugin.so
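# Only the plugins this suite needs (DPDK device driver, memif) are enabled;
# the shared startup-config keywords are assumed to disable all other plugins.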
| ${osi_layer}= | L3
| ${nic_name}= | Intel-X710
| ${nic_driver}= | vfio-pci
| ${overhead}= | ${0}
| ${nf_dtcr}= | ${1}
| ${nf_dtc}= | ${1}
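# NF dataplane threading: nf_dtc is the dataplane thread count per NF and
# nf_dtcr the dataplane thread-count ratio (CSIT container-infra parameters);
# both are kept at 1 so each NF container gets a single dataplane thread.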
| ${nf_chains}= | ${6}
| ${nf_nodes}= | ${2}
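# 6 chains x 2 NFs per chain = 12 NF containers; each NF uses a pair of memif
# interfaces, i.e. 24 memifs in total, matching the 6ch-24mif-12dcr2t suite id.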
# Traffic profile:
| ${traffic_profile}= | trex-sl-2n3n-ethip4-ip4src254-6c2n
# Container
| ${container_engine}= | Docker
| ${container_chain_topology}= | chain_ip4
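# chain_ip4 topology: each NF container runs VPP IPv4 routing between its two
# memif interfaces, matching the NF_VPPIP4 tag and ${osi_layer} = L3 above.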

*** Keywords ***
| Local Template
| | [Documentation]
| | ... | [Cfg] DUT runs L2BD switching config.
| | ... | Each DUT uses ${phy_cores} physical core(s) for worker threads.
| | ... | [Ver] Measure NDR and PDR values using MLRsearch algorithm.\
| | ...
| | ... | *Arguments:*
| | ... | - frame_size - Framesize in Bytes in integer or string (IMIX_v4_1).
| | ... | Type: integer, string
| | ... | - phy_cores - Number of physical cores. Type: integer
| | ... | - rxq - Number of RX queues, default value: ${None}. Type: integer
| | ...
| | [Arguments] | ${frame_size} | ${phy_cores} | ${rxq}=${None}
| | ...
| | Set Test Variable | \${frame_size}
| | ...
| | Given Add worker threads and rxqueues to all DUTs | ${phy_cores} | ${rxq}
| | And Add PCI devices to all DUTs
| | And Set Max Rate And Jumbo And Handle Multi Seg
| | And Apply startup configuration on all VPP DUTs
| | When Initialize layer driver | ${nic_driver}
| | And Initialize layer interface
| | ... | count=${nf_chains}
| | And Start containers for test
| | ... | nf_chains=${nf_chains} | nf_nodes=${nf_nodes} | auto_scale=${False}
| | And Initialize L2 Bridge Domain for multiple chains with memif pairs
| | ... | nf_chains=${nf_chains} | nf_nodes=${nf_nodes} | auto_scale=${False}
| | Then Find NDR and PDR intervals using optimized search

*** Test Cases ***
| tc01-64B-1c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | 64B | 1C
| | frame_size=${64} | phy_cores=${1}

| tc02-64B-2c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | 64B | 2C
| | frame_size=${64} | phy_cores=${2}

| tc03-64B-4c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | 64B | 4C
| | frame_size=${64} | phy_cores=${4}

| tc04-1518B-1c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | 1518B | 1C
| | frame_size=${1518} | phy_cores=${1}

| tc05-1518B-2c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | 1518B | 2C
| | frame_size=${1518} | phy_cores=${2}

| tc06-1518B-4c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | 1518B | 4C
| | frame_size=${1518} | phy_cores=${4}

| tc07-9000B-1c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | 9000B | 1C
| | frame_size=${9000} | phy_cores=${1}

| tc08-9000B-2c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | 9000B | 2C
| | frame_size=${9000} | phy_cores=${2}

| tc09-9000B-4c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | 9000B | 4C
| | frame_size=${9000} | phy_cores=${4}

| tc10-IMIX-1c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | IMIX | 1C
| | frame_size=IMIX_v4_1 | phy_cores=${1}

| tc11-IMIX-2c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | IMIX | 2C
| | frame_size=IMIX_v4_1 | phy_cores=${2}

| tc12-IMIX-4c-eth-l2bd-6ch-24mif-12dcr2t-vppip4-ndrpdr
| | [Tags] | IMIX | 4C
| | frame_size=IMIX_v4_1 | phy_cores=${4}