summaryrefslogtreecommitdiffstats
path: root/test/test_udp.py
blob: 322d8133b0d55f8abf541107e18f9553e4badc90 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
#!/usr/bin/env python

import unittest

from framework import VppTestCase, VppTestRunner
from vpp_udp_encap import *
from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, VppMplsLabel

from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from scapy.contrib.mpls import MPLS


class TestUdpEncap(VppTestCase):
    """ UDP Encap Test Case """

    def setUp(self):
        """Bring up 4 pg interfaces, each in its own IPv4+IPv6 FIB table."""
        super(TestUdpEncap, self).setUp()

        # create 4 pg interfaces
        self.create_pg_interfaces(range(4))

        # setup interfaces
        # assign them different tables.
        table_id = 0
        self.tables = []

        for i in self.pg_interfaces:
            i.admin_up()

            # table 0 is the default and always exists; create the
            # v4 and v6 tables for the non-zero IDs before using them
            if table_id != 0:
                tbl = VppIpTable(self, table_id)
                tbl.add_vpp_config()
                self.tables.append(tbl)
                tbl = VppIpTable(self, table_id, is_ip6=1)
                tbl.add_vpp_config()
                self.tables.append(tbl)

            i.set_table_ip4(table_id)
            i.set_table_ip6(table_id)
            i.config_ip4()
            i.resolve_arp()
            i.config_ip6()
            i.resolve_ndp()
            table_id += 1

    def tearDown(self):
        """Unconfigure the interfaces and return them to table 0."""
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.ip6_disable()
            i.set_table_ip4(0)
            i.set_table_ip6(0)
            i.admin_down()
        super(TestUdpEncap, self).tearDown()

    def validate_outer4(self, rx, encap_obj):
        """Check the outer IPv4/UDP header matches the encap object."""
        self.assertEqual(rx[IP].src, encap_obj.src_ip_s)
        self.assertEqual(rx[IP].dst, encap_obj.dst_ip_s)
        self.assertEqual(rx[UDP].sport, encap_obj.src_port)
        self.assertEqual(rx[UDP].dport, encap_obj.dst_port)

    def validate_outer6(self, rx, encap_obj):
        """Check the outer IPv6/UDP header matches the encap object."""
        self.assertEqual(rx[IPv6].src, encap_obj.src_ip_s)
        self.assertEqual(rx[IPv6].dst, encap_obj.dst_ip_s)
        self.assertEqual(rx[UDP].sport, encap_obj.src_port)
        self.assertEqual(rx[UDP].dport, encap_obj.dst_port)

    def validate_inner4(self, rx, tx, ttl=None):
        """Check the decapsulated IPv4 packet matches the one sent.

        If ttl is given, expect exactly that TTL (a TTL expectation of
        0 is valid); otherwise expect the TTL to be unchanged from tx.
        """
        self.assertEqual(rx[IP].src, tx[IP].src)
        self.assertEqual(rx[IP].dst, tx[IP].dst)
        # 'is not None' so an explicit ttl=0 is not ignored
        if ttl is not None:
            self.assertEqual(rx[IP].ttl, ttl)
        else:
            self.assertEqual(rx[IP].ttl, tx[IP].ttl)

    def validate_inner6(self, rx, tx):
        """Check the decapsulated IPv6 packet matches the one sent."""
        self.assertEqual(rx.src, tx[IPv6].src)
        self.assertEqual(rx.dst, tx[IPv6].dst)
        self.assertEqual(rx.hlim, tx[IPv6].hlim)

    def test_udp_encap(self):
        """ UDP Encap test
        """

        #
        # construct a UDP encap object through each of the peers
        # v4 through the first two peers, v6 through the second.
        #
        udp_encap_0 = VppUdpEncap(self, 0,
                                  self.pg0.local_ip4,
                                  self.pg0.remote_ip4,
                                  330, 440)
        udp_encap_1 = VppUdpEncap(self, 1,
                                  self.pg1.local_ip4,
                                  self.pg1.remote_ip4,
                                  331, 441,
                                  table_id=1)
        udp_encap_2 = VppUdpEncap(self, 2,
                                  self.pg2.local_ip6,
                                  self.pg2.remote_ip6,
                                  332, 442,
                                  table_id=2,
                                  is_ip6=1)
        udp_encap_3 = VppUdpEncap(self, 3,
                                  self.pg3.local_ip6,
                                  self.pg3.remote_ip6,
                                  333, 443,
                                  table_id=3,
                                  is_ip6=1)
        udp_encap_0.add_vpp_config()
        udp_encap_1.add_vpp_config()
        udp_encap_2.add_vpp_config()
        udp_encap_3.add_vpp_config()

        #
        # Routes via each UDP encap object - all combinations of v4 and v6.
        #
        route_4o4 = VppIpRoute(self, "1.1.0.1", 32,
                               [VppRoutePath("0.0.0.0",
                                             0xFFFFFFFF,
                                             is_udp_encap=1,
                                             next_hop_id=0)])
        route_4o6 = VppIpRoute(self, "1.1.2.1", 32,
                               [VppRoutePath("0.0.0.0",
                                             0xFFFFFFFF,
                                             is_udp_encap=1,
                                             next_hop_id=2)])
        route_6o4 = VppIpRoute(self, "2001::1", 128,
                               [VppRoutePath("0.0.0.0",
                                             0xFFFFFFFF,
                                             is_udp_encap=1,
                                             next_hop_id=1)],
                               is_ip6=1)
        route_6o6 = VppIpRoute(self, "2001::3", 128,
                               [VppRoutePath("0.0.0.0",
                                             0xFFFFFFFF,
                                             is_udp_encap=1,
                                             next_hop_id=3)],
                               is_ip6=1)
        route_4o4.add_vpp_config()
        route_4o6.add_vpp_config()
        route_6o6.add_vpp_config()
        route_6o4.add_vpp_config()

        #
        # 4o4 encap
        #
        p_4o4 = (Ether(src=self.pg0.remote_mac,
                       dst=self.pg0.local_mac) /
                 IP(src="2.2.2.2", dst="1.1.0.1") /
                 UDP(sport=1234, dport=1234) /
                 Raw('\xa5' * 100))
        rx = self.send_and_expect(self.pg0, p_4o4*65, self.pg0)
        for p in rx:
            self.validate_outer4(p, udp_encap_0)
            # re-dissect the UDP payload as the inner packet
            inner = IP(p["UDP"].payload.load)
            self.validate_inner4(inner, p_4o4)

        #
        # 4o6 encap
        #
        p_4o6 = (Ether(src=self.pg0.remote_mac,
                       dst=self.pg0.local_mac) /
                 IP(src="2.2.2.2", dst="1.1.2.1") /
                 UDP(sport=1234, dport=1234) /
                 Raw('\xa5' * 100))
        rx = self.send_and_expect(self.pg0, p_4o6*65, self.pg2)
        for p in rx:
            self.validate_outer6(p, udp_encap_2)
            inner = IP(p["UDP"].payload.load)
            self.validate_inner4(inner, p_4o6)

        #
        # 6o4 encap
        #
        p_6o4 = (Ether(src=self.pg0.remote_mac,
                       dst=self.pg0.local_mac) /
                 IPv6(src="2001::100", dst="2001::1") /
                 UDP(sport=1234, dport=1234) /
                 Raw('\xa5' * 100))
        rx = self.send_and_expect(self.pg0, p_6o4*65, self.pg1)
        for p in rx:
            self.validate_outer4(p, udp_encap_1)
            inner = IPv6(p["UDP"].payload.load)
            self.validate_inner6(inner, p_6o4)

        #
        # 6o6 encap
        #
        p_6o6 = (Ether(src=self.pg0.remote_mac,
                       dst=self.pg0.local_mac) /
                 IPv6(src="2001::100", dst="2001::3") /
                 UDP(sport=1234, dport=1234) /
                 Raw('\xa5' * 100))
        rx = self.send_and_expect(self.pg0, p_6o6*65, self.pg3)
        for p in rx:
            self.validate_outer6(p, udp_encap_3)
            inner = IPv6(p["UDP"].payload.load)
            self.validate_inner6(inner, p_6o6)

        #
        # A route with an output label
        # the TTL of the inner packet is decremented on LSP ingress
        #
        route_4oMPLSo4 = VppIpRoute(self, "1.1.2.22", 32,
                                    [VppRoutePath("0.0.0.0",
                                                  0xFFFFFFFF,
                                                  is_udp_encap=1,
                                                  next_hop_id=1,
                                                  labels=[VppMplsLabel(66)])])
        route_4oMPLSo4.add_vpp_config()

        p_4omo4 = (Ether(src=self.pg0.remote_mac,
                         dst=self.pg0.local_mac) /
                   IP(src="2.2.2.2", dst="1.1.2.22") /
                   UDP(sport=1234, dport=1234) /
                   Raw('\xa5' * 100))
        rx = self.send_and_expect(self.pg0, p_4omo4*65, self.pg1)
        for p in rx:
            self.validate_outer4(p, udp_encap_1)
            # MPLS label 66 wraps the inner IP; TTL 64 decremented to 63
            inner = MPLS(p["UDP"].payload.load)
            self.validate_inner4(inner, p_4omo4, ttl=63)


# Run under VPP's test runner when executed as a script.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
s="p">; sum0 = ip0->checksum; sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0, ip4_header_t, dst_address); ip0->checksum = ip_csum_fold (sum0); old_dst_port0 = tcp0->dst; if (PREDICT_TRUE (new_dst_port0 != old_dst_port0)) { if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_TCP)) { tcp0->dst = new_dst_port0; sum0 = tcp0->checksum; sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0, ip4_header_t, dst_address); sum0 = ip_csum_update (sum0, old_dst_port0, new_dst_port0, ip4_header_t /* cheat */ , length); tcp0->checksum = ip_csum_fold (sum0); } else { udp0->dst_port = new_dst_port0; udp0->checksum = 0; } } else { if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_TCP)) { sum0 = tcp0->checksum; sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0, ip4_header_t, dst_address); tcp0->checksum = ip_csum_fold (sum0); } } rv = 1; goto trace; } rv = 0; trace: if (do_trace && PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && (b0->flags & VLIB_BUFFER_IS_TRACED))) { nat_hairpin_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t)); t->addr.as_u32 = new_dst_addr0; t->port = new_dst_port0; t->fib_index = vnet_buffer (b0)->sw_if_index[VLIB_TX]; if (s0) { t->session_index = si; } else { t->session_index = ~0; } } return rv; } #endif #ifndef CLIB_MARCH_VARIANT u32 snat_icmp_hairpinning (snat_main_t * sm, vlib_buffer_t * b0, ip4_header_t * ip0, icmp46_header_t * icmp0, int is_ed) { clib_bihash_kv_8_8_t kv0, value0; u32 old_dst_addr0, new_dst_addr0; u32 old_addr0, new_addr0; u16 old_port0, new_port0; u16 old_checksum0, new_checksum0; u32 si, ti = 0; ip_csum_t sum0; snat_session_t *s0; snat_static_mapping_t *m0; if (icmp_type_is_error_message (vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags)) { ip4_header_t *inner_ip0 = 0; tcp_udp_header_t *l4_header = 0; inner_ip0 = (ip4_header_t *) ((icmp_echo_header_t *) (icmp0 + 1) + 1); l4_header = ip4_next_header (inner_ip0); u32 protocol = ip_proto_to_nat_proto (inner_ip0->protocol); if (protocol != NAT_PROTOCOL_TCP && protocol != 
NAT_PROTOCOL_UDP) return 1; if (is_ed) { clib_bihash_kv_16_8_t ed_kv, ed_value; init_ed_k (&ed_kv, ip0->dst_address, l4_header->src_port, ip0->src_address, l4_header->dst_port, sm->outside_fib_index, inner_ip0->protocol); if (clib_bihash_search_16_8 (&sm->out2in_ed, &ed_kv, &ed_value)) return 1; ASSERT (ti == ed_value_get_thread_index (&ed_value)); si = ed_value_get_session_index (&ed_value); } else { init_nat_k (&kv0, ip0->dst_address, l4_header->src_port, sm->outside_fib_index, protocol); if (clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0, &value0)) return 1; si = value0.value; } s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si); new_dst_addr0 = s0->in2out.addr.as_u32; vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index; /* update inner source IP address */ old_addr0 = inner_ip0->src_address.as_u32; inner_ip0->src_address.as_u32 = new_dst_addr0; new_addr0 = inner_ip0->src_address.as_u32; sum0 = icmp0->checksum; sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, src_address); icmp0->checksum = ip_csum_fold (sum0); /* update inner IP header checksum */ old_checksum0 = inner_ip0->checksum; sum0 = inner_ip0->checksum; sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, src_address); inner_ip0->checksum = ip_csum_fold (sum0); new_checksum0 = inner_ip0->checksum; sum0 = icmp0->checksum; sum0 = ip_csum_update (sum0, old_checksum0, new_checksum0, ip4_header_t, checksum); icmp0->checksum = ip_csum_fold (sum0); /* update inner source port */ old_port0 = l4_header->src_port; l4_header->src_port = s0->in2out.port; new_port0 = l4_header->src_port; sum0 = icmp0->checksum; sum0 = ip_csum_update (sum0, old_port0, new_port0, tcp_udp_header_t, src_port); icmp0->checksum = ip_csum_fold (sum0); } else { init_nat_k (&kv0, ip0->dst_address, 0, sm->outside_fib_index, 0); if (clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv0, &value0)) { if (!is_ed) { icmp_echo_header_t *echo0 = (icmp_echo_header_t *) (icmp0 + 
1); u16 icmp_id0 = echo0->identifier; init_nat_k (&kv0, ip0->dst_address, icmp_id0, sm->outside_fib_index, NAT_PROTOCOL_ICMP); if (sm->num_workers > 1) ti = (clib_net_to_host_u16 (icmp_id0) - 1024) / sm->port_per_thread; else ti = sm->num_workers; int rv = clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0, &value0); if (!rv) { si = value0.value; s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si); new_dst_addr0 = s0->in2out.addr.as_u32; vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index; echo0->identifier = s0->in2out.port; sum0 = icmp0->checksum; sum0 = ip_csum_update (sum0, icmp_id0, s0->in2out.port, icmp_echo_header_t, identifier); icmp0->checksum = ip_csum_fold (sum0); goto change_addr; } } return 1; } m0 = pool_elt_at_index (sm->static_mappings, value0.value); new_dst_addr0 = m0->local_addr.as_u32; if (vnet_buffer (b0)->sw_if_index[VLIB_TX] == ~0) vnet_buffer (b0)->sw_if_index[VLIB_TX] = m0->fib_index; } change_addr: /* Destination is behind the same NAT, use internal address and port */ if (new_dst_addr0) { old_dst_addr0 = ip0->dst_address.as_u32; ip0->dst_address.as_u32 = new_dst_addr0; sum0 = ip0->checksum; sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0, ip4_header_t, dst_address); ip0->checksum = ip_csum_fold (sum0); } return 0; } #endif #ifndef CLIB_MARCH_VARIANT void nat_hairpinning_sm_unknown_proto (snat_main_t * sm, vlib_buffer_t * b, ip4_header_t * ip) { clib_bihash_kv_8_8_t kv, value; snat_static_mapping_t *m; u32 old_addr, new_addr; ip_csum_t sum; init_nat_k (&kv, ip->dst_address, 0, 0, 0); if (clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv, &value)) return; m = pool_elt_at_index (sm->static_mappings, value.value); old_addr = ip->dst_address.as_u32; new_addr = ip->dst_address.as_u32 = m->local_addr.as_u32; sum = ip->checksum; sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, dst_address); ip->checksum = ip_csum_fold (sum); if (vnet_buffer (b)->sw_if_index[VLIB_TX] == ~0) 
vnet_buffer (b)->sw_if_index[VLIB_TX] = m->fib_index; } #endif #ifndef CLIB_MARCH_VARIANT void nat44_ed_hairpinning_unknown_proto (snat_main_t * sm, vlib_buffer_t * b, ip4_header_t * ip) { u32 old_addr, new_addr = 0, ti = 0; clib_bihash_kv_8_8_t kv, value; clib_bihash_kv_16_8_t s_kv, s_value; snat_static_mapping_t *m; ip_csum_t sum; snat_session_t *s; if (sm->num_workers > 1) ti = sm->worker_out2in_cb (b, ip, sm->outside_fib_index, 0); else ti = sm->num_workers; old_addr = ip->dst_address.as_u32; init_ed_k (&s_kv, ip->dst_address, 0, ip->src_address, 0, sm->outside_fib_index, ip->protocol); if (clib_bihash_search_16_8 (&sm->out2in_ed, &s_kv, &s_value)) { init_nat_k (&kv, ip->dst_address, 0, 0, 0); if (clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv, &value)) return; m = pool_elt_at_index (sm->static_mappings, value.value); if (vnet_buffer (b)->sw_if_index[VLIB_TX] == ~0) vnet_buffer (b)->sw_if_index[VLIB_TX] = m->fib_index; new_addr = ip->dst_address.as_u32 = m->local_addr.as_u32; } else { ASSERT (ti == ed_value_get_thread_index (&s_value)); s = pool_elt_at_index (sm->per_thread_data[ti].sessions, ed_value_get_session_index (&s_value)); if (vnet_buffer (b)->sw_if_index[VLIB_TX] == ~0) vnet_buffer (b)->sw_if_index[VLIB_TX] = s->in2out.fib_index; new_addr = ip->dst_address.as_u32 = s->in2out.addr.as_u32; } sum = ip->checksum; sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, dst_address); ip->checksum = ip_csum_fold (sum); } #endif static inline uword nat44_hairpinning_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ed) { u32 n_left_from, *from, *to_next; nat_hairpin_next_t next_index; snat_main_t *sm = &snat_main; vnet_feature_main_t *fm = &feature_main; u8 arc_index = vnet_feat_arc_ip4_local.feature_arc_index; vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index]; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; while 
(n_left_from > 0) { u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; vlib_buffer_t *b0; u32 next0; ip4_header_t *ip0; u32 proto0; udp_header_t *udp0; tcp_header_t *tcp0; u32 sw_if_index0; /* speculatively enqueue b0 to the current next frame */ bi0 = from[0]; to_next[0] = bi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); ip0 = vlib_buffer_get_current (b0); udp0 = ip4_next_header (ip0); tcp0 = (tcp_header_t *) udp0; sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; proto0 = ip_proto_to_nat_proto (ip0->protocol); vnet_get_config_data (&cm->config_main, &b0->current_config_index, &next0, 0); if (snat_hairpinning (vm, node, sm, b0, ip0, udp0, tcp0, proto0, is_ed, 1 /* do_trace */ )) next0 = NAT_HAIRPIN_NEXT_LOOKUP; if (next0 != NAT_HAIRPIN_NEXT_DROP) { vlib_increment_simple_counter (&sm->counters.hairpinning, vm->thread_index, sw_if_index0, 1); } /* verify speculative enqueue, maybe switch current next frame */ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } return frame->n_vectors; } VLIB_NODE_FN (nat44_hairpinning_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return nat44_hairpinning_fn_inline (vm, node, frame, 0); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (nat44_hairpinning_node) = { .name = "nat44-hairpinning", .vector_size = sizeof (u32), .type = VLIB_NODE_TYPE_INTERNAL, .format_trace = format_nat_hairpin_trace, .n_next_nodes = NAT_HAIRPIN_N_NEXT, .next_nodes = { [NAT_HAIRPIN_NEXT_DROP] = "error-drop", [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup", }, }; /* *INDENT-ON* */ VLIB_NODE_FN (nat44_ed_hairpinning_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return nat44_hairpinning_fn_inline (vm, node, frame, 1); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE 
(nat44_ed_hairpinning_node) = { .name = "nat44-ed-hairpinning", .vector_size = sizeof (u32), .type = VLIB_NODE_TYPE_INTERNAL, .format_trace = format_nat_hairpin_trace, .n_next_nodes = NAT_HAIRPIN_N_NEXT, .next_nodes = { [NAT_HAIRPIN_NEXT_DROP] = "error-drop", [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup", }, }; /* *INDENT-ON* */ static inline uword snat_hairpin_dst_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ed) { u32 n_left_from, *from, *to_next; nat_hairpin_next_t next_index; snat_main_t *sm = &snat_main; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; while (n_left_from > 0) { u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; vlib_buffer_t *b0; u32 next0; ip4_header_t *ip0; u32 proto0; u32 sw_if_index0; /* speculatively enqueue b0 to the current next frame */ bi0 = from[0]; to_next[0] = bi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); next0 = NAT_HAIRPIN_NEXT_LOOKUP; ip0 = vlib_buffer_get_current (b0); sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; proto0 = ip_proto_to_nat_proto (ip0->protocol); vnet_buffer (b0)->snat.flags = 0; if (PREDICT_FALSE (is_hairpinning (sm, &ip0->dst_address))) { if (proto0 == NAT_PROTOCOL_TCP || proto0 == NAT_PROTOCOL_UDP) { udp_header_t *udp0 = ip4_next_header (ip0); tcp_header_t *tcp0 = (tcp_header_t *) udp0; snat_hairpinning (vm, node, sm, b0, ip0, udp0, tcp0, proto0, is_ed, 1 /* do_trace */ ); } else if (proto0 == NAT_PROTOCOL_ICMP) { icmp46_header_t *icmp0 = ip4_next_header (ip0); snat_icmp_hairpinning (sm, b0, ip0, icmp0, is_ed); } else { if (is_ed) nat44_ed_hairpinning_unknown_proto (sm, b0, ip0); else nat_hairpinning_sm_unknown_proto (sm, b0, ip0); } vnet_buffer (b0)->snat.flags = SNAT_FLAG_HAIRPINNING; } if (next0 != NAT_HAIRPIN_NEXT_DROP) { vlib_increment_simple_counter 
(&sm->counters.hairpinning, vm->thread_index, sw_if_index0, 1); } /* verify speculative enqueue, maybe switch current next frame */ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } return frame->n_vectors; } VLIB_NODE_FN (snat_hairpin_dst_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return snat_hairpin_dst_fn_inline (vm, node, frame, 0); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (snat_hairpin_dst_node) = { .name = "nat44-hairpin-dst", .vector_size = sizeof (u32), .type = VLIB_NODE_TYPE_INTERNAL, .format_trace = format_nat_hairpin_trace, .n_next_nodes = NAT_HAIRPIN_N_NEXT, .next_nodes = { [NAT_HAIRPIN_NEXT_DROP] = "error-drop", [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup", }, }; /* *INDENT-ON* */ VLIB_NODE_FN (nat44_ed_hairpin_dst_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return snat_hairpin_dst_fn_inline (vm, node, frame, 1); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (nat44_ed_hairpin_dst_node) = { .name = "nat44-ed-hairpin-dst", .vector_size = sizeof (u32), .type = VLIB_NODE_TYPE_INTERNAL, .format_trace = format_nat_hairpin_trace, .n_next_nodes = NAT_HAIRPIN_N_NEXT, .next_nodes = { [NAT_HAIRPIN_NEXT_DROP] = "error-drop", [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup", }, }; /* *INDENT-ON* */ static inline uword snat_hairpin_src_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ed) { u32 n_left_from, *from, *to_next; snat_hairpin_src_next_t next_index; snat_main_t *sm = &snat_main; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; while (n_left_from > 0) { u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; vlib_buffer_t *b0; u32 next0; snat_interface_t *i; u32 sw_if_index0; /* speculatively enqueue b0 to the current next 
frame */ bi0 = from[0]; to_next[0] = bi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; vnet_feature_next (&next0, b0); /* *INDENT-OFF* */ pool_foreach (i, sm->output_feature_interfaces, ({ /* Only packets from NAT inside interface */ if ((nat_interface_is_inside(i)) && (sw_if_index0 == i->sw_if_index)) { if (PREDICT_FALSE ((vnet_buffer (b0)->snat.flags) & SNAT_FLAG_HAIRPINNING)) { if (PREDICT_TRUE (sm->num_workers > 1)) next0 = SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH; else next0 = SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT; } break; } })); /* *INDENT-ON* */ if (next0 != SNAT_HAIRPIN_SRC_NEXT_DROP) { vlib_increment_simple_counter (&sm->counters.hairpinning, vm->thread_index, sw_if_index0, 1); } /* verify speculative enqueue, maybe switch current next frame */ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } return frame->n_vectors; } VLIB_NODE_FN (snat_hairpin_src_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return snat_hairpin_src_fn_inline (vm, node, frame, 0); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (snat_hairpin_src_node) = { .name = "nat44-hairpin-src", .vector_size = sizeof (u32), .type = VLIB_NODE_TYPE_INTERNAL, .n_next_nodes = SNAT_HAIRPIN_SRC_N_NEXT, .next_nodes = { [SNAT_HAIRPIN_SRC_NEXT_DROP] = "error-drop", [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT] = "nat44-in2out-output", [SNAT_HAIRPIN_SRC_NEXT_INTERFACE_OUTPUT] = "interface-output", [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH] = "nat44-in2out-output-worker-handoff", }, }; /* *INDENT-ON* */ VLIB_NODE_FN (nat44_ed_hairpin_src_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return snat_hairpin_src_fn_inline (vm, node, frame, 1); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (nat44_ed_hairpin_src_node) = { .name = "nat44-ed-hairpin-src", .vector_size = sizeof (u32), 
.type = VLIB_NODE_TYPE_INTERNAL, .n_next_nodes = SNAT_HAIRPIN_SRC_N_NEXT, .next_nodes = { [SNAT_HAIRPIN_SRC_NEXT_DROP] = "error-drop", [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT] = "nat44-ed-in2out-output", [SNAT_HAIRPIN_SRC_NEXT_INTERFACE_OUTPUT] = "interface-output", [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH] = "nat44-in2out-output-worker-handoff", }, }; /* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */