summaryrefslogtreecommitdiffstats
path: root/test/test_vxlan_gpe.py
blob: 61d86fe07dc08802834999ef1ce1c63ebea4ea81 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
#!/usr/bin/env python

import socket
from util import ip4n_range
import unittest
from framework import VppTestCase, VppTestRunner, running_extended_tests
from template_bd import BridgeDomain

from scapy.layers.l2 import Ether, Raw
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from scapy.utils import atol


@unittest.skipUnless(running_extended_tests(), "part of extended tests")
class TestVxlanGpe(BridgeDomain, VppTestCase):
    """ VXLAN-GPE Test Case

    Exercises VXLAN-GPE tunnel encap/decap through a VPP bridge domain,
    including unicast flooding, multicast flooding, and tunnel add/delete
    stability (ref-count) checks.
    """

    def __init__(self, *args):
        BridgeDomain.__init__(self)
        VppTestCase.__init__(self, *args)

    def encapsulate(self, pkt, vni):
        """
        Encapsulate the original payload frame by adding VXLAN-GPE header
        with its UDP, IP and Ethernet fields

        :param pkt: payload frame to encapsulate
        :param vni: VNI to place in the VXLAN-GPE header
        """
        return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
                IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
                UDP(sport=self.dport, dport=self.dport, chksum=0) /
                VXLAN(vni=vni, flags=self.flags) /
                pkt)

    def encap_mcast(self, pkt, src_ip, src_mac, vni):
        """
        Encapsulate the original payload frame by adding VXLAN-GPE header
        with its UDP, IP and Ethernet fields, addressed to the test's
        multicast group.

        :param pkt: payload frame to encapsulate
        :param src_ip: outer IPv4 source address
        :param src_mac: outer Ethernet source MAC
        :param vni: VNI to place in the VXLAN-GPE header
        """
        return (Ether(src=src_mac, dst=self.mcast_mac) /
                IP(src=src_ip, dst=self.mcast_ip4) /
                UDP(sport=self.dport, dport=self.dport, chksum=0) /
                VXLAN(vni=vni, flags=self.flags) /
                pkt)

    def decapsulate(self, pkt):
        """
        Decapsulate the original payload frame by removing VXLAN-GPE header

        :param pkt: encapsulated packet
        :returns: the inner payload of the VXLAN layer
        """
        # check if is set I and P flag
        self.assertEqual(pkt[VXLAN].flags, 0x0c)
        return pkt[VXLAN].payload

    def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
        """ Verify the outer headers of a VXLAN-GPE encapsulated packet.

        :param pkt: packet to verify
        :param vni: expected VNI
        :param local_only: when True, skip destination MAC/IP checks
        :param mcast_pkt: when True, expect multicast destination MAC/IP
        """
        # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
        #  by VPP using ARP.
        self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
        if not local_only:
            if not mcast_pkt:
                self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
            else:
                self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
        # Verify VXLAN-GPE tunnel src IP is VPP_IP and dst IP is MY_IP.
        self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
        if not local_only:
            if not mcast_pkt:
                self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
            else:
                self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
        # Verify UDP destination port is VXLAN-GPE 4790, source UDP port
        #  could be arbitrary.
        self.assertEqual(pkt[UDP].dport, type(self).dport)
        # Verify VNI
        self.assertEqual(pkt[VXLAN].vni, vni)

    @classmethod
    def create_vxlan_gpe_flood_test_bd(cls, vni, n_ucast_tunnels):
        # Create n_ucast_tunnels ucast vxlan_gpe tunnels under bd
        ip_range_start = 10
        ip_range_end = ip_range_start + n_ucast_tunnels
        next_hop_address = cls.pg0.remote_ip4n
        for dest_ip4n in ip4n_range(next_hop_address, ip_range_start,
                                    ip_range_end):
            # add host route so dest_ip4n will not be resolved
            cls.vapi.ip_add_del_route(dest_ip4n, 32, next_hop_address)
            r = cls.vapi.vxlan_gpe_add_del_tunnel(
                src_addr=cls.pg0.local_ip4n,
                dst_addr=dest_ip4n,
                vni=vni)
            cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=vni)

    @classmethod
    def add_del_shared_mcast_dst_load(cls, is_add):
        """
        add or del tunnels sharing the same mcast dst
        to test vxlan_gpe ref_count mechanism
        """
        n_shared_dst_tunnels = 20
        vni_start = 1000
        vni_end = vni_start + n_shared_dst_tunnels
        for vni in range(vni_start, vni_end):
            r = cls.vapi.vxlan_gpe_add_del_tunnel(
                src_addr=cls.pg0.local_ip4n,
                dst_addr=cls.mcast_ip4n,
                mcast_sw_if_index=1,
                vni=vni,
                is_add=is_add)
            if r.sw_if_index == 0xffffffff:
                # raising a plain string is invalid Python (TypeError);
                # raise a real exception so the failure is reported properly
                raise Exception("bad sw_if_index: ~0")

    @classmethod
    def add_shared_mcast_dst_load(cls):
        cls.add_del_shared_mcast_dst_load(is_add=1)

    @classmethod
    def del_shared_mcast_dst_load(cls):
        cls.add_del_shared_mcast_dst_load(is_add=0)

    @classmethod
    def add_del_mcast_tunnels_load(cls, is_add):
        """
        add or del tunnels to test vxlan_gpe stability
        """
        n_distinct_dst_tunnels = 20
        ip_range_start = 10
        ip_range_end = ip_range_start + n_distinct_dst_tunnels
        for dest_ip4n in ip4n_range(cls.mcast_ip4n, ip_range_start,
                                    ip_range_end):
            # derive a distinct VNI from the last octet of the dst address
            vni = bytearray(dest_ip4n)[3]
            cls.vapi.vxlan_gpe_add_del_tunnel(
                src_addr=cls.pg0.local_ip4n,
                dst_addr=dest_ip4n,
                mcast_sw_if_index=1,
                vni=vni,
                is_add=is_add)

    @classmethod
    def add_mcast_tunnels_load(cls):
        cls.add_del_mcast_tunnels_load(is_add=1)

    @classmethod
    def del_mcast_tunnels_load(cls):
        cls.add_del_mcast_tunnels_load(is_add=0)

    # Class method to start the VXLAN-GPE test case.
    #  Overrides setUpClass method in VppTestCase class.
    #  Python try..except statement is used to ensure that the tear down of
    #  the class will be executed even if exception is raised.
    #  @param cls The class pointer.
    @classmethod
    def setUpClass(cls):
        super(TestVxlanGpe, cls).setUpClass()

        try:
            # VXLAN-GPE well-known UDP port and I+P flag bits
            cls.dport = 4790
            cls.flags = 0x0c

            # Create 4 pg interfaces.
            cls.create_pg_interfaces(range(4))
            for pg in cls.pg_interfaces:
                pg.admin_up()

            # Configure IPv4 addresses on VPP pg0.
            cls.pg0.config_ip4()

            # Resolve MAC address for VPP's IP address on pg0.
            cls.pg0.resolve_arp()

            # Our Multicast address
            cls.mcast_ip4 = '239.1.1.1'
            cls.mcast_ip4n = socket.inet_pton(socket.AF_INET, cls.mcast_ip4)
            iplong = atol(cls.mcast_ip4)
            # derive the IPv4 multicast MAC (01:00:5e + low 23 bits of IP)
            cls.mcast_mac = "01:00:5e:%02x:%02x:%02x" % (
                (iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF)

            # Create VXLAN-GPE VTEP on VPP pg0, and put vxlan_gpe_tunnel0
            # and pg1 into BD.
            cls.single_tunnel_bd = 11
            r = cls.vapi.vxlan_gpe_add_del_tunnel(
                src_addr=cls.pg0.local_ip4n,
                dst_addr=cls.pg0.remote_ip4n,
                vni=cls.single_tunnel_bd)
            cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index,
                                                bd_id=cls.single_tunnel_bd)
            cls.vapi.sw_interface_set_l2_bridge(cls.pg1.sw_if_index,
                                                bd_id=cls.single_tunnel_bd)

            # Setup vni 2 to test multicast flooding
            cls.n_ucast_tunnels = 10
            cls.mcast_flood_bd = 12
            cls.create_vxlan_gpe_flood_test_bd(cls.mcast_flood_bd,
                                               cls.n_ucast_tunnels)
            r = cls.vapi.vxlan_gpe_add_del_tunnel(
                src_addr=cls.pg0.local_ip4n,
                dst_addr=cls.mcast_ip4n,
                mcast_sw_if_index=1,
                vni=cls.mcast_flood_bd)
            cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index,
                                                bd_id=cls.mcast_flood_bd)
            cls.vapi.sw_interface_set_l2_bridge(cls.pg2.sw_if_index,
                                                bd_id=cls.mcast_flood_bd)

            # Add and delete mcast tunnels to check stability
            cls.add_shared_mcast_dst_load()
            cls.add_mcast_tunnels_load()
            cls.del_shared_mcast_dst_load()
            cls.del_mcast_tunnels_load()

            # Setup vni 3 to test unicast flooding
            cls.ucast_flood_bd = 13
            cls.create_vxlan_gpe_flood_test_bd(cls.ucast_flood_bd,
                                               cls.n_ucast_tunnels)
            cls.vapi.sw_interface_set_l2_bridge(cls.pg3.sw_if_index,
                                                bd_id=cls.ucast_flood_bd)
        except Exception:
            super(TestVxlanGpe, cls).tearDownClass()
            raise

    @unittest.skip("test disabled for vxlan-gpe")
    def test_mcast_flood(self):
        """ inherited from BridgeDomain """
        pass

    @unittest.skip("test disabled for vxlan-gpe")
    def test_mcast_rcv(self):
        """ inherited from BridgeDomain """
        pass

    # Method to define VPP actions before tear down of the test case.
    #  Overrides tearDown method in VppTestCase class.
    #  @param self The object pointer.
    def tearDown(self):
        super(TestVxlanGpe, self).tearDown()
        if not self.vpp_dead:
            self.logger.info(self.vapi.cli("show bridge-domain 11 detail"))
            self.logger.info(self.vapi.cli("show bridge-domain 12 detail"))
            self.logger.info(self.vapi.cli("show bridge-domain 13 detail"))
            self.logger.info(self.vapi.cli("show int"))
            self.logger.info(self.vapi.cli("show vxlan-gpe"))
            self.logger.info(self.vapi.cli("show trace"))


# When executed as a script, run the tests with the VPP-aware test runner.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
class="mi">0; u8 is_add = 1; int i, rv; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) return 0; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "in %U", unformat_vnet_sw_interface, vnm, &sw_if_index)) vec_add1 (inside_sw_if_indices, sw_if_index); else if (unformat (line_input, "out %U", unformat_vnet_sw_interface, vnm, &sw_if_index)) vec_add1 (outside_sw_if_indices, sw_if_index); else if (unformat (line_input, "del")) is_add = 0; else { error = clib_error_return (0, "unknown input '%U'", format_unformat_error, line_input); goto done; } } if (vec_len (inside_sw_if_indices)) { for (i = 0; i < vec_len (inside_sw_if_indices); i++) { sw_if_index = inside_sw_if_indices[i]; rv = nat64_add_del_interface (sw_if_index, 1, is_add); switch (rv) { case VNET_API_ERROR_NO_SUCH_ENTRY: error = clib_error_return (0, "%U NAT64 feature not enabled.", format_vnet_sw_if_index_name, vnm, sw_if_index); goto done; case VNET_API_ERROR_VALUE_EXIST: error = clib_error_return (0, "%U NAT64 feature already enabled.", format_vnet_sw_if_index_name, vnm, vnm, sw_if_index); goto done; case VNET_API_ERROR_INVALID_VALUE: case VNET_API_ERROR_INVALID_VALUE_2: error = clib_error_return (0, "%U NAT64 feature enable/disable failed.", format_vnet_sw_if_index_name, vnm, sw_if_index); goto done; default: break; } } } if (vec_len (outside_sw_if_indices)) { for (i = 0; i < vec_len (outside_sw_if_indices); i++) { sw_if_index = outside_sw_if_indices[i]; rv = nat64_add_del_interface (sw_if_index, 0, is_add); switch (rv) { case VNET_API_ERROR_NO_SUCH_ENTRY: error = clib_error_return (0, "%U NAT64 feature not enabled.", format_vnet_sw_if_index_name, vnm, sw_if_index); goto done; case VNET_API_ERROR_VALUE_EXIST: error = clib_error_return (0, "%U NAT64 feature already enabled.", format_vnet_sw_if_index_name, vnm, sw_if_index); goto done; case VNET_API_ERROR_INVALID_VALUE: case VNET_API_ERROR_INVALID_VALUE_2: error = 
clib_error_return (0, "%U NAT64 feature enable/disable failed.", format_vnet_sw_if_index_name, vnm, sw_if_index); goto done; default: break; } } } done: unformat_free (line_input); vec_free (inside_sw_if_indices); vec_free (outside_sw_if_indices); return error; } static int nat64_cli_interface_walk (snat_interface_t * i, void *ctx) { vlib_main_t *vm = ctx; vnet_main_t *vnm = vnet_get_main (); vlib_cli_output (vm, " %U %s", format_vnet_sw_if_index_name, vnm, i->sw_if_index, (nat_interface_is_inside (i) && nat_interface_is_outside (i)) ? "in out" : nat_interface_is_inside (i) ? "in" : "out"); return 0; } static clib_error_t * nat64_show_interfaces_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { vlib_cli_output (vm, "NAT64 interfaces:"); nat64_interfaces_walk (nat64_cli_interface_walk, vm); return 0; } static clib_error_t * nat64_add_del_static_bib_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { unformat_input_t _line_input, *line_input = &_line_input; clib_error_t *error = 0; u8 is_add = 1; ip6_address_t in_addr; ip4_address_t out_addr; u32 in_port = 0; u32 out_port = 0; u32 vrf_id = 0, protocol; snat_protocol_t proto = 0; u8 p = 0; int rv; if (!unformat_user (input, unformat_line_input, line_input)) return 0; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "%U %u", unformat_ip6_address, &in_addr, &in_port)) ; else if (unformat (line_input, "%U %u", unformat_ip4_address, &out_addr, &out_port)) ; else if (unformat (line_input, "vrf %u", &vrf_id)) ; else if (unformat (line_input, "%U", unformat_snat_protocol, &proto)) ; else if (unformat (line_input, "%U %U %u", unformat_ip6_address, &in_addr, unformat_ip4_address, &out_addr, &protocol)) p = (u8) protocol; else if (unformat (line_input, "del")) is_add = 0; else { error = clib_error_return (0, "unknown input: '%U'", format_unformat_error, line_input); goto done; } } if (!p) { if (!in_port) { error = 
clib_error_return (0, "inside port and address must be set"); goto done; } if (!out_port) { error = clib_error_return (0, "outside port and address must be set"); goto done; } p = snat_proto_to_ip_proto (proto); } rv = nat64_add_del_static_bib_entry (&in_addr, &out_addr, (u16) in_port, (u16) out_port, p, vrf_id, is_add); switch (rv) { case VNET_API_ERROR_NO_SUCH_ENTRY: error = clib_error_return (0, "NAT64 BIB entry not exist."); goto done; case VNET_API_ERROR_VALUE_EXIST: error = clib_error_return (0, "NAT64 BIB entry exist."); goto done; case VNET_API_ERROR_UNSPECIFIED: error = clib_error_return (0, "Crerate NAT64 BIB entry failed."); goto done; case VNET_API_ERROR_INVALID_VALUE: error = clib_error_return (0, "Outside addres %U and port %u already in use.", format_ip4_address, &out_addr, out_port); goto done; case VNET_API_ERROR_INVALID_VALUE_2: error = clib_error_return (0, "Invalid outside port."); default: break; } done: unformat_free (line_input); return error; } static int nat64_cli_bib_walk (nat64_db_bib_entry_t * bibe, void *ctx) { vlib_main_t *vm = ctx; fib_table_t *fib; fib = fib_table_get (bibe->fib_index, FIB_PROTOCOL_IP6); if (!fib) return -1; switch (bibe->proto) { case IP_PROTOCOL_ICMP: case IP_PROTOCOL_TCP: case IP_PROTOCOL_UDP: vlib_cli_output (vm, " %U %u %U %u protocol %U vrf %u %s %u sessions", format_ip6_address, &bibe->in_addr, clib_net_to_host_u16 (bibe->in_port), format_ip4_address, &bibe->out_addr, clib_net_to_host_u16 (bibe->out_port), format_snat_protocol, ip_proto_to_snat_proto (bibe->proto), fib->ft_table_id, bibe->is_static ? "static" : "dynamic", bibe->ses_num); break; default: vlib_cli_output (vm, " %U %U protocol %u vrf %u %s %u sessions", format_ip6_address, &bibe->in_addr, format_ip4_address, &bibe->out_addr, bibe->proto, fib->ft_table_id, bibe->is_static ? 
"static" : "dynamic", bibe->ses_num); } return 0; } static clib_error_t * nat64_show_bib_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { nat64_main_t *nm = &nat64_main; unformat_input_t _line_input, *line_input = &_line_input; clib_error_t *error = 0; u32 proto = ~0; u8 p = 255; nat64_db_t *db; if (!unformat_user (input, unformat_line_input, line_input)) return 0; if (unformat (line_input, "%U", unformat_snat_protocol, &proto)) p = snat_proto_to_ip_proto (proto); else if (unformat (line_input, "unknown")) p = 0; else if (unformat (line_input, "all")) ; else { error = clib_error_return (0, "unknown input: '%U'", format_unformat_error, line_input); goto done; } if (p == 255) vlib_cli_output (vm, "NAT64 BIB entries:"); else vlib_cli_output (vm, "NAT64 %U BIB entries:", format_snat_protocol, proto); /* *INDENT-OFF* */ vec_foreach (db, nm->db) nat64_db_bib_walk (db, p, nat64_cli_bib_walk, vm); /* *INDENT-ON* */ done: unformat_free (line_input); return error; } typedef struct nat64_cli_st_walk_ctx_t_ { vlib_main_t *vm; nat64_db_t *db; } nat64_cli_st_walk_ctx_t; static int nat64_cli_st_walk (nat64_db_st_entry_t * ste, void *arg) { nat64_cli_st_walk_ctx_t *ctx = arg; vlib_main_t *vm = ctx->vm; nat64_db_bib_entry_t *bibe; fib_table_t *fib; bibe = nat64_db_bib_entry_by_index (ctx->db, ste->proto, ste->bibe_index); if (!bibe) return -1; fib = fib_table_get (bibe->fib_index, FIB_PROTOCOL_IP6); if (!fib) return -1; u32 vrf_id = fib->ft_table_id; if (ste->proto == IP_PROTOCOL_ICMP) vlib_cli_output (vm, " %U %U %u %U %U %u protocol %U vrf %u", format_ip6_address, &bibe->in_addr, format_ip6_address, &ste->in_r_addr, clib_net_to_host_u16 (bibe->in_port), format_ip4_address, &bibe->out_addr, format_ip4_address, &ste->out_r_addr, clib_net_to_host_u16 (bibe->out_port), format_snat_protocol, ip_proto_to_snat_proto (bibe->proto), vrf_id); else if (ste->proto == IP_PROTOCOL_TCP || ste->proto == IP_PROTOCOL_UDP) vlib_cli_output (vm, " %U %u %U %u %U %u 
%U %u protcol %U vrf %u", format_ip6_address, &bibe->in_addr, clib_net_to_host_u16 (bibe->in_port), format_ip6_address, &ste->in_r_addr, clib_net_to_host_u16 (ste->r_port), format_ip4_address, &bibe->out_addr, clib_net_to_host_u16 (bibe->out_port), format_ip4_address, &ste->out_r_addr, clib_net_to_host_u16 (ste->r_port), format_snat_protocol, ip_proto_to_snat_proto (bibe->proto), vrf_id); else vlib_cli_output (vm, " %U %U %U %U protocol %u vrf %u", format_ip6_address, &bibe->in_addr, format_ip6_address, &ste->in_r_addr, format_ip4_address, &bibe->out_addr, format_ip4_address, &ste->out_r_addr, bibe->proto, vrf_id); return 0; } static clib_error_t * nat64_show_st_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { nat64_main_t *nm = &nat64_main; unformat_input_t _line_input, *line_input = &_line_input; clib_error_t *error = 0; u32 proto = ~0; u8 p = 255; nat64_db_t *db; nat64_cli_st_walk_ctx_t ctx = { .vm = vm, }; if (!unformat_user (input, unformat_line_input, line_input)) return 0; if (unformat (line_input, "%U", unformat_snat_protocol, &proto)) p = snat_proto_to_ip_proto (proto); else if (unformat (line_input, "unknown")) p = 0; else if (unformat (line_input, "all")) ; else { error = clib_error_return (0, "unknown input: '%U'", format_unformat_error, line_input); goto done; } if (p == 255) vlib_cli_output (vm, "NAT64 sessions:"); else vlib_cli_output (vm, "NAT64 %U sessions:", format_snat_protocol, proto); /* *INDENT-OFF* */ vec_foreach (db, nm->db) { ctx.db = db; nat64_db_st_walk (db, p, nat64_cli_st_walk, &ctx); } /* *INDENT-ON* */ done: unformat_free (line_input); return error; } static clib_error_t * nat64_add_del_prefix_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { vnet_main_t *vnm = vnet_get_main (); clib_error_t *error = 0; unformat_input_t _line_input, *line_input = &_line_input; u8 is_add = 1; u32 vrf_id = 0, sw_if_index = ~0; ip6_address_t prefix; u32 plen = 0; int rv; if 
(!unformat_user (input, unformat_line_input, line_input)) return 0; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "%U/%u", unformat_ip6_address, &prefix, &plen)) ; else if (unformat (line_input, "tenant-vrf %u", &vrf_id)) ; else if (unformat (line_input, "del")) is_add = 0; else if (unformat (line_input, "interface %U", unformat_vnet_sw_interface, vnm, &sw_if_index)) ; else { error = clib_error_return (0, "unknown input: '%U'", format_unformat_error, line_input); goto done; } } if (!plen) { error = clib_error_return (0, "NAT64 prefix must be set."); goto done; } rv = nat64_add_del_prefix (&prefix, (u8) plen, vrf_id, is_add); switch (rv) { case VNET_API_ERROR_NO_SUCH_ENTRY: error = clib_error_return (0, "NAT64 prefix not exist."); goto done; case VNET_API_ERROR_INVALID_VALUE: error = clib_error_return (0, "Invalid prefix length."); goto done; default: break; } /* * Add RX interface route, whenNAT isn't running on the real input * interface */ if (sw_if_index != ~0) { u32 fib_index; fib_prefix_t fibpfx = { .fp_len = plen, .fp_proto = FIB_PROTOCOL_IP6, .fp_addr = { .ip6 = prefix} }; if (is_add) { fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id, FIB_SOURCE_PLUGIN_HI); fib_table_entry_update_one_path (fib_index, &fibpfx, FIB_SOURCE_PLUGIN_HI, FIB_ENTRY_FLAG_NONE, DPO_PROTO_IP6, NULL, sw_if_index, ~0, 0, NULL, FIB_ROUTE_PATH_INTF_RX); } else { fib_index = fib_table_find (FIB_PROTOCOL_IP6, vrf_id); fib_table_entry_path_remove (fib_index, &fibpfx, FIB_SOURCE_PLUGIN_HI, DPO_PROTO_IP6, NULL, sw_if_index, ~0, 1, FIB_ROUTE_PATH_INTF_RX); fib_table_unlock (fib_index, FIB_PROTOCOL_IP6, FIB_SOURCE_PLUGIN_HI); } } done: unformat_free (line_input); return error; } static int nat64_cli_prefix_walk (nat64_prefix_t * p, void *ctx) { vlib_main_t *vm = ctx; vlib_cli_output (vm, " %U/%u tenant-vrf %u", format_ip6_address, &p->prefix, p->plen, p->vrf_id); return 0; } static clib_error_t * nat64_show_prefix_command_fn 
(vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { vlib_cli_output (vm, "NAT64 prefix:"); nat64_prefix_walk (nat64_cli_prefix_walk, vm); return 0; } static clib_error_t * nat64_add_interface_address_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { vnet_main_t *vnm = vnet_get_main (); unformat_input_t _line_input, *line_input = &_line_input; u32 sw_if_index; int rv; int is_add = 1; clib_error_t *error = 0; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) return 0; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index)); else if (unformat (line_input, "del")) is_add = 0; else { error = clib_error_return (0, "unknown input '%U'", format_unformat_error, line_input); goto done; } } rv = nat64_add_interface_address (sw_if_index, is_add); switch (rv) { case VNET_API_ERROR_NO_SUCH_ENTRY: error = clib_error_return (0, "entry not exist"); break; case VNET_API_ERROR_VALUE_EXIST: error = clib_error_return (0, "entry exist"); break; default: break; } done: unformat_free (line_input); return error; } /* *INDENT-OFF* */ /*? * @cliexpar * @cliexstart{nat64 add pool address} * Add/delete NAT64 pool address. * To add single NAT64 pool address use: * vpp# nat64 add pool address 10.1.1.10 * To add NAT64 pool address range use: * vpp# nat64 add pool address 10.1.1.2 - 10.1.1.5 * To add NAT64 pool address for specific tenant use: * vpp# nat64 add pool address 10.1.1.100 tenant-vrf 100 * @cliexend ?*/ VLIB_CLI_COMMAND (nat64_add_pool_address_command, static) = { .path = "nat64 add pool address", .short_help = "nat64 add pool address <ip4-range-start> [- <ip4-range-end>] " "[tenant-vrf <vrf-id>] [del]", .function = nat64_add_del_pool_addr_command_fn, }; /*? * @cliexpar * @cliexstart{show nat64 pool} * Show NAT64 pool. 
* vpp# show nat64 pool * NAT64 pool: * 10.1.1.3 tenant VRF: 0 * 10.1.1.10 tenant VRF: 10 * @cliexend ?*/ VLIB_CLI_COMMAND (show_nat64_pool_command, static) = { .path = "show nat64 pool", .short_help = "show nat64 pool", .function = nat64_show_pool_command_fn, }; /*? * @cliexpar * @cliexstart{set interface nat64} * Enable/disable NAT64 feature on the interface. * To enable NAT64 feature with local (IPv6) network interface * GigabitEthernet0/8/0 and external (IPv4) network interface * GigabitEthernet0/a/0 use: * vpp# set interface nat64 in GigabitEthernet0/8/0 out GigabitEthernet0/a/0 * @cliexend ?*/ VLIB_CLI_COMMAND (set_interface_nat64_command, static) = { .path = "set interface nat64", .short_help = "set interface nat64 in|out <intfc> [del]", .function = nat64_interface_feature_command_fn, }; /*? * @cliexpar * @cliexstart{show nat64 interfaces} * Show interfaces with NAT64 feature. * To show interfaces with NAT64 feature use: * vpp# show nat64 interfaces * NAT64 interfaces: * GigabitEthernet0/8/0 in * GigabitEthernet0/a/0 out * @cliexend ?*/ VLIB_CLI_COMMAND (show_nat64_interfaces_command, static) = { .path = "show nat64 interfaces", .short_help = "show nat64 interfaces", .function = nat64_show_interfaces_command_fn, }; /*? * @cliexpar * @cliexstart{nat64 add static bib} * Add/delete NAT64 static BIB entry. * To create NAT64 satatic BIB entry use: * vpp# nat64 add static bib 2001:db8:c000:221:: 1234 10.1.1.3 5678 tcp * vpp# nat64 add static bib 2001:db8:c000:221:: 1234 10.1.1.3 5678 udp vrf 10 * @cliexend ?*/ VLIB_CLI_COMMAND (nat64_add_del_static_bib_command, static) = { .path = "nat64 add static bib", .short_help = "nat64 add static bib <ip6-addr> <port> <ip4-addr> <port> " "tcp|udp|icmp [vfr <table-id>] [del]", .function = nat64_add_del_static_bib_command_fn, }; /*? * @cliexpar * @cliexstart{show nat64 bib} * Show NAT64 BIB entries. 
* To show NAT64 TCP BIB entries use: * vpp# show nat64 bib tcp * NAT64 tcp BIB: * fd01:1::2 6303 10.0.0.3 62303 tcp vrf 0 dynamic 1 sessions * 2001:db8:c000:221:: 1234 10.1.1.3 5678 tcp vrf 0 static 2 sessions * To show NAT64 UDP BIB entries use: * vpp# show nat64 bib udp * NAT64 udp BIB: * fd01:1::2 6304 10.0.0.3 10546 udp vrf 0 dynamic 10 sessions * 2001:db8:c000:221:: 1234 10.1.1.3 5678 udp vrf 10 static 0 sessions * To show NAT64 ICMP BIB entries use: * vpp# show nat64 bib icmp * NAT64 icmp BIB: * fd01:1::2 6305 10.0.0.3 63209 icmp vrf 10 dynamic 1 sessions * @cliexend ?*/ VLIB_CLI_COMMAND (show_nat64_bib_command, static) = { .path = "show nat64 bib", .short_help = "show nat64 bib all|tcp|udp|icmp|unknown", .function = nat64_show_bib_command_fn, }; /*? * @cliexpar * @cliexstart{show nat64 session table} * Show NAT64 session table. * To show NAT64 TCP session table use: * vpp# show nat64 session table tcp * NAT64 tcp session table: * fd01:1::2 6303 64:ff9b::ac10:202 20 10.0.0.3 62303 172.16.2.2 20 tcp vrf 0 * fd01:3::2 6303 64:ff9b::ac10:202 20 10.0.10.3 21300 172.16.2.2 20 tcp vrf 10 * To show NAT64 UDP session table use: * #vpp show nat64 session table udp * NAT64 udp session table: * fd01:1::2 6304 64:ff9b::ac10:202 20 10.0.0.3 10546 172.16.2.2 20 udp vrf 0 * fd01:3::2 6304 64:ff9b::ac10:202 20 10.0.10.3 58627 172.16.2.2 20 udp vrf 10 * fd01:1::2 1235 64:ff9b::a00:3 4023 10.0.0.3 24488 10.0.0.3 4023 udp vrf 0 * fd01:1::3 23 64:ff9b::a00:3 24488 10.0.0.3 4023 10.0.0.3 24488 udp vrf 0 * To show NAT64 ICMP session table use: * #vpp show nat64 session table icmp * NAT64 icmp session table: * fd01:1::2 64:ff9b::ac10:202 6305 10.0.0.3 172.16.2.2 63209 icmp vrf 0 * @cliexend ?*/ VLIB_CLI_COMMAND (show_nat64_st_command, static) = { .path = "show nat64 session table", .short_help = "show nat64 session table all|tcp|udp|icmp|unknown", .function = nat64_show_st_command_fn, }; /*? 
* @cliexpar * @cliexstart{nat64 add prefix} * Set NAT64 prefix for generating IPv6 representations of IPv4 addresses. * To set NAT64 global prefix use: * vpp# nat64 add prefix 2001:db8::/32 * To set NAT64 prefix for specific tenant use: * vpp# nat64 add prefix 2001:db8:122:300::/56 tenant-vrf 10 * @cliexend ?*/ VLIB_CLI_COMMAND (nat64_add_del_prefix_command, static) = { .path = "nat64 add prefix", .short_help = "nat64 add prefix <ip6-prefix>/<plen> [tenant-vrf <vrf-id>] " "[del] [interface <interface]", .function = nat64_add_del_prefix_command_fn, }; /*? * @cliexpar * @cliexstart{show nat64 prefix} * Show NAT64 prefix. * To show NAT64 prefix use: * vpp# show nat64 prefix * NAT64 prefix: * 2001:db8::/32 tenant-vrf 0 * 2001:db8:122:300::/56 tenant-vrf 10 * @cliexend ?*/ VLIB_CLI_COMMAND (show_nat64_prefix_command, static) = { .path = "show nat64 prefix", .short_help = "show nat64 prefix", .function = nat64_show_prefix_command_fn, }; /*? * @cliexpar * @cliexstart{nat64 add interface address} * Add/delete NAT64 pool address from specific (DHCP addressed) interface. * To add NAT64 pool address from specific interface use: * vpp# nat64 add interface address GigabitEthernet0/8/0 * @cliexend ?*/ VLIB_CLI_COMMAND (nat64_add_interface_address_command, static) = { .path = "nat64 add interface address", .short_help = "nat64 add interface address <interface> [del]", .function = nat64_add_interface_address_command_fn, }; /* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */