path: root/src/vnet/mfib/mfib_table.h
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __MFIB_TABLE_H__
#define __MFIB_TABLE_H__

#include <vnet/ip/ip.h>
#include <vnet/adj/adj.h>
#include <vnet/dpo/replicate_dpo.h>

#include <vnet/mfib/mfib_types.h>

/**
 * Keep a lock per-source and a total
 */
#define MFIB_TABLE_N_LOCKS (MFIB_N_SOURCES+1)
#define MFIB_TABLE_TOTAL_LOCKS MFIB_N_SOURCES
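
/*
 * Note (an inference from the two defines above, not normative): the
 * first MFIB_N_SOURCES slots of mft_locks count per-source locks, and
 * the extra slot at index MFIB_TABLE_TOTAL_LOCKS holds the aggregate
 * count across all sources.
 */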

/**
 * @brief
 *   A protocol Independent IP multicast FIB table
 */
typedef struct mfib_table_t_
{
    /**
     * Required for pool_get_aligned
     */
    CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);

    /**
     * A union of the protocol specific FIBs that provide the
     * underlying LPM mechanism.
     * This element is first in the struct so that it is in the
     * first cache line.
     */
    union {
        ip4_mfib_t v4;
        ip6_mfib_t v6;
    };

    /**
     * Which protocol this table serves. Used to switch on the union above.
     */
    fib_protocol_t mft_proto;

    /**
     * number of locks on the table
     */
    u16 mft_locks[MFIB_TABLE_N_LOCKS];

    /**
     * Table ID (hash key) for this FIB.
     */
    u32 mft_table_id;

    /**
     * Index into FIB vector.
     */
    fib_node_index_t mft_index;

    /**
     * Total route counters
     */
    u32 mft_total_route_counts;

    /**
     * Table description
     */
    u8* mft_desc;
} mfib_table_t;

/**
 * @brief
 *  Format the description/name of the table
 */
extern u8* format_mfib_table_name(u8* s, va_list *ap);

/**
 * @brief
 *  Perform a longest prefix match in the non-forwarding table
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param prefix
 *  The prefix to lookup
 *
 * @return
 *  The index of the fib_entry_t for the best match, which may be the default route
 */
extern fib_node_index_t mfib_table_lookup(u32 fib_index,
                                         const mfib_prefix_t *prefix);

/**
 * @brief
 *  Perform an exact match in the non-forwarding table
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param prefix
 *  The prefix to lookup
 *
 * @return
 *  The index of the fib_entry_t for the exact match, or INVALID
 *  if there is no match.
 */
extern fib_node_index_t mfib_table_lookup_exact_match(u32 fib_index,
                                                      const mfib_prefix_t *prefix);
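
/*
 * Illustrative usage sketch (not part of this header's API): build an
 * IPv4 (S,G) prefix and perform both lookups. The addresses and the
 * example_lookup() wrapper are hypothetical.
 *
 *   static void
 *   example_lookup (u32 fib_index)
 *   {
 *       mfib_prefix_t pfx = {
 *           .fp_proto = FIB_PROTOCOL_IP4,
 *           .fp_len = 64,
 *           .fp_grp_addr = {
 *               .ip4.as_u32 = clib_host_to_net_u32 (0xe8010101), // 232.1.1.1
 *           },
 *           .fp_src_addr = {
 *               .ip4.as_u32 = clib_host_to_net_u32 (0x0a000001), // 10.0.0.1
 *           },
 *       };
 *       fib_node_index_t lpm, exact;
 *
 *       lpm = mfib_table_lookup (fib_index, &pfx);
 *       exact = mfib_table_lookup_exact_match (fib_index, &pfx);
 *   }
 */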

/**
 * @brief
 * Add a new entry (with no replication) or lock an existing one
 *
 * @param prefix
 *  The prefix for the entry to add
 *
 * @return
 *  the index of the fib_entry_t that is created (or existed already).
 */
extern fib_node_index_t mfib_table_entry_update(u32 fib_index,
                                                const mfib_prefix_t *prefix,
                                                mfib_source_t source,
                                                fib_rpf_id_t rpf_id,
                                                mfib_entry_flags_t flags);

/**
 * @brief
 *  Add n paths to an entry (aka route) in the FIB. If the entry does not
 *  exist, it will be created.
 * See the documentation for fib_route_path_t for more descriptions of
 * the path parameters.
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param prefix
 *  The prefix for the entry to add
 *
 * @param source
 *  The ID of the client/source adding the entry.
 *
 * @param flags
 *  Flags for the entry.
 *
 * @param rpath
 *  A vector of paths.
 *
 * @return
 *  the index of the fib_entry_t that is created (or existed already).
 */
extern fib_node_index_t mfib_table_entry_path_update(u32 fib_index,
                                                     const mfib_prefix_t *prefix,
                                                     mfib_source_t source,
                                                     const fib_route_path_t *rpath,
                                                     mfib_itf_flags_t flags);
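
/*
 * Usage sketch (hypothetical values; `pfx` as in the lookup sketch
 * above): source an entry with one forwarding path out of `sw_if_index`.
 *
 *   fib_route_path_t rpath = {
 *       .frp_proto = FIB_PROTOCOL_IP4,
 *       .frp_sw_if_index = sw_if_index,
 *       .frp_weight = 1,
 *   };
 *
 *   mfib_table_entry_path_update (fib_index, &pfx, MFIB_SOURCE_API,
 *                                 &rpath, MFIB_ITF_FLAG_FORWARD);
 */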

/**
 * @brief
 * Remove n paths to an entry (aka route) in the FIB. If this is the entry's
 * last path, then the entry will be removed, unless it has other sources.
 * See the documentation for fib_route_path_t for more descriptions of
 * the path parameters.
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param prefix
 *  The prefix for the entry to remove
 *
 * @param source
 *  The ID of the client/source removing the entry.
 *
 * @param paths
 *  A vector of paths.
 */
extern void mfib_table_entry_path_remove(u32 fib_index,
                                         const mfib_prefix_t *prefix,
                                         mfib_source_t source,
                                         const fib_route_path_t *paths);
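
/*
 * The matching removal (sketch, same hypothetical `pfx` and `rpath`):
 *
 *   mfib_table_entry_path_remove (fib_index, &pfx, MFIB_SOURCE_API, &rpath);
 */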



/**
 * @brief
 *  Delete a FIB entry. If the entry has no more sources, then it is
 * removed from the table.
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param prefix
 *  The prefix for the entry to remove
 *
 * @param source
 *  The ID of the client/source removing the entry.
 */
extern void mfib_table_entry_delete(u32 fib_index,
                                    const mfib_prefix_t *prefix,
                                    mfib_source_t source);

/**
 * @brief
 *  Delete a FIB entry. If the entry has no more sources, then it is
 * removed from the table.
 *
 * @param entry_index
 *  The index of the FIB entry
 *
 * @param source
 *  The ID of the client/source removing the entry.
 */
extern void mfib_table_entry_delete_index(fib_node_index_t entry_index,
                                          mfib_source_t source);

/**
 * @brief
 *  Add a 'special' entry to the mFIB that links to the DPO passed.
 *  A special entry is an entry that the FIB is not expected to resolve
 *  via the usual mechanisms (i.e. recursive or neighbour adj DB lookup).
 *  Instead the client/source provides the index of a replicate DPO to link to.
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param prefix
 *  The prefix to add
 *
 * @param source
 *  The ID of the client/source adding the entry.
 *
 * @param flags
 *  Flags for the entry.
 *
 * @param rep_dpo
 *  The replicate DPO index to link to.
 *
 * @return
 *  the index of the fib_entry_t that is created (or existed already).
 */
extern fib_node_index_t mfib_table_entry_special_add(u32 fib_index,
                                                     const mfib_prefix_t *prefix,
                                                     mfib_source_t source,
                                                     mfib_entry_flags_t flags,
                                                     index_t rep_dpo);
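
/*
 * Usage sketch: `repi` stands for the index of a replicate DPO the
 * caller already owns; the source and flag are hypothetical choices.
 *
 *   mfib_table_entry_special_add (fib_index, &pfx, MFIB_SOURCE_API,
 *                                 MFIB_ENTRY_FLAG_ACCEPT_ALL_ITF, repi);
 */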

/**
 * @brief
 *  Flush all entries from a table for the source
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param proto
 *  The protocol of the entries in the table
 *
 * @param source
 *  the source to flush
 */
extern void mfib_table_flush(u32 fib_index,
                             fib_protocol_t proto,
                             mfib_source_t source);

/**
 * @brief
 *  Get the index of the FIB bound to the interface
 *
 * @param proto
 *  The protocol of the FIB (and thus the entries therein)
 *
 * @param sw_if_index
 *  The interface index
 *
 * @return fib_index
 *  The index of the FIB
 */
extern u32 mfib_table_get_index_for_sw_if_index(fib_protocol_t proto,
                                                u32 sw_if_index);

/**
 * @brief
 *  Get the Table-ID of the FIB from protocol and index
 *
 * @param fib_index
 *  The FIB index
 *
 * @param proto
 *  The protocol of the FIB (and thus the entries therein)
 *
 * @return
 *  The table ID of the FIB
 */
extern u32 mfib_table_get_table_id(u32 fib_index, fib_protocol_t proto);

/**
 * @brief
 *  Get the index of the FIB for a Table-ID. This DOES NOT create the
 * FIB if it does not exist.
 *
 * @param proto
 *  The protocol of the FIB (and thus the entries therein)
 *
 * @param table_id
 *  The Table-ID
 *
 * @return fib_index
 *  The index of the FIB, which may be INVALID.
 */
extern u32 mfib_table_find(fib_protocol_t proto, u32 table_id);


/**
 * @brief
 *  Get the index of the FIB for a Table-ID. This DOES create the
 * FIB if it does not exist.
 *
 * @param proto
 *  The protocol of the FIB (and thus the entries therein)
 *
 * @param table_id
 *  The Table-ID
 *
 * @param source
 *  The ID of the client/source.
 *
 * @return fib_index
 *  The index of the FIB
 */
extern u32 mfib_table_find_or_create_and_lock(fib_protocol_t proto,
                                              u32 table_id,
                                              mfib_source_t source);

/**
 * @brief
 *  Get the index of the FIB for a Table-ID. This DOES create the
 * FIB if it does not exist.
 *
 * @param proto
 *  The protocol of the FIB (and thus the entries therein)
 *
 * @param table_id
 *  The Table-ID
 *
 * @param source
 *  The ID of the client/source.
 *
 * @param name
 *  The name the client wants the table to have
 *
 * @return fib_index
 *  The index of the FIB
 */
extern u32 mfib_table_find_or_create_and_lock_w_name(fib_protocol_t proto,
                                                     u32 table_id,
                                                     mfib_source_t source,
                                                     const u8 *name);


/**
 * @brief
 * Release a reference counting lock on the table. When the last lock
 * has gone, the FIB is deleted.
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param proto
 *  The protocol of the FIB (and thus the entries therein)
 *
 * @param source
 *  The ID of the client/source.
 */
extern void mfib_table_unlock(u32 fib_index,
                              fib_protocol_t proto,
                              mfib_source_t source);

/**
 * @brief
 * Take a reference counting lock on the table.
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param proto
 *  The protocol of the FIB (and thus the entries therein)
 *
 * @param source
 *  The ID of the client/source.
 */
extern void mfib_table_lock(u32 fib_index,
                            fib_protocol_t proto,
                            mfib_source_t source);
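
/*
 * Typical lifecycle sketch (table-id 10 is arbitrary): find or create
 * the table with a lock held, use it, then unlock so the table can be
 * deleted once the final reference is released.
 *
 *   u32 fib_index;
 *
 *   fib_index = mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, 10,
 *                                                   MFIB_SOURCE_API);
 *   // ... add/remove entries ...
 *   mfib_table_unlock (fib_index, FIB_PROTOCOL_IP4, MFIB_SOURCE_API);
 */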

/**
 * @brief
 * Return the number of entries in the FIB.
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param proto
 *  The protocol of the FIB (and thus the entries therein)
 *
 * @return number of entries.
 */
extern u32 mfib_table_get_num_entries(u32 fib_index,
                                      fib_protocol_t proto);

/**
 * @brief
 *  Get the less specific (covering) prefix
 *
 * @param fib_index
 *  The index of the FIB
 *
 * @param prefix
 *  The prefix to lookup
 *
 * @return
 *  The index of the less specific fib_entry_t.
 */
extern fib_node_index_t mfib_table_get_less_specific(u32 fib_index,
                                                     const mfib_prefix_t *prefix);

/**
 * @brief
 * Get a pointer to a FIB table
 */
extern mfib_table_t *mfib_table_get(fib_node_index_t index,
                                    fib_protocol_t proto);

/**
 * @brief Call back function when walking entries in a FIB table
 */
typedef int (*mfib_table_walk_fn_t)(fib_node_index_t fei,
                                    void *ctx);

/**
 * @brief Walk all entries in a FIB table
 * N.B.: This is NOT safe with respect to deletes. If you need to delete
 * entries, walk the whole table collecting the entry indices in a vector,
 * then delete those entries.
 */
extern void mfib_table_walk(u32 fib_index,
                            fib_protocol_t proto,
                            mfib_table_walk_fn_t fn,
                            void *ctx);
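
/*
 * Sketch of a walk callback (the counting context is hypothetical, and
 * the treatment of the return value is an assumption; check the walk
 * implementation for the protocol in use):
 *
 *   static int
 *   mfib_count_cb (fib_node_index_t fei, void *ctx)
 *   {
 *       u32 *count = ctx;
 *
 *       (*count)++;
 *       return (1);
 *   }
 *
 *   u32 n = 0;
 *   mfib_table_walk (fib_index, FIB_PROTOCOL_IP4, mfib_count_cb, &n);
 */
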
/**
 * @brief format (display) the memory usage for mfibs
 */
extern u8 * format_mfib_table_memory(u8 * s, va_list * args);

/**
 * To assist unit tests (UT)
 */
extern u32 mfib_table_get_n_routes(fib_node_index_t index,
                                   fib_protocol_t proto);


#endif
#!/usr/bin/env python

import unittest
import socket

from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
    VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
    MRouteItfFlags, MRouteEntryFlags
from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface

from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6
from scapy.contrib.mpls import MPLS


class TestMPLS(VppTestCase):
    """ MPLS Test Case """

    def setUp(self):
        super(TestMPLS, self).setUp()

        # create 2 pg interfaces
        self.create_pg_interfaces(range(4))

        # setup both interfaces
        # assign them different tables.
        table_id = 0

        for i in self.pg_interfaces:
            i.admin_up()
            i.set_table_ip4(table_id)
            i.set_table_ip6(table_id)
            i.config_ip4()
            i.resolve_arp()
            i.config_ip6()
            i.resolve_ndp()
            i.enable_mpls()
            table_id += 1

    def tearDown(self):
        super(TestMPLS, self).tearDown()
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.ip6_disable()
            i.admin_down()

    # scapy's IP() default TTL is 64; the MPLS TTL here defaults to 255
    def create_stream_labelled_ip4(
            self,
            src_if,
            mpls_labels,
            mpls_ttl=255,
            ping=0,
            ip_itf=None,
            dst_ip=None,
            n=257):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, n):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)

            for ii in range(len(mpls_labels)):
                if ii == len(mpls_labels) - 1:
                    p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=1)
                else:
                    p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=0)
            if not ping:
                if not dst_ip:
                    p = (p / IP(src=src_if.local_ip4, dst=src_if.remote_ip4) /
                         UDP(sport=1234, dport=1234) /
                         Raw(payload))
                else:
                    p = (p / IP(src=src_if.local_ip4, dst=dst_ip) /
                         UDP(sport=1234, dport=1234) /
                         Raw(payload))
            else:
                p = (p / IP(src=ip_itf.remote_ip4,
                            dst=ip_itf.local_ip4) /
                     ICMP())

            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_stream_ip4(self, src_if, dst_ip):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IP(src=src_if.remote_ip4, dst=dst_ip) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_stream_labelled_ip6(self, src_if, mpls_label, mpls_ttl,
                                   dst_ip=None):
        if dst_ip is None:
            dst_ip = src_if.remote_ip6
        self.reset_packet_infos()
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 MPLS(label=mpls_label, ttl=mpls_ttl) /
                 IPv6(src=src_if.remote_ip6, dst=dst_ip) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    @staticmethod
    def verify_filter(capture, sent):
        if len(capture) != len(sent):
            # filter out any IPv6 RAs from the capture; build a new list
            # rather than removing from the list being iterated
            capture = [p for p in capture if not p.haslayer(IPv6)]
        return capture

    def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0):
        capture = self.verify_filter(capture, sent)

        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            tx = sent[i]
            rx = capture[i]

            # the rx'd packet has the MPLS label popped
            eth = rx[Ether]
            self.assertEqual(eth.type, 0x800)

            tx_ip = tx[IP]
            rx_ip = rx[IP]

            if not ping_resp:
                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                # IP processing post pop has decremented the TTL
                self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
            else:
                self.assertEqual(rx_ip.src, tx_ip.dst)
                self.assertEqual(rx_ip.dst, tx_ip.src)

    def verify_mpls_stack(self, rx, mpls_labels, ttl=255, num=0):
        # the rx'd packet has the MPLS label popped
        eth = rx[Ether]
        self.assertEqual(eth.type, 0x8847)

        rx_mpls = rx[MPLS]

        for ii in range(len(mpls_labels)):
            self.assertEqual(rx_mpls.label, mpls_labels[ii])
            self.assertEqual(rx_mpls.cos, 0)
            if ii == num:
                self.assertEqual(rx_mpls.ttl, ttl)
            else:
                self.assertEqual(rx_mpls.ttl, 255)

            if ii == len(mpls_labels) - 1:
                self.assertEqual(rx_mpls.s, 1)
            else:
                # not end of stack
                self.assertEqual(rx_mpls.s, 0)
                # pop the label to expose the next
                rx_mpls = rx_mpls[MPLS].payload

    def verify_capture_labelled_ip4(self, src_if, capture, sent,
                                    mpls_labels):
        capture = self.verify_filter(capture, sent)

        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            tx = sent[i]
            rx = capture[i]
            tx_ip = tx[IP]
            rx_ip = rx[IP]

            # the MPLS TTL is copied from the IP
            self.verify_mpls_stack(
                rx, mpls_labels, rx_ip.ttl, len(mpls_labels) - 1)

            self.assertEqual(rx_ip.src, tx_ip.src)
            self.assertEqual(rx_ip.dst, tx_ip.dst)
            # IP processing post pop has decremented the TTL
            self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)

    def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels,
                                    ttl=255, top=None):
        if top is None:
            top = len(mpls_labels) - 1
        capture = self.verify_filter(capture, sent)

        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            tx = sent[i]
            rx = capture[i]
            tx_ip = tx[IP]
            rx_ip = rx[IP]

            # the MPLS TTL is 255 since it enters a new tunnel
            self.verify_mpls_stack(
                rx, mpls_labels, ttl, top)

            self.assertEqual(rx_ip.src, tx_ip.src)
            self.assertEqual(rx_ip.dst, tx_ip.dst)
            # IP processing post pop has decremented the TTL
            self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)

    def verify_capture_labelled(self, src_if, capture, sent,
                                mpls_labels, ttl=254, num=0):
        capture = self.verify_filter(capture, sent)

        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            rx = capture[i]
            self.verify_mpls_stack(rx, mpls_labels, ttl, num)

    def verify_capture_ip6(self, src_if, capture, sent):
        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            tx = sent[i]
            rx = capture[i]

            # the rx'd packet has the MPLS label popped
            eth = rx[Ether]
            self.assertEqual(eth.type, 0x86DD)

            tx_ip = tx[IPv6]
            rx_ip = rx[IPv6]

            self.assertEqual(rx_ip.src, tx_ip.src)
            self.assertEqual(rx_ip.dst, tx_ip.dst)
            # IP processing post pop has decremented the TTL
            self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)

    def send_and_assert_no_replies(self, intf, pkts, remark):
        intf.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        for i in self.pg_interfaces:
            i.assert_nothing_captured(remark=remark)

    def test_swap(self):
        """ MPLS label swap tests """

        #
        # A simple MPLS xconnect - eos label in label out
        #
        route_32_eos = VppMplsRoute(self, 32, 1,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[33])])
        route_32_eos.add_vpp_config()

        #
        # a stream that matches the route for 10.0.0.1
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [32])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled(self.pg0, rx, tx, [33])

        #
        # A simple MPLS xconnect - non-eos label in label out
        #
        route_32_neos = VppMplsRoute(self, 32, 0,
                                     [VppRoutePath(self.pg0.remote_ip4,
                                                   self.pg0.sw_if_index,
                                                   labels=[33])])
        route_32_neos.add_vpp_config()

        #
        # a stream that matches the route for 10.0.0.1
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [32, 99])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled(self.pg0, rx, tx, [33, 99])

        #
        # An MPLS xconnect - EOS label in IP out
        #
        route_33_eos = VppMplsRoute(self, 33, 1,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[])])
        route_33_eos.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [33])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_ip4(self.pg0, rx, tx)

        #
        # An MPLS xconnect - non-EOS label in IP out - an invalid configuration
        # so this traffic should be dropped.
        #
        route_33_neos = VppMplsRoute(self, 33, 0,
                                     [VppRoutePath(self.pg0.remote_ip4,
                                                   self.pg0.sw_if_index,
                                                   labels=[])])
        route_33_neos.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [33, 99])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.assert_nothing_captured(
            remark="MPLS non-EOS packets popped and forwarded")

        #
        # A recursive EOS x-connect, which resolves through another x-connect
        #
        route_34_eos = VppMplsRoute(self, 34, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  0xffffffff,
                                                  nh_via_label=32,
                                                  labels=[44, 45])])
        route_34_eos.add_vpp_config()

        tx = self.create_stream_labelled_ip4(self.pg0, [34])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 45], num=2)

        #
        # A recursive non-EOS x-connect, which resolves through another
        # x-connect
        #
        route_34_neos = VppMplsRoute(self, 34, 0,
                                     [VppRoutePath("0.0.0.0",
                                                   0xffffffff,
                                                   nh_via_label=32,
                                                   labels=[44, 46])])
        route_34_neos.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [34, 99])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        # it's the 2nd (counting from 0) label in the stack that is swapped
        self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 46, 99], num=2)

        #
        # a recursive IP route that resolves through the recursive non-eos
        # x-connect
        #
        ip_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                 [VppRoutePath("0.0.0.0",
                                               0xffffffff,
                                               nh_via_label=34,
                                               labels=[55])])
        ip_10_0_0_1.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33, 44, 46, 55])

        ip_10_0_0_1.remove_vpp_config()
        route_34_neos.remove_vpp_config()
        route_34_eos.remove_vpp_config()
        route_33_neos.remove_vpp_config()
        route_33_eos.remove_vpp_config()
        route_32_neos.remove_vpp_config()
        route_32_eos.remove_vpp_config()

    def test_bind(self):
        """ MPLS Local Label Binding test """

        #
        # Add a non-recursive route with a single out label
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[45])])
        route_10_0_0_1.add_vpp_config()

        # bind a local label to the route
        binding = VppMplsIpBind(self, 44, "10.0.0.1", 32)
        binding.add_vpp_config()

        # non-EOS stream
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [44, 99])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled(self.pg0, rx, tx, [45, 99])

        # EOS stream
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [44])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled(self.pg0, rx, tx, [45])

        # IP stream
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [45])

        #
        # cleanup
        #
        binding.remove_vpp_config()
        route_10_0_0_1.remove_vpp_config()

    def test_imposition(self):
        """ MPLS label imposition test """

        #
        # Add a non-recursive route with a single out label
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[32])])
        route_10_0_0_1.add_vpp_config()

        #
        # a stream that matches the route for 10.0.0.1
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32])

        #
        # Add a non-recursive route with 3 out labels
        #
        route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[32, 33, 34])])
        route_10_0_0_2.add_vpp_config()

        #
        # a stream that matches the route for 10.0.0.2
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.2")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 33, 34])

        #
        # add a recursive path, with output label, via the 1 label route
        #
        route_11_0_0_1 = VppIpRoute(self, "11.0.0.1", 32,
                                    [VppRoutePath("10.0.0.1",
                                                  0xffffffff,
                                                  labels=[44])])
        route_11_0_0_1.add_vpp_config()

        #
        # a stream that matches the route for 11.0.0.1, should pick up
        # the label stack for 11.0.0.1 and 10.0.0.1
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "11.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 44])

        #
        # add a recursive path, with 2 labels, via the 3 label route
        #
        route_11_0_0_2 = VppIpRoute(self, "11.0.0.2", 32,
                                    [VppRoutePath("10.0.0.2",
                                                  0xffffffff,
                                                  labels=[44, 45])])
        route_11_0_0_2.add_vpp_config()

        #
        # a stream that matches the route for 11.0.0.2, should pick up
        # the label stack for 11.0.0.2 and 10.0.0.2
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "11.0.0.2")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_labelled_ip4(
            self.pg0, rx, tx, [32, 33, 34, 44, 45])

        #
        # cleanup
        #
        route_11_0_0_2.remove_vpp_config()
        route_11_0_0_1.remove_vpp_config()
        route_10_0_0_2.remove_vpp_config()
        route_10_0_0_1.remove_vpp_config()

    def test_tunnel(self):
        """ MPLS Tunnel Tests """

        #
        # Create a tunnel with a single out label
        #
        mpls_tun = VppMPLSTunnelInterface(self,
                                          [VppRoutePath(self.pg0.remote_ip4,
                                                        self.pg0.sw_if_index,
                                                        labels=[44, 46])])
        mpls_tun.add_vpp_config()
        mpls_tun.admin_up()

        #
        # add an unlabelled route through the new tunnel
        #
        route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
                                    [VppRoutePath("0.0.0.0",
                                                  mpls_tun._sw_if_index)])
        route_10_0_0_3.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46])

        #
        # add a labelled route through the new tunnel
        #
        route_10_0_0_4 = VppIpRoute(self, "10.0.0.4", 32,
                                    [VppRoutePath("0.0.0.0",
                                                  mpls_tun._sw_if_index,
                                                  labels=[33])])
        route_10_0_0_4.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.4")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46, 33],
                                         ttl=63, top=2)

    def test_v4_exp_null(self):
        """ MPLS V4 Explicit NULL test """

        #
        # The first test case has an MPLS TTL of 0
        # all packets should be dropped
        #
        tx = self.create_stream_labelled_ip4(self.pg0, [0], 0)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        self.pg0.assert_nothing_captured(remark="MPLS TTL=0 packets forwarded")

        #
        # a stream with a non-zero MPLS TTL
        # PG0 is in the default table
        #
        tx = self.create_stream_labelled_ip4(self.pg0, [0])
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_ip4(self.pg0, rx, tx)

        #
        # a stream with a non-zero MPLS TTL
        # PG1 is in table 1
        # we are ensuring the post-pop lookup occurs in the VRF table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg1, [0])
        self.pg1.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture()
        self.verify_capture_ip4(self.pg1, rx, tx)

    def test_v6_exp_null(self):
        """ MPLS V6 Explicit NULL test """

        #
        # a stream with a non-zero MPLS TTL
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip6(self.pg0, 2, 2)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_ip6(self.pg0, rx, tx)

        #
        # a stream with a non-zero MPLS TTL
        # PG1 is in table 1
        # we are ensuring the post-pop lookup occurs in the VRF table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip6(self.pg1, 2, 2)
        self.pg1.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture()
        self.verify_capture_ip6(self.pg1, rx, tx)

    def test_deag(self):
        """ MPLS Deagg """

        #
        # A de-agg route - next-hop lookup in default table
        #
        route_34_eos = VppMplsRoute(self, 34, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  0xffffffff,
                                                  nh_table_id=0)])
        route_34_eos.add_vpp_config()

        #
        # ping an interface in the default table
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [34], ping=1,
                                             ip_itf=self.pg0)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_ip4(self.pg0, rx, tx, ping_resp=1)

        #
        # A de-agg route - next-hop lookup in non-default table
        #
        route_35_eos = VppMplsRoute(self, 35, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  0xffffffff,
                                                  nh_table_id=1)])
        route_35_eos.add_vpp_config()

        #
        # ping an interface in the non-default table
        # PG0 is in the default table. packets arrive labelled in the
        # default table and egress unlabelled in the non-default
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(
            self.pg0, [35], ping=1, ip_itf=self.pg1)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        packet_count = self.get_packet_count_for_if_idx(self.pg0.sw_if_index)
        rx = self.pg1.get_capture(packet_count)
        self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)

        #
        # Double pop
        #
        route_36_neos = VppMplsRoute(self, 36, 0,
                                     [VppRoutePath("0.0.0.0",
                                                   0xffffffff)])
        route_36_neos.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [36, 35],
                                             ping=1, ip_itf=self.pg1)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture(len(tx))
        self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)

        route_36_neos.remove_vpp_config()
        route_35_eos.remove_vpp_config()
        route_34_eos.remove_vpp_config()

    def test_interface_rx(self):
        """ MPLS Interface Receive """

        #
        # Add a non-recursive route that will forward the traffic
        # post-interface-rx
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    table_id=1,
                                    paths=[VppRoutePath(self.pg1.remote_ip4,
                                                        self.pg1.sw_if_index)])
        route_10_0_0_1.add_vpp_config()

        #
        # An interface receive label that maps traffic to RX on interface
        # pg1
        # by injecting the packet in on pg0, which is in table 0
        # doing an interface-rx on pg1 and matching a route in table 1
        # if the packet egresses, then we must have swapped to pg1
        # so as to have matched the route in table 1
        #
        route_34_eos = VppMplsRoute(self, 34, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  self.pg1.sw_if_index,
                                                  is_interface_rx=1)])
        route_34_eos.add_vpp_config()

        #
        # a stream, injected on PG0 (which is in the default table),
        # that matches the route for 10.0.0.1 in table 1
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [34], n=257,
                                             dst_ip="10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture(257)
        self.verify_capture_ip4(self.pg1, rx, tx)

    def test_mcast_mid_point(self):
        """ MPLS Multicast Mid Point """

        #
        # Add a non-recursive route that will forward the traffic
        # post-interface-rx
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    table_id=1,
                                    paths=[VppRoutePath(self.pg1.remote_ip4,
                                                        self.pg1.sw_if_index)])
        route_10_0_0_1.add_vpp_config()

        #
        # Add a mcast entry that replicates to pg2 and pg3
        # and to an interface-rx (like a bud node would)
        #
        route_3400_eos = VppMplsRoute(self, 3400, 1,
                                      [VppRoutePath(self.pg2.remote_ip4,
                                                    self.pg2.sw_if_index,
                                                    labels=[3401]),
                                       VppRoutePath(self.pg3.remote_ip4,
                                                    self.pg3.sw_if_index,
                                                    labels=[3402]),
                                       VppRoutePath("0.0.0.0",
                                                    self.pg1.sw_if_index,
                                                    is_interface_rx=1)],
                                      is_multicast=1)
        route_3400_eos.add_vpp_config()

        #
        # a stream, injected on PG0 (which is in the default table),
        # that matches the mcast label route 3400
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [3400], n=257,
                                             dst_ip="10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture(257)
        self.verify_capture_ip4(self.pg1, rx, tx)

        rx = self.pg2.get_capture(257)
        self.verify_capture_labelled(self.pg2, rx, tx, [3401])
        rx = self.pg3.get_capture(257)
        self.verify_capture_labelled(self.pg3, rx, tx, [3402])

    def test_mcast_head(self):
        """ MPLS Multicast Head-end """

        #
        # Create a multicast tunnel with two replications
        #
        mpls_tun = VppMPLSTunnelInterface(self,
                                          [VppRoutePath(self.pg2.remote_ip4,
                                                        self.pg2.sw_if_index,
                                                        labels=[42]),
                                           VppRoutePath(self.pg3.remote_ip4,
                                                        self.pg3.sw_if_index,
                                                        labels=[43])],
                                          is_multicast=1)
        mpls_tun.add_vpp_config()
        mpls_tun.admin_up()

        #
        # add an unlabelled route through the new tunnel
        #
        route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
                                    [VppRoutePath("0.0.0.0",
                                                  mpls_tun._sw_if_index)])
        route_10_0_0_3.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg2.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [42])
        rx = self.pg3.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [43])

        #
        # Add an IP multicast route via the tunnel
        # A (*,G).
        # one accepting interface, pg0, 1 forwarding interface via the tunnel
        #
        route_232_1_1_1 = VppIpMRoute(
            self,
            "0.0.0.0",
            "232.1.1.1", 32,
            MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
            [VppMRoutePath(self.pg0.sw_if_index,
                           MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
             VppMRoutePath(mpls_tun._sw_if_index,
                           MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
        route_232_1_1_1.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg2.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [42])
        rx = self.pg3.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [43])

    def test_mcast_ip4_tail(self):
        """ MPLS IPv4 Multicast Tail """

        #
        # Add a multicast route that will forward the traffic
        # post-disposition
        #
        route_232_1_1_1 = VppIpMRoute(
            self,
            "0.0.0.0",
            "232.1.1.1", 32,
            MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
            table_id=1,
            paths=[VppMRoutePath(self.pg1.sw_if_index,
                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
        route_232_1_1_1.add_vpp_config()

        #
        # An interface receive label that maps traffic to RX on interface
        # pg1
        # by injecting the packet in on pg0, which is in table 0
        # doing an rpf-id  and matching a route in table 1
        # if the packet egresses, then we must have matched the route in
        # table 1
        #
        route_34_eos = VppMplsRoute(self, 34, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  self.pg1.sw_if_index,
                                                  nh_table_id=1,
                                                  rpf_id=55)],
                                    is_multicast=1)

        route_34_eos.add_vpp_config()

        #
        # Drop due to interface lookup miss
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [34],
                                             dst_ip="232.1.1.1", n=1)
        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")

        #
        # set the RPF-ID of the entry to match the input packet's
        #
        route_232_1_1_1.update_rpf_id(55)

        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [34],
                                             dst_ip="232.1.1.1", n=257)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture(257)
        self.verify_capture_ip4(self.pg1, rx, tx)

        #
        # set the RPF-ID of the entry to not match the input packet's
        #
        route_232_1_1_1.update_rpf_id(56)
        tx = self.create_stream_labelled_ip4(self.pg0, [34],
                                             dst_ip="232.1.1.1")
        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")

    def test_mcast_ip6_tail(self):
        """ MPLS IPv6 Multicast Tail """

        #
        # Add a multicast route that will forward the traffic
        # post-disposition
        #
        route_ff = VppIpMRoute(
            self,
            "::",
            "ff01::1", 32,
            MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
            table_id=1,
            paths=[VppMRoutePath(self.pg1.sw_if_index,
                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
            is_ip6=1)
        route_ff.add_vpp_config()

        #
        # An interface-receive label that maps traffic to RX on interface
        # pg1: the packet is injected on pg0, which is in table 0, the
        # label disposition applies an rpf-id, and the lookup matches a
        # route in table 1. If the packet egresses, then we must have
        # matched the route in table 1.
        #
        route_34_eos = VppMplsRoute(
            self, 34, 1,
            [VppRoutePath("::",
                          self.pg1.sw_if_index,
                          nh_table_id=1,
                          rpf_id=55,
                          is_ip6=1)],
            is_multicast=1)

        route_34_eos.add_vpp_config()

        #
        # Drop due to interface lookup miss: the mfib entry has no
        # RPF-ID configured yet, so the packet's rpf-id (55) cannot match
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
                                             dst_ip="ff01::1")
        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")

        #
        # set the RPF-ID of the entry to match the input packet's
        #
        route_ff.update_rpf_id(55)

        tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
                                             dst_ip="ff01::1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg1.get_capture(257)
        self.verify_capture_ip6(self.pg1, rx, tx)

        #
        # set the RPF-ID of the entry to not match the input packet's
        #
        route_ff.update_rpf_id(56)
        tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
                                             dst_ip="ff01::1")
        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")


class TestMPLSDisabled(VppTestCase):
    """ MPLS disabled """

    def setUp(self):
        super(TestMPLSDisabled, self).setUp()

        # create 2 pg interfaces
        self.create_pg_interfaces(range(2))

        # PG0 is MPLS enabled
        self.pg0.admin_up()
        self.pg0.config_ip4()
        self.pg0.resolve_arp()
        self.pg0.enable_mpls()

        # PG1 is not MPLS enabled
        self.pg1.admin_up()

    def tearDown(self):
        super(TestMPLSDisabled, self).tearDown()
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.admin_down()

    def send_and_assert_no_replies(self, intf, pkts, remark):
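        # send pkts on intf and check that no packet is captured on any
        # of the pg interfaces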
        intf.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        for i in self.pg_interfaces:
            i.get_capture(0)
            i.assert_nothing_captured(remark=remark)

    def test_mpls_disabled(self):
        """ MPLS Disabled """

        tx = (Ether(src=self.pg1.remote_mac,
                    dst=self.pg1.local_mac) /
              MPLS(label=32, ttl=64) /
              IPv6(src="2001::1", dst=self.pg0.remote_ip6) /
              UDP(sport=1234, dport=1234) /
              Raw('\xa5' * 100))

        #
        # A simple MPLS xconnect - eos label in label out
        #
        route_32_eos = VppMplsRoute(self, 32, 1,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[33])])
        route_32_eos.add_vpp_config()

        #
        # PG1 does not forward MPLS traffic
        #
        self.send_and_assert_no_replies(self.pg1, tx, "MPLS disabled")

        #
        # MPLS enable PG1
        #
        self.pg1.enable_mpls()

        #
        # Now we get packets through
        #
        self.pg1.add_stream(tx)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture(1)

        #
        # Disable PG1
        #
        self.pg1.disable_mpls()

        #
        # PG1 no longer forwards MPLS traffic
        #
        self.send_and_assert_no_replies(self.pg1, tx, "MPLS disabled")


class TestMPLSPIC(VppTestCase):
    """ MPLS PIC edge convergence """

    def setUp(self):
        super(TestMPLSPIC, self).setUp()

        # create 4 pg interfaces
        self.create_pg_interfaces(range(4))

        # core links
        self.pg0.admin_up()
        self.pg0.config_ip4()
        self.pg0.resolve_arp()
        self.pg0.enable_mpls()
        self.pg1.admin_up()
        self.pg1.config_ip4()
        self.pg1.resolve_arp()
        self.pg1.enable_mpls()

        # VRF (customer facing) link
        self.pg2.admin_up()
        self.pg2.set_table_ip4(1)
        self.pg2.config_ip4()
        self.pg2.resolve_arp()
        self.pg2.set_table_ip6(1)
        self.pg2.config_ip6()
        self.pg2.resolve_ndp()
        self.pg3.admin_up()
        self.pg3.set_table_ip4(1)
        self.pg3.config_ip4()
        self.pg3.resolve_arp()
        self.pg3.set_table_ip6(1)
        self.pg3.config_ip6()
        self.pg3.resolve_ndp()

    def tearDown(self):
        super(TestMPLSPIC, self).tearDown()
        self.pg0.disable_mpls()
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.set_table_ip4(0)
            i.set_table_ip6(0)
            i.admin_down()

    def test_mpls_ibgp_pic(self):
        """ MPLS iBGP PIC edge convergence

        1) setup many iBGP VPN routes via a pair of iBGP peers.
        2) Check ECMP forwarding to these peers
        3) withdraw the IGP route to one of these peers.
        4) check forwarding continues to the remaining peer
        """

        #
        # IGP+LDP core routes
        #
        core_10_0_0_45 = VppIpRoute(self, "10.0.0.45", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[45])])
        core_10_0_0_45.add_vpp_config()

        core_10_0_0_46 = VppIpRoute(self, "10.0.0.46", 32,
                                    [VppRoutePath(self.pg1.remote_ip4,
                                                  self.pg1.sw_if_index,
                                                  labels=[46])])
        core_10_0_0_46.add_vpp_config()

        #
        # Lots of VPN routes. We need more than 64 so that VPP will
        # build the fast convergence indirection
        #
        vpn_routes = []
        pkts = []
        for ii in range(64):
            dst = "192.168.1.%d" % ii
            vpn_routes.append(VppIpRoute(self, dst, 32,
                                         [VppRoutePath("10.0.0.45",
                                                       0xffffffff,
                                                       labels=[145],
                                                       is_resolve_host=1),
                                          VppRoutePath("10.0.0.46",
                                                       0xffffffff,
                                                       labels=[146],
                                                       is_resolve_host=1)],
                                         table_id=1))
            vpn_routes[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg2.local_mac,
                              src=self.pg2.remote_mac) /
                        IP(src=self.pg2.remote_ip4, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw('\xa5' * 100))
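
        # the VPN routes recurse via the peers' /32 addresses;
        # is_resolve_host constrains the recursion to a host route, so
        # all 64 routes share one path-list - the fast convergence
        # indirection referred to above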

        #
        # Send the packet stream (one pkt to each VPN route)
        #  - expect a 50-50 split of the traffic
        #
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0._get_capture(1)
        rx1 = self.pg1._get_capture(1)

        # we are not testing the LB hashing algorithm, so we're not
        # concerned with the split ratio, just that neither count is 0
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))
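
        # sanity check: every packet should egress on exactly one of the
        # two peers, so the counts should sum to the number sent
        self.assertEqual(len(pkts), len(rx0) + len(rx1))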

        #
        # use a test CLI command to stop the FIB walk process; this
        # prevents the FIB from converging the VPN routes and thus
        # allows us to probe the interim (post-fail, pre-converge) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # Withdraw one of the IGP routes
        #
        core_10_0_0_46.remove_vpp_config()
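
        # the walk process is stopped, so the VPN routes are not
        # re-programmed; the cut-over tested below happens via the
        # shared path-list alone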

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.vapi.ppcli("clear trace")
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0.get_capture(len(pkts))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")

        #
        # packets should still be forwarded through the remaining peer
        #
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0.get_capture(64)

        #
        # Add the IGP route back and we return to load-balancing
        #
        core_10_0_0_46.add_vpp_config()

        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0._get_capture(1)
        rx1 = self.pg1._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

    def test_mpls_ebgp_pic(self):
        """ MPLS eBGP PIC edge convergence

        1) setup many eBGP VPN routes via a pair of eBGP peers
        2) Check ECMP forwarding to these peers
        3) withdraw one eBGP path - expect LB across remaining eBGP
        """

        #
        # Lots of VPN routes. We need more than 64 so that VPP will
        # build the fast convergence indirection
        #
        vpn_routes = []
        vpn_bindings = []
        pkts = []
        for ii in range(64):
            dst = "192.168.1.%d" % ii
            local_label = 1600 + ii
            vpn_routes.append(VppIpRoute(self, dst, 32,
                                         [VppRoutePath(self.pg2.remote_ip4,
                                                       0xffffffff,
                                                       nh_table_id=1,
                                                       is_resolve_attached=1),
                                          VppRoutePath(self.pg3.remote_ip4,
                                                       0xffffffff,
                                                       nh_table_id=1,
                                                       is_resolve_attached=1)],
                                         table_id=1))
            vpn_routes[ii].add_vpp_config()

            vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
                                              ip_table_id=1))
            vpn_bindings[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg0.local_mac,
                              src=self.pg0.remote_mac) /
                        MPLS(label=local_label, ttl=64) /
                        IP(src=self.pg0.remote_ip4, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw('\xa5' * 100))
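
        # recursion via is_resolve_attached ties the VPN routes to the
        # connected prefix on pg2/pg3; each prefix also has its own
        # local label (1600 + ii) bound above, so the labelled packets
        # are disposed directly onto the VPN route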

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

        #
        # use a test CLI command to stop the FIB walk process; this
        # prevents the FIB from converging the VPN routes and thus
        # allows us to probe the interim (post-fail, pre-converge) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # withdraw the connected prefix on the interface.
        #
        self.pg2.unconfig_ip4()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # put the connected prefix back
        #
        self.pg2.config_ip4()

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

    def test_mpls_v6_ebgp_pic(self):
        """ MPLSv6 eBGP PIC edge convergence

        1) setup many eBGP VPNv6 routes via a pair of eBGP peers
        2) Check ECMP forwarding to these peers
        3) withdraw one eBGP path - expect LB across remaining eBGP
        """

        #
        # Lots of VPN routes. We need more than 64 so that VPP will
        # build the fast convergence indirection
        #
        vpn_routes = []
        vpn_bindings = []
        pkts = []
        for ii in range(64):
            dst = "3000::%d" % ii
            local_label = 1600 + ii
            vpn_routes.append(VppIpRoute(self, dst, 128,
                                         [VppRoutePath(self.pg2.remote_ip6,
                                                       0xffffffff,
                                                       nh_table_id=1,
                                                       is_resolve_attached=1,
                                                       is_ip6=1),
                                          VppRoutePath(self.pg3.remote_ip6,
                                                       0xffffffff,
                                                       nh_table_id=1,
                                                       is_ip6=1,
                                                       is_resolve_attached=1)],
                                         table_id=1,
                                         is_ip6=1))
            vpn_routes[ii].add_vpp_config()

            vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
                                              ip_table_id=1,
                                              is_ip6=1))
            vpn_bindings[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg0.local_mac,
                              src=self.pg0.remote_mac) /
                        MPLS(label=local_label, ttl=64) /
                        IPv6(src=self.pg0.remote_ip6, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw('\xa5' * 100))

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

        #
        # use a test CLI command to stop the FIB walk process; this
        # prevents the FIB from converging the VPN routes and thus
        # allows us to probe the interim (post-fail, pre-converge) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # withdraw the connected prefix on the interface and shut the
        # interface down so that the ND cache is flushed
        #
        self.pg2.unconfig_ip6()
        self.pg2.admin_down()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # put the connected prefix back and bring the interface up
        #
        self.pg2.admin_up()
        self.pg2.config_ip6()

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)