path: root/test/test_mpls.py
blob: 2c40e8d198e9cb3e15c190f93704213cc7b45e03 (plain)
#!/usr/bin/python


import os
import stat
import sys
import time
import outer_packages
import zmq
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
import jsonrpclib
from jsonrpclib import Fault
import binascii
import socket
import errno
import signal
from common.trex_status_e import TRexStatus
from common.trex_exceptions import *
import subprocess
from random import randrange
import logging
import threading
import CCustomLogger
from trex_launch_thread import AsynchronousTRexSession
from zmq_monitor_thread import ZmqMonitorSession
from argparse import ArgumentParser, RawTextHelpFormatter
from json import JSONEncoder
import re
import shlex
import tempfile

try:
    from .singleton_daemon import register_socket, run_command
except (ImportError, ValueError):
    from singleton_daemon import register_socket, run_command


# setup the logger
CCustomLogger.setup_custom_logger('TRexServer')
logger = logging.getLogger('TRexServer')

class CTRexServer(object):
    """This class defines the server side of the RESTfull interaction with TRex"""
    DEFAULT_TREX_PATH = '/auto/proj-pcube-b/apps/PL-b/tools/bp_sim2/v1.55/' #'/auto/proj-pcube-b/apps/PL-b/tools/nightly/trex_latest'
    TREX_START_CMD    = './t-rex-64'
    DEFAULT_FILE_PATH = '/tmp/trex_files/'

    def __init__(self, trex_path, trex_files_path, trex_host='0.0.0.0', trex_daemon_port=8090, trex_zmq_port=4500, trex_nice=-19):
        """ 
        Parameters
        ----------
        trex_host : str
            a string of the TRex ip address or hostname.
            default value: machine hostname as fetched from socket.gethostname()
        trex_daemon_port : int
            the port number on which the trex-daemon server can be reached
            default value: 8090
        trex_zmq_port : int
            the port number on which trex's zmq module will interact with daemon server
            default value: 4500
        trex_nice : int
            priority (nice value) of the TRex process

        Instantiates the server object and connects it to the listening daemon-server.
        """
        self.TREX_PATH          = os.path.abspath(os.path.dirname(trex_path+'/'))
        self.trex_files_path    = os.path.abspath(os.path.dirname(trex_files_path+'/'))
        self.__check_trex_path_validity()
        self.__check_files_path_validity()
        self.trex               = CTRex()
        self.trex_version       = None
        self.trex_host          = trex_host
        self.trex_daemon_port   = trex_daemon_port
        self.trex_zmq_port      = trex_zmq_port
        self.trex_server_path   = "http://{hostname}:{port}".format( hostname = trex_host, port = trex_daemon_port )
        self.start_lock         = threading.Lock()
        self.__reservation      = None
        self.zmq_monitor        = ZmqMonitorSession(self.trex, self.trex_zmq_port)    # initiate single ZMQ monitor thread for server usage
        self.trex_nice          = int(trex_nice)
        if self.trex_nice < -20 or self.trex_nice > 19:
            err = "Parameter 'nice' should be integer in range [-20, 19]"
            print(err)
            logger.error(err)
            raise Exception(err)
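
    # a minimal usage sketch (paths are hypothetical):
    #   server = CTRexServer('/opt/trex/latest', '/tmp/trex_files')
    #   server.start()  # blocks, serving JSON-RPC requests until terminated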
    
    def add(self, x, y):
        logger.info("Processing add function. Parameters are: {0}, {1} ".format( x, y ))
        return x + y
        # return Fault(-10, "")

    def push_file (self, filename, bin_data):
        logger.info("Processing push_file() command.")
        try:
            filepath = os.path.join(self.trex_files_path, os.path.basename(filename))
            with open(filepath, 'wb') as f:
                f.write(binascii.a2b_base64(bin_data))
            logger.info("push_file() command finished. File is saved as %s" % filepath)
            return True
        except IOError as inst:
            logger.error("push_file method failed. " + str(inst))
            return False
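
    # client-side sketch (assumed usage, not part of this module): callers
    # base64-encode the file before pushing, e.g.
    #   with open(path, 'rb') as f:
    #       bin_data = binascii.b2a_base64(f.read()).decode()
    #   server_proxy.push_file(os.path.basename(path), bin_data)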

    def connectivity_check (self):
        logger.info("Processing connectivity_check function.")
        return True

    def start(self):
        """This method fires up the daemon server based on initialized parameters of the class"""
        # initialize the server instance with given resources
        register_socket('trex_daemon_server')
        try:
            print("Firing up TRex REST daemon @ port {trex_port} ...\n".format( trex_port = self.trex_daemon_port ))
            logger.info("Firing up TRex REST daemon @ port {trex_port} ...".format( trex_port = self.trex_daemon_port ))
            logger.info("current working dir is: {0}".format(self.TREX_PATH) )
            logger.info("current files dir is  : {0}".format(self.trex_files_path) )
            logger.debug("Starting TRex server. Registering methods to process.")
            logger.info(self.get_trex_version(base64 = False))
            self.server = SimpleJSONRPCServer( (self.trex_host, self.trex_daemon_port) )
        except socket.error as e:
            if e.errno == errno.EADDRINUSE:
                logger.error("TRex server requested address already in use. Aborting server launching.")
                print("TRex server requested address already in use. Aborting server launching.")
                raise socket.error(errno.EADDRINUSE, "TRex daemon requested address already in use. "
                                                     "Server launch aborted. Please make sure no other process is "
                                                     "using the desired server properties.")
            elif isinstance(e, socket.gaierror) and e.errno == -3:
                # handling Temporary failure in name resolution exception
                raise socket.gaierror(-3, "Temporary failure in name resolution.\n"
                                          "Make sure provided hostname has DNS resolving.")
            else:
                raise

        # set further functionality and peripherals to server instance 
        self.server.register_function(self.add)
        self.server.register_function(self.cancel_reservation)
        self.server.register_function(self.connectivity_check)
        self.server.register_function(self.connectivity_check, 'check_connectivity') # alias
        self.server.register_function(self.force_trex_kill)
        self.server.register_function(self.get_file)
        self.server.register_function(self.get_files_list)
        self.server.register_function(self.get_files_path)
        self.server.register_function(self.get_running_info)
        self.server.register_function(self.get_running_status)
        self.server.register_function(self.get_trex_cmds)
        self.server.register_function(self.get_trex_config)
        self.server.register_function(self.get_trex_daemon_log)
        self.server.register_function(self.get_trex_log)
        self.server.register_function(self.get_trex_version)
        self.server.register_function(self.is_reserved)
        self.server.register_function(self.is_running)
        self.server.register_function(self.kill_all_trexes)
        self.server.register_function(self.push_file)
        self.server.register_function(self.reserve_trex)
        self.server.register_function(self.start_trex)
        self.server.register_function(self.stop_trex)
        self.server.register_function(self.wait_until_kickoff_finish)
        signal.signal(signal.SIGTSTP, self.stop_handler)
        signal.signal(signal.SIGTERM, self.stop_handler)
        try:
            self.zmq_monitor.start()
            self.server.serve_forever()
        except KeyboardInterrupt:
            logger.info("Daemon shutdown request detected." )
        finally:
            self.zmq_monitor.join()            # close ZMQ monitor thread resources
            self.server.shutdown()
            #self.server.server_close()


    # get files from TRex server and return their content (mainly for logs)
    @staticmethod
    def _pull_file(filepath):
        try:
            with open(filepath, 'rb') as f:
                file_content = f.read()
                return binascii.b2a_base64(file_content).decode(errors='replace')
        except Exception as e:
            err_str = "Can't get requested file %s: %s" % (filepath, e)
            logger.error(err_str)
            return Fault(-33, err_str)

    # returns True if given path is under TRex package or under /tmp/trex_files
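    # implementation note: os.path.relpath(path, base) begins with os.pardir
    # ('..') when path lies outside base, e.g.
    #   os.path.relpath('/etc/passwd', '/tmp/trex_files') == '../../etc/passwd'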
    def _check_path_under_TRex_or_temp(self, path):
        if not os.path.relpath(path, self.trex_files_path).startswith(os.pardir):
            return True
        if not os.path.relpath(path, self.TREX_PATH).startswith(os.pardir):
            return True
        return False

    # gets the file content encoded base64 either from /tmp/trex_files or TRex server dir
    def get_file(self, filepath):
        try:
            logger.info("Processing get_file() command.")
            if not self._check_path_under_TRex_or_temp(filepath):
                raise Exception('Given path should be under current TRex package or /tmp/trex_files')
            return self._pull_file(filepath)
        except Exception as e:
            err_str = "Can't get requested file %s: %s" % (filepath, e)
            logger.error(err_str)
            return Fault(-33, err_str)

    # get tuple (dirs, files) with directories and files lists from given path (limited under TRex package or /tmp/trex_files)
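    # note: next(os.walk(path)) yields (dirpath, dirnames, filenames); the
    # [1:3] slice keeps just the (dirnames, filenames) pair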
    def get_files_list(self, path):
        try:
            logger.info("Processing get_files_list() command, given path: %s" % path)
            if not self._check_path_under_TRex_or_temp(path):
                raise Exception('Given path should be under current TRex package or /tmp/trex_files')
            return next(os.walk(path))[1:3]
        except Exception as e:
            err_str = "Error processing get_files_list(): %s" % e
            logger.error(err_str)
            return Fault(-33, err_str)

    # get TRex log /tmp/trex.txt
    def get_trex_log(self):
        logger.info("Processing get_trex_log() command.")
        return self._pull_file('/tmp/trex.txt')

    # get /etc/trex_cfg.yaml
    def get_trex_config(self):
        logger.info("Processing get_trex_config() command.")
        return self._pull_file('/etc/trex_cfg.yaml')

    # get daemon log /var/log/trex/trex_daemon_server.log
    def get_trex_daemon_log (self):
        logger.info("Processing get_trex_daemon_log() command.")
        return self._pull_file('/var/log/trex/trex_daemon_server.log')
        
    # get TRex version from ./t-rex-64 --help (last lines starting with "Version : ...")
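    # the help output is expected to end with a line such as "Version : v2.xx"
    # (the exact format is an assumption; only the "Version :" prefix matters
    # to the regex below)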
    def get_trex_version (self, base64 = True):
        try:
            logger.info("Processing get_trex_version() command.")
            if not self.trex_version:
                ret_code, stdout, stderr = run_command('./t-rex-64 --help', cwd = self.TREX_PATH)
                search_result = re.search(r'\n\s*(Version\s*:.+)', stdout, re.DOTALL)
                if not search_result:
                    raise Exception('Could not determine version from ./t-rex-64 --help')
                self.trex_version = binascii.b2a_base64(search_result.group(1).encode(errors='replace'))
            if base64:
                return self.trex_version.decode(errors='replace')
            else:
                return binascii.a2b_base64(self.trex_version).decode(errors='replace')
        except Exception as e:
            err_str = "Can't get trex version, error: %s" % e
            logger.error(err_str)
            return Fault(-33, err_str)

    def stop_handler (self, *args, **kwargs):
        logger.info("Daemon STOP request detected.")
        if self.is_running():
            # in case TRex process is currently running, stop it before terminating server process
            self.stop_trex(self.trex.get_seq())
        sys.exit(0)

    def assert_zmq_ok(self):
        if self.trex.zmq_error:
            raise Exception('ZMQ thread got error: %s' % self.trex.zmq_error)
        if not self.zmq_monitor.is_alive():
            if self.trex.get_status() != TRexStatus.Idle:
                self.force_trex_kill()
            raise Exception('ZMQ thread is dead.')

    def is_running (self):
        run_status = self.trex.get_status()
        logger.info("Processing is_running() command. Running status is: {stat}".format(stat = run_status) )
        return run_status == TRexStatus.Running

    def is_reserved (self):
        logger.info("Processing is_reserved() command.")
        return bool(self.__reservation)

    def get_running_status (self):
        run_status = self.trex.get_status()
        logger.info("Processing get_running_status() command. Running status is: {stat}".format(stat = run_status) )
        return { 'state' : run_status.value, 'verbose' : self.trex.get_verbose_status() }

    def get_files_path (self):
        logger.info("Processing get_files_path() command." )
        return self.trex_files_path

    def reserve_trex (self, user):
        if user == "":
            logger.info("TRex reservation cannot apply to empty string user. Request denied.")
            return Fault(-33, "TRex reservation cannot apply to empty string user. Request denied.")

        with self.start_lock:
            logger.info("Processing reserve_trex() command.")
            if self.is_reserved():
                if user == self.__reservation['user']:  
                    # return True if the same user is asking and already holds the reservation
                    logger.info("The same user is asking and already holds the reservation. Re-reserving TRex.")
                    return True

                logger.info("TRex is already reserved to another user ({res_user}), cannot reserve to another user.".format( res_user = self.__reservation['user'] ))
                return Fault(-33, "TRex is already reserved to another user ({res_user}). Please make sure TRex is free before reserving it.".format(
                    res_user = self.__reservation['user']) )  # raise at client TRexInUseError
            elif self.trex.get_status() != TRexStatus.Idle:
                logger.info("TRex is currently running, cannot reserve TRex unless in Idle state.")
                return Fault(-13, 'TRex is currently running, cannot reserve TRex unless in Idle state. Please try again when TRex run finished.')  # raise at client TRexInUseError
            else:
                logger.info("TRex is now reserved for user ({res_user}).".format( res_user = user ))
                self.__reservation = {'user' : user, 'since' : time.ctime()}
                logger.debug("Reservation details: "+ str(self.__reservation))
                return True

    def cancel_reservation (self, user):
        with self.start_lock:
            logger.info("Processing cancel_reservation() command.")
            if self.is_reserved():
                if self.__reservation['user'] == user:
                    logger.info("TRex reservation to {res_user} has been canceled successfully.".format(res_user = self.__reservation['user']))
                    self.__reservation = None
                    return True
                else:
                    logger.warning("TRex is reserved to different user than the provided one. Reservation wasn't canceled.")
                    return Fault(-33, "Cancel reservation request is available to the user that holds the reservation. Request denied")  # raise at client TRexRequestDenied
            
            else:
               
#!/usr/bin/env python3

import unittest
import socket

from framework import tag_fixme_vpp_workers
from framework import VppTestCase, VppTestRunner
from vpp_ip import DpoProto, INVALID_INDEX
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
    VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
    VppIpTable, VppMplsTable, \
    VppMplsLabel, MplsLspMode, find_mpls_route, \
    FibPathProto, FibPathType, FibPathFlags, VppMplsLabel, MplsLspMode
from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
from vpp_papi import VppEnum

import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
from scapy.contrib.mpls import MPLS

NUM_PKTS = 67

# scapy removed these attributes.
# we asked that they be restored: https://github.com/secdev/scapy/pull/1878
# semantic names have more meaning than numbers. so here they are.
ARP.who_has = 1
ARP.is_at = 2
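# (tests can then write, e.g., ARP(op=ARP.who_has) instead of a bare 1)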


def verify_filter(capture, sent):
    if len(capture) != len(sent):
        # filter out any IPv6 RAs from the capture; build a new list instead
        # of calling remove() while iterating, which would skip elements
        capture = [p for p in capture if not p.haslayer(IPv6)]
    return capture


def verify_mpls_stack(tst, rx, mpls_labels):
    # the rx'd packet has the MPLS label popped
    eth = rx[Ether]
    tst.assertEqual(eth.type, 0x8847)

    rx_mpls = rx[MPLS]

    for ii in range(len(mpls_labels)):
        tst.assertEqual(rx_mpls.label, mpls_labels[ii].value)
        tst.assertEqual(rx_mpls.cos, mpls_labels[ii].exp)
        tst.assertEqual(rx_mpls.ttl, mpls_labels[ii].ttl)

        if ii == len(mpls_labels) - 1:
            tst.assertEqual(rx_mpls.s, 1)
        else:
            # not end of stack
            tst.assertEqual(rx_mpls.s, 0)
            # pop the label to expose the next
            rx_mpls = rx_mpls[MPLS].payload
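
# for reference, a two-label stack built with scapy looks like:
#   Ether() / MPLS(label=33, s=0) / MPLS(label=44, s=1) / IP() / UDP()
# only the bottom-of-stack entry carries s=1, which is what the loop above
# asserts for the last element of mpls_labels.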


@tag_fixme_vpp_workers
class TestMPLS(VppTestCase):
    """ MPLS Test Case """

    @classmethod
    def setUpClass(cls):
        super(TestMPLS, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestMPLS, cls).tearDownClass()

    def setUp(self):
        super(TestMPLS, self).setUp()

        # create 4 pg interfaces
        self.create_pg_interfaces(range(4))

        # set up all interfaces and
        # assign them to different tables.
        table_id = 0
        self.tables = []

        tbl = VppMplsTable(self, 0)
        tbl.add_vpp_config()
        self.tables.append(tbl)

        for i in self.pg_interfaces:
            i.admin_up()

            if table_id != 0:
                tbl = VppIpTable(self, table_id)
                tbl.add_vpp_config()
                self.tables.append(tbl)
                tbl = VppIpTable(self, table_id, is_ip6=1)
                tbl.add_vpp_config()
                self.tables.append(tbl)

            i.set_table_ip4(table_id)
            i.set_table_ip6(table_id)
            i.config_ip4()
            i.resolve_arp()
            i.config_ip6()
            i.resolve_ndp()
            i.enable_mpls()
            table_id += 1
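
        # note: pg0 stays in the default table (0); pg1, pg2 and pg3 each get
        # the dedicated IPv4/IPv6 tables created above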

    def tearDown(self):
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.set_table_ip4(0)
            i.set_table_ip6(0)
            i.disable_mpls()
            i.admin_down()
        super(TestMPLS, self).tearDown()

    # the default of 64 matches the IP packet TTL default
    def create_stream_labelled_ip4(
            self,
            src_if,
            mpls_labels,
            ping=0,
            ip_itf=None,
            dst_ip=None,
            chksum=None,
            ip_ttl=64,
            n=257):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, n):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)

            for ii in range(len(mpls_labels)):
                p = p / MPLS(label=mpls_labels[ii].value,
                             ttl=mpls_labels[ii].ttl,
                             cos=mpls_labels[ii].exp)
            if not ping:
                if not dst_ip:
                    p = (p / IP(src=src_if.local_ip4,
                                dst=src_if.remote_ip4,
                                ttl=ip_ttl) /
                         UDP(sport=1234, dport=1234) /
                         Raw(payload))
                else:
                    p = (p / IP(src=src_if.local_ip4, dst=dst_ip, ttl=ip_ttl) /
                         UDP(sport=1234, dport=1234) /
                         Raw(payload))
            else:
                p = (p / IP(src=ip_itf.remote_ip4,
                            dst=ip_itf.local_ip4,
                            ttl=ip_ttl) /
                     ICMP())

            if chksum:
                p[IP].chksum = chksum
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64,
                          ip_dscp=0, payload_size=None):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IP(src=src_if.remote_ip4, dst=dst_ip,
                    ttl=ip_ttl, tos=ip_dscp) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            if payload_size:
                self.extend_packet(p, payload_size)
            pkts.append(p)
        return pkts

    def create_stream_ip6(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IPv6(src=src_if.remote_ip6, dst=dst_ip,
                      hlim=ip_ttl, tc=ip_dscp) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_stream_labelled_ip6(self, src_if, mpls_labels,
                                   hlim=64, dst_ip=None):
        if dst_ip is None:
            dst_ip = src_if.remote_ip6
        self.reset_packet_infos()
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)
            for label in mpls_labels:
                p = p / MPLS(label=label.value, ttl=label.ttl, cos=label.exp)

            p = p / (IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) /
                     UDP(sport=1234, dport=1234) /
                     Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0,
                           ip_ttl=None, ip_dscp=0):
        try:
            capture = verify_filter(capture, sent)

            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]

                # the rx'd packet has the MPLS label popped
                eth = rx[Ether]
                self.assertEqual(eth.type, 0x800)

                tx_ip = tx[IP]
                rx_ip = rx[IP]

                if not ping_resp:
                    self.assertEqual(rx_ip.src, tx_ip.src)
                    self.assertEqual(rx_ip.dst, tx_ip.dst)
                    self.assertEqual(rx_ip.tos, ip_dscp)
                    if not ip_ttl:
                        # IP processing post pop has decremented the TTL
                        self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
                    else:
                        self.assertEqual(rx_ip.ttl, ip_ttl)
                else:
                    self.assertEqual(rx_ip.src, tx_ip.dst)
                    self.assertEqual(rx_ip.dst, tx_ip.src)

        except:
            raise

    def verify_capture_labelled_ip4(self, src_if, capture, sent,
                                    mpls_labels, ip_ttl=None):
        try:
            capture = verify_filter(capture, sent)

            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]
                tx_ip = tx[IP]
                rx_ip = rx[IP]

                verify_mpls_stack(self, rx, mpls_labels)

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                if not ip_ttl:
                    # IP processing post pop has decremented the TTL
                    self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
                else:
                    self.assertEqual(rx_ip.ttl, ip_ttl)

        except:
            raise

    def verify_capture_labelled_ip6(self, src_if, capture, sent,
                                    mpls_labels, ip_ttl=None):
        try:
            capture = verify_filter(capture, sent)

            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]
                tx_ip = tx[IPv6]
                rx_ip = rx[IPv6]

                verify_mpls_stack(self, rx, mpls_labels)

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                if not ip_ttl:
                    # IP processing post pop has decremented the TTL
                    self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
                else:
                    self.assertEqual(rx_ip.hlim, ip_ttl)

        except:
            raise

    def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels):
        try:
            capture = verify_filter(capture, sent)

            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]
                tx_ip = tx[IP]
                rx_ip = rx[IP]

                verify_mpls_stack(self, rx, mpls_labels)

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                # IP processing post pop has decremented the TTL
                self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)

        except:
            raise

    def verify_capture_labelled(self, src_if, capture, sent,
                                mpls_labels):
        try:
            capture = verify_filter(capture, sent)

            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                rx = capture[i]
                verify_mpls_stack(self, rx, mpls_labels)
        except:
            raise

    def verify_capture_ip6(self, src_if, capture, sent,
                           ip_hlim=None, ip_dscp=0):
        try:
            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]

                # the rx'd packet has the MPLS label popped
                eth = rx[Ether]
                self.assertEqual(eth.type, 0x86DD)

                tx_ip = tx[IPv6]
                rx_ip = rx[IPv6]

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                self.assertEqual(rx_ip.tc,  ip_dscp)
                # IP processing post pop has decremented the TTL
                if not ip_hlim:
                    self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
                else:
                    self.assertEqual(rx_ip.hlim, ip_hlim)

        except:
            raise

    def verify_capture_ip6_icmp(self, src_if, capture, sent):
        try:
            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]

                # the rx'd packet has the MPLS label popped
                eth = rx[Ether]
                self.assertEqual(eth.type, 0x86DD)

                tx_ip = tx[IPv6]
                rx_ip = rx[IPv6]

                self.assertEqual(rx_ip.dst, tx_ip.src)
                # ICMP sourced from the interface's address
                self.assertEqual(rx_ip.src, src_if.local_ip6)
                # hop-limit reset to 255 for ICMP packet
                self.assertEqual(rx_ip.hlim, 255)

                # indexing raises if the ICMPv6 Time Exceeded layer is absent
                icmp = rx[ICMPv6TimeExceeded]

        except:
            raise

    def verify_capture_fragmented_labelled_ip4(self, src_if, capture, sent,
                                               mpls_labels, ip_ttl=None):
        try:
            capture = verify_filter(capture, sent)

            for i in range(len(capture)):
                tx = sent[0]
                rx = capture[i]
                tx_ip = tx[IP]
                rx_ip = rx[IP]

                verify_mpls_stack(self, rx, mpls_labels)

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                if not ip_ttl:
                    # IP processing post pop has decremented the TTL
                    self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
                else:
                    self.assertEqual(rx_ip.ttl, ip_ttl)

        except:
            raise

    def test_swap(self):
        """ MPLS label swap tests """

        #
        # A simple MPLS xconnect - eos label in label out
        #
        route_32_eos = VppMplsRoute(self, 32, 1,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[VppMplsLabel(33)])])
        route_32_eos.add_vpp_config()

        self.assertTrue(
            find_mpls_route(self, 0, 32, 1,
                            [VppRoutePath(self.pg0.remote_ip4,
                                          self.pg0.sw_if_index,
                                          labels=[VppMplsLabel(33)])]))

        #
        # a stream that matches the route for 10.0.0.1
        # PG0 is in the default table
        #
        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(32, ttl=32, exp=1)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled(self.pg0, rx, tx,
                                     [VppMplsLabel(33, ttl=31, exp=1)])
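        # note: the swap decremented the label TTL (32 -> 31) and kept EXP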

        self.assertEqual(route_32_eos.get_stats_to()['packets'], 257)

        #
        # A simple MPLS xconnect - non-eos label in label out
        #
        route_32_neos = VppMplsRoute(self, 32, 0,
                                     [VppRoutePath(self.pg0.remote_ip4,
                                                   self.pg0.sw_if_index,
                                                   labels=[VppMplsLabel(33)])])
        route_32_neos.add_vpp_config()

        #
        # a stream that matches the route for 10.0.0.1
        # PG0 is in the default table
        #
        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(32, ttl=21, exp=7),
                                              VppMplsLabel(99)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled(self.pg0, rx, tx,
                                     [VppMplsLabel(33, ttl=20, exp=7),
                                      VppMplsLabel(99)])
        self.assertEqual(route_32_neos.get_stats_to()['packets'], 257)

        #
        # A simple MPLS xconnect - non-eos label in label out, uniform mode
        #
        route_42_neos = VppMplsRoute(
            self, 42, 0,
            [VppRoutePath(self.pg0.remote_ip4,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(43, MplsLspMode.UNIFORM)])])
        route_42_neos.add_vpp_config()

        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(42, ttl=21, exp=7),
                                              VppMplsLabel(99)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled(self.pg0, rx, tx,
                                     [VppMplsLabel(43, ttl=20, exp=7),
                                      VppMplsLabel(99)])

        #
        # An MPLS xconnect - EOS label in IP out
        #
        route_33_eos = VppMplsRoute(self, 33, 1,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[])])
        route_33_eos.add_vpp_config()

        tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip4(self.pg0, rx, tx)

        #
        # disposed packets have an invalid IPv4 checksum
        #
        tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)],
                                             dst_ip=self.pg0.remote_ip4,
                                             n=65,
                                             chksum=1)
        self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")

        #
        # An MPLS xconnect - EOS label in IP out, uniform mode
        #
        route_3333_eos = VppMplsRoute(
            self, 3333, 1,
            [VppRoutePath(self.pg0.remote_ip4,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])])
        route_3333_eos.add_vpp_config()

        tx = self.create_stream_labelled_ip4(
            self.pg0,
            [VppMplsLabel(3333, ttl=55, exp=3)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip4(self.pg0, rx, tx, ip_ttl=54, ip_dscp=0x60)
        tx = self.create_stream_labelled_ip4(
            self.pg0,
            [VppMplsLabel(3333, ttl=66, exp=4)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip4(self.pg0, rx, tx, ip_ttl=65, ip_dscp=0x80)
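        # note: in uniform mode the label's TTL/EXP are copied to the exposed
        # IP header on disposition: ttl 55/66 -> 54/65 after the decrement,
        # and EXP lands in the ToS precedence bits, i.e. tos = exp << 5
        # (3 -> 0x60, 4 -> 0x80)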

        #
        # An MPLS xconnect - EOS label in IPv6 out
        #
        route_333_eos = VppMplsRoute(
            self, 333, 1,
            [VppRoutePath(self.pg0.remote_ip6,
                          self.pg0.sw_if_index,
                          labels=[])],
            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
        route_333_eos.add_vpp_config()

        tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(333)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip6(self.pg0, rx, tx)

        #
        # disposed packets have an expired TTL
        #
        tx = self.create_stream_labelled_ip6(self.pg0,
                                             [VppMplsLabel(333, ttl=64)],
                                             dst_ip=self.pg1.remote_ip6,
                                             hlim=1)
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip6_icmp(self.pg0, rx, tx)

        #
        # An MPLS xconnect - EOS label in IPv6 out w imp-null
        #
        route_334_eos = VppMplsRoute(
            self, 334, 1,
            [VppRoutePath(self.pg0.remote_ip6,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(3)])],
            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
        route_334_eos.add_vpp_config()

        tx = self.create_stream_labelled_ip6(self.pg0,
                                             [VppMplsLabel(334, ttl=64)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip6(self.pg0, rx, tx)

        #
        # An MPLS xconnect - EOS label in IPv6 out w imp-null in uniform mode
        #
        route_335_eos = VppMplsRoute(
            self, 335, 1,
            [VppRoutePath(self.pg0.remote_ip6,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])],
            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
        route_335_eos.add_vpp_config()

        tx = self.create_stream_labelled_ip6(
            self.pg0,
            [VppMplsLabel(335, ttl=27, exp=4)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip6(self.pg0, rx, tx, ip_hlim=26, ip_dscp=0x80)

        #
        # disposed packets have an expired TTL
        #
        tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(334)],
                                             dst_ip=self.pg1.remote_ip6,
                                             hlim=0)
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip6_icmp(self.pg0, rx, tx)

        #
        # An MPLS xconnect - non-EOS label in IP out - an invalid configuration
        # so this traffic should be dropped.
        #
        route_33_neos = VppMplsRoute(self, 33, 0,
                                     [VppRoutePath(self.pg0.remote_ip4,
                                                   self.pg0.sw_if_index,
                                                   labels=[])])
        route_33_neos.add_vpp_config()

        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(33),
                                              VppMplsLabel(99)])
        self.send_and_assert_no_replies(
            self.pg0, tx,
            "MPLS non-EOS packets popped and forwarded")

        #
        # A recursive EOS x-connect, which resolves through another x-connect
        # in pipe mode
        #
        route_34_eos = VppMplsRoute(self, 34, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  0xffffffff,
                                                  nh_via_label=32,
                                                  labels=[VppMplsLabel(44),
                                                          VppMplsLabel(45)])])
        route_34_eos.add_vpp_config()
        self.logger.info(self.vapi.cli("sh mpls fib 34"))

        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(34, ttl=3)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled(self.pg0, rx, tx,
                                     [VppMplsLabel(33),
                                      VppMplsLabel(44),
                                      VppMplsLabel(45, ttl=2)])

        self.assertEqual(route_34_eos.get_stats_to()['packets'], 257)
        self.assertEqual(route_32_neos.get_stats_via()['packets'], 257)

        #
        # A recursive EOS x-connect, which resolves through another x-connect
        # in uniform mode
        #
        route_35_eos = VppMplsRoute(
            self, 35, 1,
            [VppRoutePath("0.0.0.0",
                          0xffffffff,
                          nh_via_label=42,
                          labels=[VppMplsLabel(44)])])
        route_35_eos.add_vpp_config()

        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(35, ttl=3)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled(self.pg0, rx, tx,
                                     [VppMplsLabel(43, ttl=2),
                                      VppMplsLabel(44, ttl=2)])

        #
        # A recursive non-EOS x-connect, which resolves through another
        # x-connect
        #
        route_34_neos = VppMplsRoute(self, 34, 0,
                                     [VppRoutePath("0.0.0.0",
                                                   0xffffffff,
                                                   nh_via_label=32,
                                                   labels=[VppMplsLabel(44),
                                                           VppMplsLabel(46)])])
        route_34_neos.add_vpp_config()

        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(34, ttl=45),
                                              VppMplsLabel(99)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        # it's the 2nd (counting from 0) label in the stack that is swapped
        self.verify_capture_labelled(self.pg0, rx, tx,
                                     [VppMplsLabel(33),
                                      VppMplsLabel(44),
                                      VppMplsLabel(46, ttl=44),
                                      VppMplsLabel(99)])

        #
        # a recursive IP route that resolves through the recursive non-eos
        # x-connect
        #
        ip_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                 [VppRoutePath("0.0.0.0",
                                               0xffffffff,
                                               nh_via_label=34,
                                               labels=[VppMplsLabel(55)])])
        ip_10_0_0_1.add_vpp_config()

        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(33),
                                          VppMplsLabel(44),
                                          VppMplsLabel(46),
                                          VppMplsLabel(55)])
        self.assertEqual(ip_10_0_0_1.get_stats_to()['packets'], 257)

        ip_10_0_0_1.remove_vpp_config()
        route_34_neos.remove_vpp_config()
        route_34_eos.remove_vpp_config()
        route_33_neos.remove_vpp_config()
        route_33_eos.remove_vpp_config()
        route_32_neos.remove_vpp_config()
        route_32_eos.remove_vpp_config()

    def test_bind(self):
        """ MPLS Local Label Binding test """

        #
        # Add a non-recursive route with a single out label
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[VppMplsLabel(45)])])
        route_10_0_0_1.add_vpp_config()

        # bind a local label to the route
        binding = VppMplsIpBind(self, 44, "10.0.0.1", 32)
        binding.add_vpp_config()
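        # packets received with local label 44 are now forwarded via the
        # route for 10.0.0.1/32, i.e. out-label 45 is swapped in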

        # non-EOS stream
        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(44),
                                              VppMplsLabel(99)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled(self.pg0, rx, tx,
                                     [VppMplsLabel(45, ttl=63),
                                      VppMplsLabel(99)])

        # EOS stream
        tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(44)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled(self.pg0, rx, tx,
                                     [VppMplsLabel(45, ttl=63)])

        # IP stream
        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [VppMplsLabel(45)])

        #
        # cleanup
        #
        binding.remove_vpp_config()
        route_10_0_0_1.remove_vpp_config()

    def test_imposition(self):
        """ MPLS label imposition test """

        #
        # Add a non-recursive route with a single out label
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[VppMplsLabel(32)])])
        route_10_0_0_1.add_vpp_config()

        #
        # a stream that matches the route for 10.0.0.1
        # PG0 is in the default table
        #
        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled_ip4(self.pg0, rx, tx, [VppMplsLabel(32)])

        #
        # Add a non-recursive route with 3 out labels
        #
        route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[VppMplsLabel(32),
                                                          VppMplsLabel(33),
                                                          VppMplsLabel(34)])])
        route_10_0_0_2.add_vpp_config()

        tx = self.create_stream_ip4(self.pg0, "10.0.0.2",
                                    ip_ttl=44, ip_dscp=0xff)
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(32),
                                          VppMplsLabel(33),
                                          VppMplsLabel(34)],
                                         ip_ttl=43)

        #
        # Add a non-recursive route with a single out label in uniform mode
        #
        route_10_0_0_3 = VppIpRoute(
            self, "10.0.0.3", 32,
            [VppRoutePath(self.pg0.remote_ip4,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(32,
                                               mode=MplsLspMode.UNIFORM)])])
        route_10_0_0_3.add_vpp_config()

        tx = self.create_stream_ip4(self.pg0, "10.0.0.3",
                                    ip_ttl=54, ip_dscp=0xbe)
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(32, ttl=53, exp=5)])

        #
        # Add an IPv6 non-recursive route with a single out label in
        # uniform mode
        #
        route_2001_3 = VppIpRoute(
            self, "2001::3", 128,
            [VppRoutePath(self.pg0.remote_ip6,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(32,
                                               mode=MplsLspMode.UNIFORM)])])
        route_2001_3.add_vpp_config()

        tx = self.create_stream_ip6(self.pg0, "2001::3",
                                    ip_ttl=54, ip_dscp=0xbe)
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled_ip6(self.pg0, rx, tx,
                                         [VppMplsLabel(32, ttl=53, exp=5)])

        #
        # add a recursive path, with output label, via the 1 label route
        #
        route_11_0_0_1 = VppIpRoute(self, "11.0.0.1", 32,
                                    [VppRoutePath("10.0.0.1",
                                                  0xffffffff,
                                                  labels=[VppMplsLabel(44)])])
        route_11_0_0_1.add_vpp_config()

        #
        # a stream that matches the route for 11.0.0.1, should pick up
        # the label stack for 11.0.0.1 and 10.0.0.1
        #
        tx = self.create_stream_ip4(self.pg0, "11.0.0.1")
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(32),
                                          VppMplsLabel(44)])

        self.assertEqual(route_11_0_0_1.get_stats_to()['packets'], 257)

        #
        # add a recursive path, with 2 labels, via the 3 label route
        #
        route_11_0_0_2 = VppIpRoute(self, "11.0.0.2", 32,
                                    [VppRoutePath("10.0.0.2",
                                                  0xffffffff,
                                                  labels=[VppMplsLabel(44),
                                                          VppMplsLabel(45)])])
        route_11_0_0_2.add_vpp_config()

        #
        # a stream that matches the route for 11.0.0.2, should pick up
        # the label stack for 11.0.0.2 and 10.0.0.2
        #
        tx = self.create_stream_ip4(self.pg0, "11.0.0.2")
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(32),
                                          VppMplsLabel(33),
                                          VppMplsLabel(34),
                                          VppMplsLabel(44),
                                          VppMplsLabel(45)])

        self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 257)

        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(32),
                                          VppMplsLabel(33),
                                          VppMplsLabel(34),
                                          VppMplsLabel(44),
                                          VppMplsLabel(45)])

        self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 514)

        #
        # cleanup
        #
        route_11_0_0_2.remove_vpp_config()
        route_11_0_0_1.remove_vpp_config()
        route_10_0_0_2.remove_vpp_config()
        route_10_0_0_1.remove_vpp_config()

    def test_imposition_fragmentation(self):
        """ MPLS label imposition fragmentation test """

        #
        # Add an IPv4 non-recursive route with a single out label
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[VppMplsLabel(32)])])
        route_10_0_0_1.add_vpp_config()

        #
        # a stream that matches the route for 10.0.0.1
        # PG0 is in the default table
        #
        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
        for i in range(0, 257):
            self.extend_packet(tx[i], 10000)

        #
        # 5 fragments per packet (257*5=1285)
        #
        rx = self.send_and_expect(self.pg0, tx, self.pg0, 1285)
        self.verify_capture_fragmented_labelled_ip4(self.pg0, rx, tx,
                                                    [VppMplsLabel(32)])
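
        # each fragment is labelled independently, so all 1285 frames
        # carry the same single-label stack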

        #
        # cleanup
        #
        route_10_0_0_1.remove_vpp_config()

    def test_tunnel_pipe(self):
        """ MPLS Tunnel Tests - Pipe """

        #
        # Create a tunnel with two out labels
        #
        mpls_tun = VppMPLSTunnelInterface(
            self,
            [VppRoutePath(self.pg0.remote_ip4,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(44),
                                  VppMplsLabel(46)])])
        mpls_tun.add_vpp_config()
        mpls_tun.admin_up()

        #
        # add an unlabelled route through the new tunnel
        #
        route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
                                    [VppRoutePath("0.0.0.0",
                                                  mpls_tun._sw_if_index)])
        route_10_0_0_3.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(44),
                                          VppMplsLabel(46)])

        #
        # add a labelled route through the new tunnel
        #
        route_10_0_0_4 = VppIpRoute(self, "10.0.0.4", 32,
                                    [VppRoutePath("0.0.0.0",
                                                  mpls_tun._sw_if_index,
                                                  labels=[33])])
        route_10_0_0_4.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.4")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(44),
                                          VppMplsLabel(46),
                                          VppMplsLabel(33, ttl=255)])

        #
        # change tunnel's MTU to a low value
        #
        mpls_tun.set_l3_mtu(1200)

        # send IP into the tunnel to be fragmented
        tx = self.create_stream_ip4(self.pg0, "10.0.0.3",
                                    payload_size=1500)
        rx = self.send_and_expect(self.pg0, tx, self.pg0, len(tx)*2)
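
        # each 1500 byte payload becomes two fragments at the 1200 byte
        # MTU, hence len(tx)*2 expected frames; duplicate each tx packet
        # so the verify routine can match both fragments of each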

        fake_tx = []
        for p in tx:
            fake_tx.append(p)
            fake_tx.append(p)
        self.verify_capture_tunneled_ip4(self.pg0, rx, fake_tx,
                                         [VppMplsLabel(44),
                                          VppMplsLabel(46)])

        # send MPLS into the tunnel to be fragmented
        tx = self.create_stream_ip4(self.pg0, "10.0.0.4",
                                    payload_size=1500)
        rx = self.send_and_expect(self.pg0, tx, self.pg0, len(tx)*2)

        fake_tx = []
        for p in tx:
            fake_tx.append(p)
            fake_tx.append(p)
        self.verify_capture_tunneled_ip4(self.pg0, rx, fake_tx,
                                         [VppMplsLabel(44),
                                          VppMplsLabel(46),
                                          VppMplsLabel(33, ttl=255)])

    def test_tunnel_uniform(self):
        """ MPLS Tunnel Tests - Uniform """

        #
        # Create a tunnel with two out labels
        # The label stack is specified here from outer to inner
        #
        mpls_tun = VppMPLSTunnelInterface(
            self,
            [VppRoutePath(self.pg0.remote_ip4,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(44, ttl=32),
                                  VppMplsLabel(46, MplsLspMode.UNIFORM)])])
        mpls_tun.add_vpp_config()
        mpls_tun.admin_up()

        #
        # add an unlabelled route through the new tunnel
        #
        route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
                                    [VppRoutePath("0.0.0.0",
                                                  mpls_tun._sw_if_index)])
        route_10_0_0_3.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.3", ip_ttl=24)
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(44, ttl=32),
                                          VppMplsLabel(46, ttl=23)])
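
        # label 44 is PIPE mode, so its ttl stays at the configured 32;
        # label 46 is UNIFORM, so it inherits the decremented IP ttl, 23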

        #
        # add a labelled route through the new tunnel
        #
        route_10_0_0_4 = VppIpRoute(
            self, "10.0.0.4", 32,
            [VppRoutePath("0.0.0.0",
                          mpls_tun._sw_if_index,
                          labels=[VppMplsLabel(33, ttl=47)])])
        route_10_0_0_4.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.4")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture()
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(44, ttl=32),
                                          VppMplsLabel(46, ttl=47),
                                          VppMplsLabel(33, ttl=47)])

    def test_mpls_tunnel_many(self):
        """ MPLS Multiple Tunnels """

        for ii in range(10):
            mpls_tun = VppMPLSTunnelInterface(
                self,
                [VppRoutePath(self.pg0.remote_ip4,
                              self.pg0.sw_if_index,
                              labels=[VppMplsLabel(44, ttl=32),
                                      VppMplsLabel(46, MplsLspMode.UNIFORM)])])
            mpls_tun.add_vpp_config()
            mpls_tun.admin_up()

    def test_v4_exp_null(self):
        """ MPLS V4 Explicit NULL test """

        #
        # The first test case has an MPLS TTL of 0
        # all packets should be dropped
        #
        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(0, ttl=0)])
        self.send_and_assert_no_replies(self.pg0, tx,
                                        "MPLS TTL=0 packets forwarded")

        #
        # a stream with a non-zero MPLS TTL
        # PG0 is in the default table
        #
        tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(0)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip4(self.pg0, rx, tx)

        #
        # a stream with a non-zero MPLS TTL
        # PG1 is in table 1
        # we are ensuring the post-pop lookup occurs in the VRF table
        #
        tx = self.create_stream_labelled_ip4(self.pg1, [VppMplsLabel(0)])
        rx = self.send_and_expect(self.pg1, tx, self.pg1)
        self.verify_capture_ip4(self.pg1, rx, tx)

    def test_v6_exp_null(self):
        """ MPLS V6 Explicit NULL test """

        #
        # a stream with a non-zero MPLS TTL
        # PG0 is in the default table
        #
        tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(2)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip6(self.pg0, rx, tx)

        #
        # a stream with a non-zero MPLS TTL
        # PG1 is in table 1
        # we are ensuring the post-pop lookup occurs in the VRF table
        #
        tx = self.create_stream_labelled_ip6(self.pg1, [VppMplsLabel(2)])
        rx = self.send_and_expect(self.pg1, tx, self.pg1)
        self.verify_capture_ip6(self.pg1, rx, tx)

    def test_deag(self):
        """ MPLS Deagg """

        #
        # A de-agg route - next-hop lookup in default table
        #
        route_34_eos = VppMplsRoute(self, 34, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  0xffffffff,
                                                  nh_table_id=0)])
        route_34_eos.add_vpp_config()
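
        # an all-zeros next-hop and an invalid sw_if_index make this a
        # lookup (de-agg) route: the label is popped and the IP packet
        # is looked up in the table given by nh_table_id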

        #
        # ping an interface in the default table
        # PG0 is in the default table
        #
        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(34)],
                                             ping=1,
                                             ip_itf=self.pg0)
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip4(self.pg0, rx, tx, ping_resp=1)

        #
        # A de-agg route - next-hop lookup in non-default table
        #
        route_35_eos = VppMplsRoute(self, 35, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  0xffffffff,
                                                  nh_table_id=1)])
        route_35_eos.add_vpp_config()

        #
        # ping an interface in the non-default table
        # PG0 is in the default table. packets arrive labelled in the
        # default table and egress unlabelled in the non-default
        #
        tx = self.create_stream_labelled_ip4(
            self.pg0, [VppMplsLabel(35)], ping=1, ip_itf=self.pg1)
        rx = self.send_and_expect(self.pg0, tx, self.pg1)
        self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)

        #
        # Double pop
        #
        route_36_neos = VppMplsRoute(self, 36, 0,
                                     [VppRoutePath("0.0.0.0",
                                                   0xffffffff)])
        route_36_neos.add_vpp_config()
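
        # 36 is a non-eos entry, so it is popped and processing
        # continues with the next label, 35, which de-aggs into table 1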

        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(36),
                                              VppMplsLabel(35)],
                                             ping=1, ip_itf=self.pg1)
        rx = self.send_and_expect(self.pg0, tx, self.pg1)
        self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)

        route_36_neos.remove_vpp_config()
        route_35_eos.remove_vpp_config()
        route_34_eos.remove_vpp_config()

    def test_interface_rx(self):
        """ MPLS Interface Receive """

        #
        # Add a non-recursive route that will forward the traffic
        # post-interface-rx
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    table_id=1,
                                    paths=[VppRoutePath(self.pg1.remote_ip4,
                                                        self.pg1.sw_if_index)])
        route_10_0_0_1.add_vpp_config()

        #
        # An interface-receive label that maps traffic to an RX on
        # interface pg1. The packet is injected on pg0, which is in
        # table 0; the interface-rx path makes it appear received on
        # pg1 and so matches a route in table 1. If the packet
        # egresses, we must have swapped to pg1 and matched the
        # route in table 1
        #
        route_34_eos = VppMplsRoute(
            self, 34, 1,
            [VppRoutePath("0.0.0.0",
                          self.pg1.sw_if_index,
                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)])
        route_34_eos.add_vpp_config()

        #
        # a labelled stream whose payload is destined to 10.0.0.1
        # PG0 is in the default table
        #
        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(34)],
                                             dst_ip="10.0.0.1")
        rx = self.send_and_expect(self.pg0, tx, self.pg1)
        self.verify_capture_ip4(self.pg1, rx, tx)

    def test_mcast_mid_point(self):
        """ MPLS Multicast Mid Point """

        #
        # Add a non-recursive route that will forward the traffic
        # post-interface-rx
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    table_id=1,
                                    paths=[VppRoutePath(self.pg1.remote_ip4,
                                                        self.pg1.sw_if_index)])
        route_10_0_0_1.add_vpp_config()

        #
        # Add a mcast entry that replicates to pg2 and pg3
        # and to an interface-rx (as a bud node would)
        #
        route_3400_eos = VppMplsRoute(
            self, 3400, 1,
            [VppRoutePath(self.pg2.remote_ip4,
                          self.pg2.sw_if_index,
                          labels=[VppMplsLabel(3401)]),
             VppRoutePath(self.pg3.remote_ip4,
                          self.pg3.sw_if_index,
                          labels=[VppMplsLabel(3402)]),
             VppRoutePath("0.0.0.0",
                          self.pg1.sw_if_index,
                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)],
            is_multicast=1)
        route_3400_eos.add_vpp_config()

        #
        # a labelled stream whose payload is destined to 10.0.0.1
        # PG0 is in the default table
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(3400, ttl=64)],
                                             n=257,
                                             dst_ip="10.0.0.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
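
        # the mid-point swap decrements the MPLS TTL (64 -> 63) on the
        # labelled replications; the interface-rx copy egresses pg1 as
        # plain IPv4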

        rx = self.pg1.get_capture(257)
        self.verify_capture_ip4(self.pg1, rx, tx)

        rx = self.pg2.get_capture(257)
        self.verify_capture_labelled(self.pg2, rx, tx,
                                     [VppMplsLabel(3401, ttl=63)])
        rx = self.pg3.get_capture(257)
        self.verify_capture_labelled(self.pg3, rx, tx,
                                     [VppMplsLabel(3402, ttl=63)])

    def test_mcast_head(self):
        """ MPLS Multicast Head-end """

        MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
        MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t

        #
        # Create a multicast tunnel with two replications
        #
        mpls_tun = VppMPLSTunnelInterface(
            self,
            [VppRoutePath(self.pg2.remote_ip4,
                          self.pg2.sw_if_index,
                          labels=[VppMplsLabel(42)]),
             VppRoutePath(self.pg3.remote_ip4,
                          self.pg3.sw_if_index,
                          labels=[VppMplsLabel(43)])],
            is_multicast=1)
        mpls_tun.add_vpp_config()
        mpls_tun.admin_up()

        #
        # add an unlabelled route through the new tunnel
        #
        route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
                                    [VppRoutePath("0.0.0.0",
                                                  mpls_tun._sw_if_index)])
        route_10_0_0_3.add_vpp_config()

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg2.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(42)])
        rx = self.pg3.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(43)])

        #
        # Add an IP multicast route via the tunnel; a (*,G) with
        # one accepting interface, pg0, and one forwarding interface,
        # the tunnel
        #
        route_232_1_1_1 = VppIpMRoute(
            self,
            "0.0.0.0",
            "232.1.1.1", 32,
            MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
            [VppMRoutePath(self.pg0.sw_if_index,
                           MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT),
             VppMRoutePath(mpls_tun._sw_if_index,
                           MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
        route_232_1_1_1.add_vpp_config()
        self.logger.info(self.vapi.cli("sh ip mfib index 0"))

        self.vapi.cli("clear trace")
        tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
        self.pg0.add_stream(tx)

        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg2.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(42)])
        rx = self.pg3.get_capture(257)
        self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(43)])

    def test_mcast_ip4_tail(self):
        """ MPLS IPv4 Multicast Tail """

        MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
        MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t

        #
        # Add a multicast route that will forward the traffic
        # post-disposition
        #
        route_232_1_1_1 = VppIpMRoute(
            self,
            "0.0.0.0",
            "232.1.1.1", 32,
            MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
            table_id=1,
            paths=[VppMRoutePath(self.pg1.sw_if_index,
                                 MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
        route_232_1_1_1.add_vpp_config()

        #
        # An MPLS multicast route that disposes the label and looks the
        # packet up in table 1, tagged with rpf-id 55.
        # The packet is injected on pg0, which is in table 0; if it
        # egresses on pg1, we must have passed the rpf-id check and
        # matched the route in table 1
        #
        route_34_eos = VppMplsRoute(
            self, 34, 1,
            [VppRoutePath("0.0.0.0",
                          0xffffffff,
                          nh_table_id=1,
                          rpf_id=55)],
            is_multicast=1,
            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)

        route_34_eos.add_vpp_config()
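
        # the route tags disposed packets with rpf-id 55; they are only
        # accepted if the matching mfib entry carries the same rpf-id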

        #
        # Drop due to RPF-ID miss - the mfib entry's RPF-ID is not yet set
        #
        self.vapi.cli("clear trace")
        tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
                                             dst_ip="232.1.1.1", n=1)
        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")

        #
        # set the RPF-ID of the entry to match the input packet's
        #
        route_232_1_1_1.update_rpf_id(55)
        self.logger.info(self.vapi.cli("sh ip mfib index 1 232.1.1.1"))

        tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
                                             dst_ip="232.1.1.1")
        rx = self.send_and_expect(self.pg0, tx, self.pg1)
        self.verify_capture_ip4(self.pg1, rx, tx)

        #
        # disposed packets have an invalid IPv4 checksum
        #
        tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
                                             dst_ip="232.1.1.1", n=65,
                                             chksum=1)
        self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")

        #
        # set the RPF-ID of the entry to not match the input packet's
        #
        route_232_1_1_1.update_rpf_id(56)
        tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
                                             dst_ip="232.1.1.1")
        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")

    def test_mcast_ip6_tail(self):
        """ MPLS IPv6 Multicast Tail """

        MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
        MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t

        #
        # Add a multicast route that will forward the traffic
        # post-disposition
        #
        route_ff = VppIpMRoute(
            self,
            "::",
            "ff01::1", 32,
            MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
            table_id=1,
            paths=[VppMRoutePath(self.pg1.sw_if_index,
                                 MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
                                 proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
        route_ff.add_vpp_config()

        #
        # An MPLS multicast route that disposes the label and looks the
        # packet up in table 1, tagged with rpf-id 55.
        # The packet is injected on pg0, which is in table 0; if it
        # egresses on pg1, we must have passed the rpf-id check and
        # matched the route in table 1
        #
        route_34_eos = VppMplsRoute(
            self, 34, 1,
            [VppRoutePath("::",
                          0xffffffff,
                          nh_table_id=1,
                          rpf_id=55)],
            is_multicast=1,
            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)

        route_34_eos.add_vpp_config()

        #
        # Drop due to RPF-ID miss - the mfib entry's RPF-ID is not yet set
        #
        tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(34)],
                                             dst_ip="ff01::1")
        self.send_and_assert_no_replies(self.pg0, tx, "RPF Miss")

        #
        # set the RPF-ID of the entry to match the input packet's
        #
        route_ff.update_rpf_id(55)

        tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(34)],
                                             dst_ip="ff01::1")
        rx = self.send_and_expect(self.pg0, tx, self.pg1)
        self.verify_capture_ip6(self.pg1, rx, tx)

        #
        # disposed packets have hop-limit = 1
        #
        tx = self.create_stream_labelled_ip6(self.pg0,
                                             [VppMplsLabel(34)],
                                             dst_ip="ff01::1",
                                             hlim=1)
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_ip6_icmp(self.pg0, rx, tx)

        #
        # set the RPF-ID of the entry to not match the input packet's
        #
        route_ff.update_rpf_id(56)
        tx = self.create_stream_labelled_ip6(self.pg0,
                                             [VppMplsLabel(34)],
                                             dst_ip="ff01::1")
        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")

    def test_6pe(self):
        """ MPLS 6PE """

        #
        # Add a non-recursive route with a single out label
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[VppMplsLabel(45)])])
        route_10_0_0_1.add_vpp_config()

        # bind a local label to the route
        binding = VppMplsIpBind(self, 44, "10.0.0.1", 32)
        binding.add_vpp_config()

        #
        # a labelled v6 route that resolves through the v4
        #
        route_2001_3 = VppIpRoute(
            self, "2001::3", 128,
            [VppRoutePath("10.0.0.1",
                          INVALID_INDEX,
                          labels=[VppMplsLabel(32)])])
        route_2001_3.add_vpp_config()

        tx = self.create_stream_ip6(self.pg0, "2001::3")
        rx = self.send_and_expect(self.pg0, tx, self.pg0)

        self.verify_capture_labelled_ip6(self.pg0, rx, tx,
                                         [VppMplsLabel(45),
                                          VppMplsLabel(32)])
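
        # the v6 route recurses through 10.0.0.1 and so picks up that
        # route's out-label, 45, as the outer label; its own label, 32,
        # sits inner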

        #
        # and a v4 recursive via the v6
        #
        route_20_3 = VppIpRoute(
            self, "20.0.0.3", 32,
            [VppRoutePath("2001::3",
                          INVALID_INDEX,
                          labels=[VppMplsLabel(99)])])
        route_20_3.add_vpp_config()

        tx = self.create_stream_ip4(self.pg0, "20.0.0.3")
        rx = self.send_and_expect(self.pg0, tx, self.pg0)

        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
                                         [VppMplsLabel(45),
                                          VppMplsLabel(32),
                                          VppMplsLabel(99)])
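

# For reference, each 32-bit MPLS label stack entry exercised above
# packs as label(20) | exp(3) | s(1) | ttl(8), per RFC 3032; an
# illustrative helper, not used by the tests:
def mpls_lse(label, exp=0, s=1, ttl=64):
    """ pack an MPLS label stack entry into its 32-bit wire format """
    return (label << 12) | (exp << 9) | (s << 8) | ttl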


class TestMPLSDisabled(VppTestCase):
    """ MPLS disabled """

    @classmethod
    def setUpClass(cls):
        super(TestMPLSDisabled, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestMPLSDisabled, cls).tearDownClass()

    def setUp(self):
        super(TestMPLSDisabled, self).setUp()

        # create 2 pg interfaces
        self.create_pg_interfaces(range(2))

        self.tbl = VppMplsTable(self, 0)
        self.tbl.add_vpp_config()

        # PG0 is MPLS enabled
        self.pg0.admin_up()
        self.pg0.config_ip4()
        self.pg0.resolve_arp()
        self.pg0.enable_mpls()

        # PG 1 is not MPLS enabled
        self.pg1.admin_up()

    def tearDown(self):
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.admin_down()

        self.pg0.disable_mpls()
        super(TestMPLSDisabled, self).tearDown()

    def test_mpls_disabled(self):
        """ MPLS Disabled """

        self.logger.info(self.vapi.cli("show mpls interface"))
        self.logger.info(self.vapi.cli("show mpls interface pg1"))
        self.logger.info(self.vapi.cli("show mpls interface pg0"))

        tx = (Ether(src=self.pg1.remote_mac,
                    dst=self.pg1.local_mac) /
              MPLS(label=32, ttl=64) /
              IPv6(src="2001::1", dst=self.pg0.remote_ip6) /
              UDP(sport=1234, dport=1234) /
              Raw(b'\xa5' * 100))

        #
        # A simple MPLS xconnect - eos label in label out
        #
        route_32_eos = VppMplsRoute(self, 32, 1,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[33])])
        route_32_eos.add_vpp_config()

        #
        # PG1 does not forward MPLS traffic
        #
        self.send_and_assert_no_replies(self.pg1, tx, "MPLS disabled")

        #
        # MPLS enable PG1
        #
        self.pg1.enable_mpls()

        self.logger.info(self.vapi.cli("show mpls interface"))
        self.logger.info(self.vapi.cli("show mpls interface pg1"))

        #
        # Now we get packets through
        #
        self.pg1.add_stream(tx)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx = self.pg0.get_capture(1)

        #
        # Disable PG1
        #
        self.pg1.disable_mpls()

        #
        # PG1 does not forward MPLS traffic
        #
        self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled")
        self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled")


class TestMPLSPIC(VppTestCase):
    """ MPLS Prefix-Independent Convergence (PIC) edge convergence """

    @classmethod
    def setUpClass(cls):
        super(TestMPLSPIC, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestMPLSPIC, cls).tearDownClass()

    def setUp(self):
        super(TestMPLSPIC, self).setUp()

        # create 4 pg interfaces
        self.create_pg_interfaces(range(4))

        mpls_tbl = VppMplsTable(self, 0)
        mpls_tbl.add_vpp_config()
        tbl4 = VppIpTable(self, 1)
        tbl4.add_vpp_config()
        tbl6 = VppIpTable(self, 1, is_ip6=1)
        tbl6.add_vpp_config()

        # core links
        self.pg0.admin_up()
        self.pg0.config_ip4()
        self.pg0.resolve_arp()
        self.pg0.enable_mpls()

        self.pg1.admin_up()
        self.pg1.config_ip4()
        self.pg1.resolve_arp()
        self.pg1.enable_mpls()

        # VRF (customer facing) link
        self.pg2.admin_up()
        self.pg2.set_table_ip4(1)
        self.pg2.config_ip4()
        self.pg2.resolve_arp()
        self.pg2.set_table_ip6(1)
        self.pg2.config_ip6()
        self.pg2.resolve_ndp()

        self.pg3.admin_up()
        self.pg3.set_table_ip4(1)
        self.pg3.config_ip4()
        self.pg3.resolve_arp()
        self.pg3.set_table_ip6(1)
        self.pg3.config_ip6()
        self.pg3.resolve_ndp()

    def tearDown(self):
        self.pg0.disable_mpls()
        self.pg1.disable_mpls()
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.set_table_ip4(0)
            i.set_table_ip6(0)
            i.admin_down()
        super(TestMPLSPIC, self).tearDown()

    def test_mpls_ibgp_pic(self):
        """ MPLS iBGP Prefix-Independent Convergence (PIC) edge convergence

        1) setup many iBGP VPN routes via a pair of iBGP peers
        2) check ECMP forwarding to these peers
        3) withdraw the IGP route to one of these peers
        4) check forwarding continues to the remaining peer
        """

        #
        # IGP+LDP core routes
        #
        core_10_0_0_45 = VppIpRoute(self, "10.0.0.45", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[45])])
        core_10_0_0_45.add_vpp_config()

        core_10_0_0_46 = VppIpRoute(self, "10.0.0.46", 32,
                                    [VppRoutePath(self.pg1.remote_ip4,
                                                  self.pg1.sw_if_index,
                                                  labels=[46])])
        core_10_0_0_46.add_vpp_config()

        #
        # Lots of VPN routes. We need more than 64 so VPP will build
        # the fast convergence indirection
        #
        vpn_routes = []
        pkts = []
        for ii in range(NUM_PKTS):
            dst = "192.168.1.%d" % ii
            vpn_routes.append(VppIpRoute(
                self, dst, 32,
                [VppRoutePath(
                    "10.0.0.45",
                    0xffffffff,
                    labels=[145],
                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST),
                 VppRoutePath(
                     "10.0.0.46",
                     0xffffffff,
                     labels=[146],
                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST)],
                table_id=1))
            vpn_routes[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg2.local_mac,
                              src=self.pg2.remote_mac) /
                        IP(src=self.pg2.remote_ip4, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw(b'\xa5' * 100))
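
        # RESOLVE_VIA_HOST constrains each recursive path to resolve
        # via a host (/32) route, as a BGP next-hop must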

        #
        # Send the packet stream (one pkt to each VPN route)
        #  - expect a 50-50 split of the traffic
        #
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0._get_capture(NUM_PKTS)
        rx1 = self.pg1._get_capture(NUM_PKTS)

        # not testing the LB hashing algorithm so we're not concerned
        # with the split ratio, just as long as neither is 0
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))
        self.assertEqual(len(pkts), len(rx0) + len(rx1),
                         "Expected all (%s) packets across both ECMP paths. "
                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))

        #
        # use a test CLI command to stop the FIB walk process; this
        # prevents the FIB converging the VPN routes and thus allows
        # us to probe the interim (post-fail, pre-converge) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # Withdraw one of the IGP routes
        #
        core_10_0_0_46.remove_vpp_config()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.vapi.ppcli("clear trace")
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0.get_capture(NUM_PKTS)
        self.assertEqual(len(pkts), len(rx0),
                         "Expected all (%s) packets across single path. "
                         "rx0: %s." % (len(pkts), len(rx0)))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")

        #
        # packets should still be forwarded through the remaining peer
        #
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0.get_capture(NUM_PKTS)
        self.assertEqual(len(pkts), len(rx0),
                         "Expected all (%s) packets across single path. "
                         "rx0: %s." % (len(pkts), len(rx0)))

        #
        # Add the IGP route back and we return to load-balancing
        #
        core_10_0_0_46.add_vpp_config()

        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0._get_capture(NUM_PKTS)
        rx1 = self.pg1._get_capture(NUM_PKTS)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))
        self.assertEqual(len(pkts), len(rx0) + len(rx1),
                         "Expected all (%s) packets across both ECMP paths. "
                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))

    def test_mpls_ebgp_pic(self):
        """ MPLS eBGP Prefix-Independent Convergence (PIC) edge convergence

        1) setup many eBGP VPN routes via a pair of eBGP peers
        2) check ECMP forwarding to these peers
        3) withdraw one eBGP path - expect LB across the remaining path
        """

        #
        # Lots of VPN routes. We need more than 64 so VPP will build
        # the fast convergence indirection
        #
        vpn_routes = []
        vpn_bindings = []
        pkts = []
        for ii in range(NUM_PKTS):
            dst = "192.168.1.%d" % ii
            local_label = 1600 + ii
            vpn_routes.append(VppIpRoute(
                self, dst, 32,
                [VppRoutePath(
                    self.pg2.remote_ip4,
                    0xffffffff,
                    nh_table_id=1,
                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
                 VppRoutePath(
                     self.pg3.remote_ip4,
                     0xffffffff,
                     nh_table_id=1,
                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
                table_id=1))
            vpn_routes[ii].add_vpp_config()

            vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
                                              ip_table_id=1))
            vpn_bindings[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg0.local_mac,
                              src=self.pg0.remote_mac) /
                        MPLS(label=local_label, ttl=64) /
                        IP(src=self.pg0.remote_ip4, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw(b'\xa5' * 100))
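
        # each VppMplsIpBind attaches the local label to its VPN
        # prefix, so labelled packets from the core are disposed into
        # table 1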

        #
        # Send the packet stream (one pkt to each VPN route)
        #  - expect a 50-50 split of the traffic
        #
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(NUM_PKTS)
        rx1 = self.pg3._get_capture(NUM_PKTS)

        # not testing the LB hashing algorithm so we're not concerned
        # with the split ratio, just as long as neither is 0
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))
        self.assertEqual(len(pkts), len(rx0) + len(rx1),
                         "Expected all (%s) packets across both ECMP paths. "
                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))

        #
        # use a test CLI command to stop the FIB walk process; this
        # prevents the FIB converging the VPN routes and thus allows
        # us to probe the interim (post-fail, pre-converge) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # withdraw the connected prefix on the interface.
        #
        self.pg2.unconfig_ip4()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(NUM_PKTS)
        self.assertEqual(len(pkts), len(rx0),
                         "Expected all (%s) packets across single path. "
                         "rx0: %s." % (len(pkts), len(rx0)))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")

        #
        # packets should still be forwarded through the remaining peer
        #
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(NUM_PKTS)
        self.assertEqual(len(pkts), len(rx0),
                         "Expected all (%s) packets across single path. "
                         "rx0: %s." % (len(pkts), len(rx0)))

        #
        # put the connected routes back
        #
        self.pg2.config_ip4()
        self.pg2.resolve_arp()

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(NUM_PKTS)
        rx1 = self.pg3._get_capture(NUM_PKTS)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))
        self.assertEqual(len(pkts), len(rx0) + len(rx1),
                         "Expected all (%s) packets across both ECMP paths. "
                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))

    def test_mpls_v6_ebgp_pic(self):
        """ MPLSv6 eBGP Prefix-Independent Convergence (PIC) edge convergence

        1) setup many eBGP VPNv6 routes via a pair of eBGP peers
        2) check ECMP forwarding to these peers
        3) withdraw one eBGP path - expect LB across the remaining path
        """

        #
        # Lots of VPN routes. We need more than 64 so VPP will build
        # the fast convergence indirection
        #
        vpn_routes = []
        vpn_bindings = []
        pkts = []
        for ii in range(NUM_PKTS):
            dst = "3000::%d" % ii
            local_label = 1600 + ii
            vpn_routes.append(VppIpRoute(
                self, dst, 128,
                [VppRoutePath(
                    self.pg2.remote_ip6,
                    0xffffffff,
                    nh_table_id=1,
                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
                 VppRoutePath(
                     self.pg3.remote_ip6,
                     0xffffffff,
                     nh_table_id=1,
                     flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
                table_id=1))
            vpn_routes[ii].add_vpp_config()

            vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
                                              ip_table_id=1))
            vpn_bindings[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg0.local_mac,
                              src=self.pg0.remote_mac) /
                        MPLS(label=local_label, ttl=64) /
                        IPv6(src=self.pg0.remote_ip6, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw(b'\xa5' * 100))
            self.logger.info(self.vapi.cli("sh ip6 fib %s" % dst))

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(NUM_PKTS)
        rx1 = self.pg3._get_capture(NUM_PKTS)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))
        self.assertEqual(len(pkts), len(rx0) + len(rx1),
                         "Expected all (%s) packets across both ECMP paths. "
                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))

        #
        # use a test CLI command to stop the FIB walk process; this
        # prevents the FIB converging the VPN routes and thus allows
        # us to probe the interim (post-fail, pre-converge) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # withdraw the connected prefix on the interface.
        # and shutdown the interface so the ND cache is flushed.
        #
        self.pg2.unconfig_ip6()
        self.pg2.admin_down()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(NUM_PKTS)
        self.assertEqual(len(pkts), len(rx0),
                         "Expected all (%s) packets across single path. "
                         "rx0: %s." % (len(pkts), len(rx0)))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(NUM_PKTS)
        self.assertEqual(len(pkts), len(rx0),
                         "Expected all (%s) packets across single path. "
                         "rx0: %s." % (len(pkts), len(rx0)))

        #
        # put the connected routes back
        #
        self.logger.info(self.vapi.cli("sh log"))
        self.pg2.admin_up()
        self.pg2.config_ip6()
        self.pg2.resolve_ndp()

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(NUM_PKTS)
        rx1 = self.pg3._get_capture(NUM_PKTS)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))
        self.assertEqual(len(pkts), len(rx0) + len(rx1),
                         "Expected all (%s) packets across both ECMP paths. "
                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))


class TestMPLSL2(VppTestCase):
    """ MPLS-L2 """

    @classmethod
    def setUpClass(cls):
        super(TestMPLSL2, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestMPLSL2, cls).tearDownClass()

    def setUp(self):
        super(TestMPLSL2, self).setUp()

        # create 2 pg interfaces
        self.create_pg_interfaces(range(2))

        # create the default MPLS table
        self.tables = []
        tbl = VppMplsTable(self, 0)
        tbl.add_vpp_config()
        self.tables.append(tbl)

        # use pg0 as the core facing interface, don't resolve ARP
        self.pg0.admin_up()
        self.pg0.config_ip4()
        self.pg0.enable_mpls()

        # use the remaining interface for the customer facing L2 link
        for i in self.pg_interfaces[1:]:
            i.admin_up()

    def tearDown(self):
        for i in self.pg_interfaces[1:]:
            i.admin_down()

        self.pg0.disable_mpls()
        self.pg0.unconfig_ip4()
        self.pg0.admin_down()
        super(TestMPLSL2, self).tearDown()

    def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels):
        capture = verify_filter(capture, sent)

        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            tx = sent[i]
            rx = capture[i]

            # the MPLS TTL is 255 since it enters a new tunnel
            verify_mpls_stack(self, rx, mpls_labels)

            tx_eth = tx[Ether]
            rx_eth = Ether(scapy.compat.raw(rx[MPLS].payload))

            self.assertEqual(rx_eth.src, tx_eth.src)
            self.assertEqual(rx_eth.dst, tx_eth.dst)

    def verify_arp_req(self, rx, smac, sip, dip):
        ether = rx[Ether]
        self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff")
        self.assertEqual(ether.src, smac)

        arp = rx[ARP]
        self.assertEqual(arp.hwtype, 1)
        self.assertEqual(arp.ptype, 0x800)
        self.assertEqual(arp.hwlen, 6)
        self.assertEqual(arp.plen, 4)
        self.assertEqual(arp.op, ARP.who_has)
        self.assertEqual(arp.hwsrc, smac)
        self.assertEqual(arp.hwdst, "00:00:00:00:00:00")
        self.assertEqual(arp.psrc, sip)
        self.assertEqual(arp.pdst, dip)

    def test_vpws(self):
        """ Virtual Private Wire Service """

        #
        # Create an MPLS tunnel that pushes 1 label
        # For Ethernet over MPLS uniform mode is irrelevant since the
        # encapsulated frame carries no ttl/cos to inherit, but we test
        # that it works anyway
        #
        mpls_tun_1 = VppMPLSTunnelInterface(
            self,
            [VppRoutePath(self.pg0.remote_ip4,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(42, MplsLspMode.UNIFORM)])],
            is_l2=1)
        mpls_tun_1.add_vpp_config()
        mpls_tun_1.admin_up()

        #
        # Create a label entry for 55 that does L2 input to the tunnel
        #
        route_55_eos = VppMplsRoute(
            self, 55, 1,
            [VppRoutePath("0.0.0.0",
                          mpls_tun_1.sw_if_index,
                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
        route_55_eos.add_vpp_config()

        #
        # Cross-connect the tunnel with one of the customer's L2 interfaces
        #
        self.vapi.sw_interface_set_l2_xconnect(self.pg1.sw_if_index,
                                               mpls_tun_1.sw_if_index,
                                               enable=1)
        self.vapi.sw_interface_set_l2_xconnect(mpls_tun_1.sw_if_index,
                                               self.pg1.sw_if_index,
                                               enable=1)

        #
        # inject a packet from the core
        #
        pcore = (Ether(dst=self.pg0.local_mac,
                       src=self.pg0.remote_mac) /
                 MPLS(label=55, ttl=64) /
                 Ether(dst="00:00:de:ad:ba:be",
                       src="00:00:de:ad:be:ef") /
                 IP(src="10.10.10.10", dst="11.11.11.11") /
                 UDP(sport=1234, dport=1234) /
                 Raw(b'\xa5' * 100))

        tx0 = pcore * NUM_PKTS
        rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
        payload = pcore[MPLS].payload

        self.assertEqual(rx0[0][Ether].dst, payload[Ether].dst)
        self.assertEqual(rx0[0][Ether].src, payload[Ether].src)

        #
        # Inject a packet from the customer/L2 side
        # there's no resolved ARP entry so the first packet we see should be
        # an ARP request
        #
        tx1 = pcore[MPLS].payload
        rx1 = self.send_and_expect(self.pg1, [tx1], self.pg0)

        self.verify_arp_req(rx1[0],
                            self.pg0.local_mac,
                            self.pg0.local_ip4,
                            self.pg0.remote_ip4)

        #
        # resolve the ARP entries and send again
        #
        self.pg0.resolve_arp()
        tx1 = pcore[MPLS].payload * NUM_PKTS
        rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)

        self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])

    def test_vpls(self):
        """ Virtual Private LAN Service """

        # we skipped this in the setup
        self.pg0.resolve_arp()

        #
        # Create two L2 MPLS tunnels
        #
        mpls_tun1 = VppMPLSTunnelInterface(
            self,
            [VppRoutePath(self.pg0.remote_ip4,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(42)])],
            is_l2=1)
        mpls_tun1.add_vpp_config()
        mpls_tun1.admin_up()

        mpls_tun2 = VppMPLSTunnelInterface(
            self,
            [VppRoutePath(self.pg0.remote_ip4,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(43)])],
            is_l2=1)
        mpls_tun2.add_vpp_config()
        mpls_tun2.admin_up()

        #
        # Create label entries, 55 and 56, that do L2 input to the
        # tunnels; the latter includes a Pseudo Wire Control Word
        #
        route_55_eos = VppMplsRoute(
            self, 55, 1,
            [VppRoutePath("0.0.0.0",
                          mpls_tun1.sw_if_index,
                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)

        route_56_eos = VppMplsRoute(
            self, 56, 1,
            [VppRoutePath("0.0.0.0",
                          mpls_tun2.sw_if_index,
                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
                          flags=FibPathFlags.FIB_PATH_FLAG_POP_PW_CW,
                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)

        # move me
        route_56_eos.add_vpp_config()
        route_55_eos.add_vpp_config()

        self.logger.info(self.vapi.cli("sh mpls fib 56"))

        #
        # add the tunnels to the customer's bridge-domain
        #
        self.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=mpls_tun1.sw_if_index, bd_id=1)
        self.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=mpls_tun2.sw_if_index, bd_id=1)
        self.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)

        #
        # Packet from host on the customer interface to each host
        # reachable over the core, and vice-versa
        #
        p_cust1 = (Ether(dst="00:00:de:ad:ba:b1",
                         src="00:00:de:ad:be:ef") /
                   IP(src="10.10.10.10", dst="11.11.11.11") /
                   UDP(sport=1234, dport=1234) /
                   Raw(b'\xa5' * 100))
        p_cust2 = (Ether(dst="00:00:de:ad:ba:b2",
                         src="00:00:de:ad:be:ef") /
                   IP(src="10.10.10.10", dst="11.11.11.12") /
                   UDP(sport=1234, dport=1234) /
                   Raw(b'\xa5' * 100))
        p_core1 = (Ether(dst=self.pg0.local_mac,
                         src=self.pg0.remote_mac) /
                   MPLS(label=55, ttl=64) /
                   Ether(src="00:00:de:ad:ba:b1",
                         dst="00:00:de:ad:be:ef") /
                   IP(dst="10.10.10.10", src="11.11.11.11") /
                   UDP(sport=1234, dport=1234) /
                   Raw(b'\xa5' * 100))
        p_core2 = (Ether(dst=self.pg0.local_mac,
                         src=self.pg0.remote_mac) /
                   MPLS(label=56, ttl=64) /
                   Raw(b'\x01' * 4) /  # PW CW
                   Ether(src="00:00:de:ad:ba:b2",
                         dst="00:00:de:ad:be:ef") /
                   IP(dst="10.10.10.10", src="11.11.11.12") /
                   UDP(sport=1234, dport=1234) /
                   Raw(b'\xa5' * 100))
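
        # route 56's path carries FIB_PATH_FLAG_POP_PW_CW, so the
        # 4-byte pseudowire control word in p_core2 is stripped at
        # disposition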

        #
        # The BD is learning, so send in one of each packet to learn
        #

        # 2 packets due to BD flooding
        rx = self.send_and_expect(self.pg1, p_cust1, self.pg0, n_rx=2)
        rx = self.send_and_expect(self.pg1, p_cust2, self.pg0, n_rx=2)

        # we've learnt this so expect it to be forwarded, not flooded
        rx = self.send_and_expect(self.pg0, [p_core1], self.pg1)
        self.assertEqual(rx[0][Ether].dst, p_cust1[Ether].src)
        self.assertEqual(rx[0][Ether].src, p_cust1[Ether].dst)

        rx = self.send_and_expect(self.pg0, [p_core2], self.pg1)
        self.assertEqual(rx[0][Ether].dst, p_cust2[Ether].src)
        self.assertEqual(rx[0][Ether].src, p_cust2[Ether].dst)

        #
        # now a stream in each direction from each host
        #
        rx = self.send_and_expect(self.pg1, p_cust1 * NUM_PKTS, self.pg0)
        self.verify_capture_tunneled_ethernet(rx, p_cust1 * NUM_PKTS,
                                              [VppMplsLabel(42)])

        rx = self.send_and_expect(self.pg1, p_cust2 * NUM_PKTS, self.pg0)
        self.verify_capture_tunneled_ethernet(rx, p_cust2 * NUM_PKTS,
                                              [VppMplsLabel(43)])

        rx = self.send_and_expect(self.pg0, p_core1 * NUM_PKTS, self.pg1)
        rx = self.send_and_expect(self.pg0, p_core2 * NUM_PKTS, self.pg1)

        #
        # remove interfaces from the customer's bridge-domain
        #
        self.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=mpls_tun1.sw_if_index, bd_id=1, enable=0)
        self.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=mpls_tun2.sw_if_index, bd_id=1, enable=0)
        self.vapi.sw_interface_set_l2_bridge(
            rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)